diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 00000000..06bf30a3 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,29 @@ +# This workflow will build a golang project +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go + +name: Go + +on: + push: + branches: [ "dev", "branch-2.0", "branch-2.1", "branch-3.0" ] + pull_request: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Format + run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi + + - name: Build + run: make + + - name: Test + run: make test diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 00000000..fda09ab1 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,29 @@ +name: golangci-lint +on: + push: + branches: + - main + - dev + - branch-3.0 + - branch-2.1 + - branch-2.0 + pull_request: + +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: '1.20' + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.60 diff --git a/.gitignore b/.gitignore index 96cea04b..0773631c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,4 @@ bin output ccr.db -backup +tarball diff --git a/CHANGELOG.md b/CHANGELOG.md index 358b6657..774d4191 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,169 @@ # 更新日志 -## v 0.5 +### Fix + +## 3.0.4/2.1.8 + +注意:从这个版本开始 doris 和 ccr-syncer 的 2.0 版本将不再更新,需要使用 ccr-syncer 的需要先升级到 2.1 及以上版本。 + +### Fix + +- 修复 table name 中带 `-` 无法同步的问题 (selectdb/ccr-syncer#168) +- 修复部分同步下可能同步多次增量数据的问题 (selectdb/ccr-syncer#186) +- 修复 create 又立即 drop 的情况下无法找到 table 的问题 (selectdb/ccr-syncer#188) +- 跳过不支持的 table 类型,比如 ES TABLE +- 避免在同步快照、binlog 期间对上游 name 产生依赖 (selectdb/ccr-syncer#205, selectdb/ccr-syncer#239) +- 修复全量同步期间 view 的别名问题 (selectdb/ccr-syncer#207) +- 修复 add partition with keyword name 的问题 (selectdb/ccr-syncer#212) +- 跳过 drop tmp partition (selectdb/ccr-syncer#214) +- 修复快照过期的问题,过期后会重做 (selectdb/ccr-syncer#229) +- 修复 rename 导致的上下游 index name 无法匹配的问题 (selectdb/ccr-syncer#235) +- 修复并行创建 table/backup 时 table 丢失的问题 (selectdb/ccr-syncer#237) +- 修复 partial snapshot 期间,上游 table/partition 已经被删除/重命名/替换的问题 (selectdb/ccr-syncer#240, selectdb/ccr-syncer#241, selectdb/ccr-syncer#249, selectdb/ccr-syncer#255) +- 检查 database connection 错误 (selectdb/ccr-syncer#247) +- 过滤已经被删除的 table (selectdb/ccr-syncer#248) +- 修复 create table 时下游 table 已经存在的问题 (selectdb/ccr-syncer#161) + +### Feature + +- 支持 atomic restore,全量同步期间下游仍然可读 (selectdb/ccr-syncer#166) +- 支持处理包装在 barrier log 中的其他 binlog (主要用于在 2.0/2.1 上增加新增的 binlog 类型)(selectdb/ccr-syncer#208) +- 支持 rename table (2.1) (selectdb/ccr-syncer#209) +- 跳过 modify partition binlog (selectdb/ccr-syncer#213) +- 支持 modify comment binlog (selectdb/ccr-syncer#140) +- 支持 replace table binlog (selectdb/ccr-syncer#245) +- 支持 drop view binlog (selectdb/ccr-syncer#138) +- 支持 modify view def binlog (selectdb/ccr-syncer#184) +- 支持 inverted index 相关 binlog (selectdb/ccr-syncer#252) +- 支持 table sync 下的 txn insert (WIP) (selectdb/ccr-syncer#234, selectdb/ccr-syncer#259) +- 支持 rename partition/rollup binlogs (selectdb/ccr-syncer#268) +- 支持 
add/drop rollup binlogs (selectdb/ccr-syncer#269) +- 支持 modify view/comment in 2.1 (selectdb/ccr-syncer#270, selectdb/ccr-syncer#273) +- 支持 table sync 下的 replace table (selectdb/ccr-syncer#279) + +### Improve + +- 支持同步 rename column,需要 doris xxxx (selectdb/ccr-syncer#139) +- 支持在全量同步过程中,遇到 table signature 不匹配时,使用 alias 替代 drop (selectdb/ccr-syncer#179) +- 增加 monitor,在日志中 dump 内存使用率 (selectdb/ccr-syncer#181) +- 过滤 schema change 删除的 indexes,避免全量同步 (selectdb/ccr-syncer#185) +- 过滤 schema change 创建的 shadow indexes 的更新,避免全量同步 (selectdb/ccr-syncer#187) +- 增加 `mysql_max_allowed_packet` 参数,控制 mysql sdk 允许发送的 packet 大小 (selectdb/ccr-syncer#196) +- 限制一个 JOB 中单个 BE 的 ingest 并发数,减少对 BE 的连接数和文件描述符消耗 (selectdb/ccr-syncer#195) +- 避免在获取 job status 等待锁 (selectdb/ccr-syncer#198) +- 避免 backup/restore 任务阻塞查询 ccr job progress (selectdb/ccr-syncer#201, selectdb/ccr-syncer#206) +- 避免将 snapshot job info 和 meta (这两个数据可能非常大)持久化到 mysql 中 (selectdb/ccr-syncer#204) +- 上游 db 中没有 table 时,打印 info 而不是 error (selectdb/ccr-syncer#211) +- 在 ccr syncer 重启后,复用由当前 job 发起的 backup/restore job (selectdb/ccr-syncer#218, selectdb/ccr-syncer#224, selectdb/ccr-syncer#226) +- 支持读取压缩后的快照/恢复快照时压缩,避免碰到 thrift max message size 限制 (selectdb/ccr-syncer#223) +- API job_progress 避免返回 persist data (selectdb/ccr-syncer#271) + +## 2.0.15/2.1.6 + +### Fix + +- 修复 `REPLACE_IF_NOT_NULL` 语句的默认值语法不兼容问题 (selectdb/ccr-syncer#180) +- 修复 table sync 下 partial snapshot 没有更新 dest table id 的问题 (selectdb/ccr-syncer#178) +- **修复 table sync with alias 时,lightning schema change 找不到 table 的问题** (selectdb/ccr-syncer#176) +- 修复 db sync 下 partial snapshot table 为空的问题 (selectdb/ccr-syncer#173) +- 修复 create table 时下游 view 已经存在的问题(先删除 view),feature gate: `feature_create_view_drop_exists` (selectdb/ccr-syncer#170,selectdb/ccr-syncer#171) +- 修复 table not found 时没有 rollback binlog 的问题 +- **修复下游删表后重做 snapshot 是 table mapping 过期的问题 (selectdb/ccr-syncer#162,selectdb/ccr-syncer#163,selectdb/ccr-syncer#164)** +- 修复 full sync 期间 view already exists 的问题,如果 signature 不匹配会先删除 (selectdb/ccr-syncer#152) +- 修复 2.0 中 get view 逻辑,兼容 default_cluster 语法 (selectdb/ccr-syncer#149) +- 修复 job state 变化时仍然更新了 job progress 的问题,对之前的逻辑无影响,主要用于支持 partial sync (selectdb/ccr-syncer#124) +- 修复 get_lag 接口中不含 lag 的问题 (selectdb/ccr-syncer#126) +- 修复下游 restore 时未清理 orphan tables/partitions 的问题 (selectdb/ccr-syncer#128) + - 备注: 暂时禁用,因为 doris 侧发现了 bug (selectdb/ccr-syncer#153,selectdb/ccr-syncer#161) +- **修复下游删表后重做 snapshot 时 dest meta cache 过期的问题 (selectdb/ccr-syncer#132)** + +### Feature + +- 增加 `/force_fullsync` 用于强制触发 fullsync (selectdb/ccr-syncer#167) +- 增加 `/features` 接口,用于列出当前有哪些 feature 以及是否打开 (selectdb/ccr-syncer#175) +- 支持同步 drop view(drop table 失败后使用 drop view 重试)(selectdb/ccr-syncer#169) +- 支持同步 rename 操作 (selectdb/ccr-syncer#147) +- schema change 使用 partial sync 而不是 fullsync (selectdb/ccr-syncer#151) +- partial sync 使用 rename 而不是直接修改 table,因此表的读写在同步过程中不受影响 (selectdb/ccr-syncer#148) +- 支持 partial sync,减少需要同步的数据量 (selectdb/ccr-syncer#125) +- 添加参数 `allowTableExists`,允许在下游 table 存在时,仍然创建 ccr job(如果 schema 不一致,会自动删表重建)(selectdb/ccr-syncer#136) + +### Improve + +- 日志输出 milliseconds (selectdb/ccr-syncer#182) +- 如果下游表的 schema 不一致,则将表移动到 RecycleBin 中(之前是强制删除)(selectdb/ccr-syncer#137) + +## 2.0.14/2.1.5 + +### Fix + +- 过滤已经删除的 partitions,避免 full sync,需要 doris 2.0.14/2.1.5 (selectdb/ccr-syncer#117) +- 过滤已经删除的 tables,避免 full sync (selectdb/ccr-syncer#123) +- 兼容 doris 3.0 alternative json name,doris 3.0 必须使用该版本的 CCR syncer (selectdb/ccr-syncer#121) +- 修复 list jobs 接口在高可用环境下不可用的问题 (selectdb/ccr-syncer#120) + 
+## 2.0.11 + +对应 doris 2.0.11。 + +### Feature + +- 支持以 postgresql 作为 ccr-syncer 的元数据库 (selectdb/ccr-syncer#77) +- 支持 insert overwrite 相关操作 (selectdb/ccr-syncer#97,selectdb/ccr-syncer#99) + +### Fix + +- 修复 drop partition 后因找不到 partition id 而无法继续同步的问题 (selectdb/ccr-syncer#82) +- 修复高可用模式下接口无法 redirect 的问题 (selectdb/ccr-syncer#81) +- 修复 binlog 可能因同步失败而丢失的问题 (selectdb/ccr-syncer#86,selectdb/ccr-syncer#91) +- 修改 connect 和 rpc 超时时间默认值,connect 默认 10s,rpc 默认 30s (selectdb/ccr-syncer#94,selectdb/ccr-syncer#95) +- 修复 view 和 materialized view 使用造成空指针问题 (selectdb/ccr-syncer#100) +- 修复 add partition sql 错误的问题 (selectdb/ccr-syncer#99) + + +## 2.1.3/2.0.3.10 + +### Fix + +- 修复因与上下游 FE 网络中断而触发 full sync 的问题 + +### Feature + +- 增加 `/job_progress` 接口用于获取 JOB 进度 +- 增加 `/job_details` 接口用于获取 JOB 信息 +- 保留 job 状态变更的各个时间点,并在 `/job_progress` 接口中展示 + +### Fix + +- 修复若干 keywords 没有 escape 的问题 + +## 2.0.3.9 + +配合 doris 2.0.9 版本 + +### Feature + +- 添加选项以启动 pprof server +- 允许配置 rpc 合 connection 超时 + +### Fix + +- restore 每次重试时使用不同的 label 名 +- update table 失败时(目标表不存在)会触发快照同步 +- 修复同步 sql 中包含关键字的问题 +- 如果恢复时碰到表 schema 发生变化,会先删表再重试恢复 + +## 0.5 ### 支持高可用 - 现在可以部署多个Syncer节点来保证CCR功能的高可用。 - db是Syncer集群划分的依据,同一个集群下的Syncer共用一个db。 - Syncer集群采用对称设计,每个Syncer都会相对独立的执行被分配到的job。在某个Syncer节点down掉后,它的jobs会依据负载均衡算法被分给其他Syncer节点。 -## v 0.4 +## 0.4 * 增加 enable_db_binlog.sh 方便用户对整库开启binlog -## v 0.3 +## 0.3 ### LOG diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..e0741144 --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +This free license agreement is made between SelectDB Inc. (hereinafter referred to as "SelectDB", "we", "our" or "us") and the users ("user", "your" or "you") of the SelectDB products. SelectDB products refer to the software and services provided by SelectDB, including any updates, error fixes, and documentation. You acknowledge that you have fully read, understood and accepted this agreement in its entirety before you begin a trial or purchase of SelectDB products or services. You agree that by clicking the "agree" box or similar or using our product and services, you are agreeing to enter to this agreement, which is a legally binding contract between you and SelectDB. If you do not agree with any provision of this agreement, you must not purchase or use any of our services. +SelectDB reserves the right to change our products and services in accordance with applicable laws and our corporate policies without prior notice. We will post the changes on SelectDB.io. You agree that by continuing to use our services after the announcement of any changes to this agreement, you acknowledge that you have fully read, understood and accepted the modified products and services and will use our products and services in accordance with the modified agreement. If you disagree with any changes, you should no longer use our products and services. +License Rights and Limitations +SelectDB grants you a free, non-exclusive, non-transferable, limited license to use the SelectDB products. +- You have the right to install and use the SelectDB product or service on multiple computers, and run it for your personal use or internal business operations, subject to the terms of this agreement. +- You may redistribute the unmodified software and product documentation in accordance with the terms of this agreement, provided that you do not charge any fees related to such distribution or use of the software. 
+The effectiveness of your license is subject to the following conditions: +- You shall not remove any proprietary notices or markings of SelectDB or the licensor from the software or documentation. +- You shall not modify, reverse engineer, decompile, or attempt to extract the source code of the software. +- You shall not use the software for illegal purposes or violate any applicable laws or regulations. +Intellectual Property Rights +- All intellectual property rights of SelectDB, including but not limited to copyrights, patents, and trademarks, are owned by the licensor. +- This agreement does not grant the user any intellectual property rights. +Disclaimer +- The software is provided "as is" without any warranties, representations, conditions, or guarantees of any kind. +- The licensor does not provide any warranties regarding the suitability, merchantability, accuracy, reliability, or any other aspect of the software. +- To the maximum extent permitted by applicable law, the licensor shall not be liable for any direct, indirect, incidental, special, or consequential damages arising from the use of the software. +Miscellaneous +- This agreement constitutes the entire agreement between the licensor and the user regarding the use of the software and supersedes any prior oral or written agreements. +- This agreement shall be governed by the laws of Singapore in terms of interpretation, validity, and performance. diff --git a/Makefile b/Makefile index c9afe40c..5bc8812d 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,31 @@ tag := $(shell git describe --abbrev=0 --always --dirty --tags) sha := $(shell git rev-parse --short HEAD) git_tag_sha := $(tag):$(sha) +ifeq ($(shell uname -i),x86_64) + # Make them happy + platform := x64 +else + platform := arm64 +endif +tarball_suffix := $(tag)-$(platform) + +LDFLAGS="-X 'github.com/selectdb/ccr_syncer/pkg/version.GitTagSha=$(git_tag_sha)'" +GOFLAGS= + +GOFORMAT := gofmt -l -d -w + +# COVERAGE=ON make +ifeq ($(COVERAGE),ON) + GOFLAGS += -cover +endif + +.PHONY: flag_coverage +## COVERAGE=ON : Set coverage flag + +.PHONY: default +## default: Build ccr_syncer +default: ccr_syncer + .PHONY: build ## build : Build binary build: ccr_syncer get_binlog ingest_binlog get_meta snapshot_op get_master_token spec_checker rows_parse @@ -27,12 +52,12 @@ lint: .PHONY: fmt ## fmt : Format all code fmt: - $(V)go fmt ./... + $(V)$(GOFORMAT) . .PHONY: test ## test : Run test test: - $(V)go test $(shell go list ./... | grep -v github.com/selectdb/ccr_syncer/cmd | grep -v github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/) | grep -F -v '[no test files]' + $(V)go test $(shell go list ./... | grep -v github.com/selectdb/ccr_syncer/cmd | grep -v github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/) .PHONY: help ## help : Print help message @@ -42,11 +67,24 @@ help: Makefile # --------------- ------------------ --------------- # --------------- User Defined Tasks --------------- -.PHONY: cmd/ccr_syncer + +.PHONY: cloc +## cloc : Count lines of code +cloc: + $(V)tokei -C . 
-e pkg/rpc/kitex_gen -e pkg/rpc/thrift + +.PHONY: gen_mock +## gen_mock : Generate mock +gen_mock: + $(V)mockgen -source=pkg/rpc/fe.go -destination=pkg/ccr/fe_mock.go -package=ccr + $(V)mockgen -source=pkg/ccr/metaer.go -destination=pkg/ccr/metaer_mock.go -package=ccr + $(V)mockgen -source=pkg/ccr/metaer_factory.go -destination=pkg/ccr/metaer_factory_mock.go -package=ccr + $(V)mockgen -source=pkg/rpc/rpc_factory.go -destination=pkg/ccr/rpc_factory_mock.go -package=ccr + .PHONY: ccr_syncer ## ccr_syncer : Build ccr_syncer binary ccr_syncer: bin - $(V)go build -ldflags "-X github.com/selectdb/ccr_syncer/pkg/version.GitTagSha=$(git_tag_sha)" -o bin/ccr_syncer ./cmd/ccr_syncer + $(V)go build ${GOFLAGS} -ldflags ${LDFLAGS} -o bin/ccr_syncer ./cmd/ccr_syncer .PHONY: get_binlog ## get_binlog : Build get_binlog binary @@ -59,9 +97,9 @@ run_get_binlog: get_binlog .PHONY: sync_thrift ## sync_thrift : Sync thrift -# TODO(Drogon): Add build thrift sync_thrift: - $(V)rsync -avc $(THRIFT_DIR)/ rpc/thrift/ + $(V)rsync -avc $(THRIFT_DIR)/ pkg/rpc/thrift/ + $(V)$(MAKE) -C pkg/rpc/ gen_thrift .PHONY: ingest_binlog ## ingest_binlog : Build ingest_binlog binary @@ -98,7 +136,29 @@ get_lag: bin rows_parse: bin $(V)go build -o bin/rows_parse ./cmd/rows_parse +.PHONY: thrift_get_meta +## thrift_get_meta : Build thrift_get_meta binary +thrift_get_meta: bin + $(V)go build -o bin/thrift_get_meta ./cmd/thrift_get_meta + +.PHONY: metrics +## metrics : Build metrics binary +metrics: bin + $(V)go build -o bin/metrics ./cmd/metrics + .PHONY: todos ## todos : Print all todos todos: - $(V)grep -rnw . -e "TODO" | grep -v '^./rpc/thrift' | grep -v '^./.git' \ No newline at end of file + $(V)grep -rnw . -e "TODO" | grep -v '^./pkg/rpc/thrift' | grep -v '^./.git' + +.PHONY: tarball +## tarball : Archive files and release ccr-syncer-$(version)-$(platform).tar.xz +tarball: default + $(V)mkdir -p tarball/ccr-syncer-$(tarball_suffix)/{bin,db,doc,log} + $(V)cp CHANGELOG.md README.md LICENSE tarball/ccr-syncer-$(tarball_suffix)/ + $(V)cp bin/ccr_syncer tarball/ccr-syncer-$(tarball_suffix)/bin/ + $(V)cp shell/{enable_db_binlog.sh,start_syncer.sh,stop_syncer.sh} tarball/ccr-syncer-$(tarball_suffix)/bin/ + $(V)cp -r doc/* tarball/ccr-syncer-$(tarball_suffix)/doc/ + $(V)cd tarball/ && tar cfJ ccr-syncer-$(tarball_suffix).tar.xz ccr-syncer-$(tarball_suffix) + $(V)echo archive: tarball/ccr-syncer-$(tarball_suffix).tar.xz + diff --git a/build.sh b/build.sh old mode 100644 new mode 100755 diff --git a/cmd/ccr_syncer/ccr_syncer.go b/cmd/ccr_syncer/ccr_syncer.go index 2e08a097..64f997e7 100644 --- a/cmd/ccr_syncer/ccr_syncer.go +++ b/cmd/ccr_syncer/ccr_syncer.go @@ -3,6 +3,8 @@ package main import ( "flag" "fmt" + "net/http" + _ "net/http/pprof" "os" "sync" "syscall" @@ -17,6 +19,8 @@ import ( "github.com/selectdb/ccr_syncer/pkg/version" "github.com/selectdb/ccr_syncer/pkg/xerror" + "github.com/hashicorp/go-metrics" + "github.com/hashicorp/go-metrics/prometheus" log "github.com/sirupsen/logrus" ) @@ -29,6 +33,8 @@ type Syncer struct { Db_port int Db_user string Db_password string + Pprof bool + Ppof_port int } var ( @@ -49,6 +55,8 @@ func init() { flag.StringVar(&syncer.Host, "host", "127.0.0.1", "syncer host") flag.IntVar(&syncer.Port, "port", 9190, "syncer port") + flag.IntVar(&syncer.Ppof_port, "pprof_port", 6060, "pprof port used for memory analyze") + flag.BoolVar(&syncer.Pprof, "pprof", false, "use pprof or not") flag.Parse() utils.InitLog() @@ -74,6 +82,8 @@ func main() { db, err = storage.NewSQLiteDB(dbPath) case "mysql": db, err = 
storage.NewMysqlDB(syncer.Db_host, syncer.Db_port, syncer.Db_user, syncer.Db_password) + case "postgresql": + db, err = storage.NewPostgresqlDB(syncer.Db_host, syncer.Db_port, syncer.Db_user, syncer.Db_password) default: err = xerror.Wrap(err, xerror.Normal, "new meta db failed.") } @@ -82,7 +92,7 @@ func main() { } // Step 2: init factory - factory := ccr.NewFactory(rpc.NewRpcFactory(), ccr.NewMetaFactory(), base.NewSpecerFactory()) + factory := ccr.NewFactory(rpc.NewRpcFactory(), ccr.NewMetaFactory(), base.NewSpecerFactory(), ccr.DefaultThriftMetaFactory) // Step 3: create job manager && http service && checker hostInfo := fmt.Sprintf("%s:%d", syncer.Host, syncer.Port) @@ -116,7 +126,22 @@ func main() { checker.Start() }() - // Step 6: start signal mux + // Step 7: init metrics + sink, err := prometheus.NewPrometheusSink() + if err != nil { + log.Fatalf("new prometheus sink failed: %+v", err) + } + metrics.NewGlobal(metrics.DefaultConfig("ccr-metrics"), sink) + + // Step 8: start monitor + monitor := NewMonitor(jobManager) + wg.Add(1) + go func() { + defer wg.Done() + monitor.Start() + }() + + // Step 9: start signal mux // use closure to capture httpService, checker, jobManager signalHandler := func(signal os.Signal) bool { switch signal { @@ -126,6 +151,7 @@ func main() { httpService.Stop() checker.Stop() jobManager.Stop() + monitor.Stop() log.Info("all service stop") return true case syscall.SIGHUP: @@ -143,6 +169,18 @@ func main() { signalMux.Serve() }() - // Step 6: wait for all task done + // Step 10: start pprof + if syncer.Pprof == true { + wg.Add(1) + go func() { + defer wg.Done() + var pprof_info string = fmt.Sprintf("%s:%d", syncer.Host, syncer.Ppof_port) + if err := http.ListenAndServe(pprof_info, nil); err != nil { + log.Infof("start pprof failed on: %s, error : %+v", pprof_info, err) + } + }() + } + + // Step 11: wait for all task done wg.Wait() } diff --git a/cmd/ccr_syncer/monitor.go b/cmd/ccr_syncer/monitor.go new file mode 100644 index 00000000..cf63ec8d --- /dev/null +++ b/cmd/ccr_syncer/monitor.go @@ -0,0 +1,92 @@ +package main + +import ( + "runtime" + "strings" + "time" + + "github.com/selectdb/ccr_syncer/pkg/ccr" + log "github.com/sirupsen/logrus" +) + +const ( + MONITOR_DURATION = time.Second * 60 +) + +type Monitor struct { + jobManager *ccr.JobManager + stop chan struct{} +} + +func NewMonitor(jm *ccr.JobManager) *Monitor { + return &Monitor{ + jobManager: jm, + stop: make(chan struct{}), + } +} + +func (m *Monitor) dump() { + log.Infof("[GOROUTINE] Total = %v", runtime.NumGoroutine()) + + mb := func(b uint64) uint64 { + return b / 1024 / 1024 + } + + // see: https://golang.org/pkg/runtime/#MemStats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + liveObjects := stats.Mallocs - stats.Frees + log.Infof("[MEMORY STATS] Alloc = %v MiB, TotalAlloc = %v MiB, Sys = %v MiB, NumGC = %v, LiveObjects = %v", + mb(stats.Alloc), mb(stats.TotalAlloc), mb(stats.Sys), stats.NumGC, liveObjects) + + jobs := m.jobManager.ListJobs() + numJobs := len(jobs) + numRunning := 0 + numFullSync := 0 + numIncremental := 0 + numPartialSync := 0 + numTableSync := 0 + numDbSync := 0 + for _, job := range jobs { + if strings.HasPrefix(job.ProgressState, "DB") { + numDbSync += 1 + } else { + numTableSync += 1 + } + if job.State == "running" { + numRunning += 1 + if strings.Contains(job.ProgressState, "FullSync") { + numFullSync += 1 + } else if strings.Contains(job.ProgressState, "PartialSync") { + numPartialSync += 1 + } else if strings.Contains(job.ProgressState, 
"IncrementalSync") { + numIncremental += 1 + } + } + } + + log.Infof("[JOB STATS] Total = %v, Running = %v, DBSync = %v, TableSync = %v", + numJobs, numRunning, numDbSync, numTableSync) + log.Infof("[JOB STATUS] FullSync = %v, PartialSync = %v, IncrementalSync = %v", + numFullSync, numPartialSync, numIncremental) +} + +func (m *Monitor) Start() { + ticker := time.NewTicker(MONITOR_DURATION) + defer ticker.Stop() + + for { + select { + case <-m.stop: + log.Info("monitor stopped") + return + case <-ticker.C: + m.dump() + } + } +} + +func (m *Monitor) Stop() { + log.Info("monitor stopping") + close(m.stop) +} diff --git a/cmd/ingest_binlog/ingest_binlog.go b/cmd/ingest_binlog/ingest_binlog.go index 486bb38c..933e579e 100644 --- a/cmd/ingest_binlog/ingest_binlog.go +++ b/cmd/ingest_binlog/ingest_binlog.go @@ -108,12 +108,11 @@ func test_commit(t *base.Spec) { func test_ingest_be() { backend := base.Backend{ - Id: 10028, - Host: "127.0.0.1", - HeartbeatPort: 9050, - BePort: 9060, - HttpPort: 8040, - BrpcPort: 8060, + Id: 10028, + Host: "127.0.0.1", + BePort: 9060, + HttpPort: 8040, + BrpcPort: 8060, } rpcFactory := rpc.NewRpcFactory() rpc, err := rpcFactory.NewBeRpc(&backend) @@ -152,7 +151,7 @@ func test_ingrest_binlog(src *base.Spec, dest *base.Spec) { case "commit": test_commit(dest) case "abort": - panic("unkown abort action") + panic("unknown abort action") case "ingest_be": test_ingest_be() default: diff --git a/cmd/metrics/metrics_demo.go b/cmd/metrics/metrics_demo.go new file mode 100644 index 00000000..5c1a9b93 --- /dev/null +++ b/cmd/metrics/metrics_demo.go @@ -0,0 +1,31 @@ +package main + +import ( + "log" + "net/http" + "time" + + "github.com/hashicorp/go-metrics" + prometheussink "github.com/hashicorp/go-metrics/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func promHttp() { + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +func main() { + go promHttp() + sink, _ := prometheussink.NewPrometheusSink() + metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + metrics.SetGauge([]string{"foo"}, 42) + metrics.EmitKey([]string{"bar"}, 30) + metrics.IncrCounter([]string{"baz"}, 42) + metrics.IncrCounter([]string{"baz"}, 1) + metrics.IncrCounter([]string{"baz"}, 80) + metrics.AddSample([]string{"method", "wow"}, 42) + metrics.AddSample([]string{"method", "wow"}, 100) + metrics.AddSample([]string{"method", "wow"}, 22) + time.Sleep(10000000 * time.Second) +} diff --git a/cmd/snapshot_op/snapshot_op.go b/cmd/snapshot_op/snapshot_op.go index 43e52d63..15755820 100644 --- a/cmd/snapshot_op/snapshot_op.go +++ b/cmd/snapshot_op/snapshot_op.go @@ -103,7 +103,7 @@ func test_restore_snapshot(src *base.Spec, dest *base.Spec) { if err != nil { panic(err) } - restoreResp, err := destRpc.RestoreSnapshot(dest, nil, labelName, snapshotResp) + restoreResp, err := destRpc.RestoreSnapshot(dest, nil, labelName, snapshotResp, false, false) if err != nil { panic(err) } diff --git a/cmd/thrift_get_meta/thrift_get_meta.go b/cmd/thrift_get_meta/thrift_get_meta.go new file mode 100644 index 00000000..7135ff2d --- /dev/null +++ b/cmd/thrift_get_meta/thrift_get_meta.go @@ -0,0 +1,146 @@ +package main + +import ( + "encoding/json" + "flag" + + log "github.com/sirupsen/logrus" + + "github.com/selectdb/ccr_syncer/pkg/ccr" + "github.com/selectdb/ccr_syncer/pkg/ccr/base" + "github.com/selectdb/ccr_syncer/pkg/rpc" + "github.com/selectdb/ccr_syncer/pkg/utils" +) + +var ( + host string + port string + thriftPort string + user string + 
password string + dbName string + tableName string +) + +func init() { + flag.StringVar(&host, "host", "localhost", "host") + flag.StringVar(&port, "port", "9030", "port") + flag.StringVar(&thriftPort, "thrift_port", "9020", "thrift port") + flag.StringVar(&user, "user", "root", "user") + flag.StringVar(&password, "password", "", "password") + flag.StringVar(&dbName, "db", "ccr", "database name") + flag.StringVar(&tableName, "table", "src_1", "table name") + flag.Parse() + + utils.InitLog() +} + +func test_get_table_meta(m ccr.Metaer, spec *base.Spec) { + if dbId, err := m.GetDbId(); err != nil { + panic(err) + } else { + spec.DbId = dbId + log.Infof("found db: %s, dbId: %d", spec.Database, dbId) + } + + if tableId, err := m.GetTableId(spec.Table); err != nil { + panic(err) + } else { + spec.TableId = tableId + log.Infof("found table: %s, tableId: %d", spec.Table, tableId) + } + + rpcFactory := rpc.NewRpcFactory() + feRpc, err := rpcFactory.NewFeRpc(spec) + if err != nil { + panic(err) + } + + tableIds := make([]int64, 0) + tableIds = append(tableIds, spec.TableId) + result, err := feRpc.GetTableMeta(spec, tableIds) + if err != nil { + panic(err) + } + // toJson + s, err := json.Marshal(&result) + if err != nil { + panic(err) + } + log.Infof("found db meta: %s", s) + + thriftMeta, err := ccr.NewThriftMeta(spec, rpcFactory, tableIds) + if err != nil { + panic(err) + } + log.Infof("found thrift meta: %+v", thriftMeta) +} + +func test_get_db_meta(m ccr.Metaer, spec *base.Spec) { + if dbId, err := m.GetDbId(); err != nil { + panic(err) + } else { + spec.DbId = dbId + log.Infof("found db: %s, dbId: %d", spec.Database, dbId) + } + + rpcFactory := rpc.NewRpcFactory() + feRpc, err := rpcFactory.NewFeRpc(spec) + if err != nil { + panic(err) + } + + result, err := feRpc.GetDbMeta(spec) + if err != nil { + panic(err) + } + // toJson + s, err := json.Marshal(result) + if err != nil { + panic(err) + } + log.Infof("found db meta: %s", s) +} + +func test_get_backends(m ccr.Metaer, spec *base.Spec) { + rpcFactory := rpc.NewRpcFactory() + feRpc, err := rpcFactory.NewFeRpc(spec) + if err != nil { + panic(err) + } + + result, err := feRpc.GetBackends(spec) + if err != nil { + panic(err) + } + // toJson + s, err := json.Marshal(&result) + if err != nil { + panic(err) + } + log.Infof("found backends: %s", s) +} + +func main() { + src := &base.Spec{ + Frontend: base.Frontend{ + Host: host, + Port: port, + ThriftPort: thriftPort, + }, + User: user, + Password: password, + Database: dbName, + Table: tableName, + } + + metaFactory := ccr.NewMetaFactory() + meta := metaFactory.NewMeta(src) + + if tableName != "" { + test_get_table_meta(meta, src) + } else { + test_get_db_meta(meta, src) + } + test_get_backends(meta, src) +} diff --git a/devtools/issue_test/priv.sh b/devtools/issue_test/priv.sh new file mode 100755 index 00000000..6a750222 --- /dev/null +++ b/devtools/issue_test/priv.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "priv_test", + "src": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "etl", + "password": "etl%2023", + "database": "tmp", + "table": "ccr_test_src" + }, + "dest": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "etl", + "password": "etl%2023", + "database": "tmp", + "table": "ccr_test_dst" + } +}' http://127.0.0.1:9190/create_ccr diff --git a/devtools/test_ccr_db_table_alias.sh b/devtools/test_ccr_db_table_alias.sh new file mode 100755 index 00000000..78879b89 --- 
/dev/null +++ b/devtools/test_ccr_db_table_alias.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "ccr_db_table_alias", + "src": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "ccr", + "table": "src_1" + }, + "dest": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "dccr", + "table": "src_1_alias" + } +}' http://127.0.0.1:9190/create_ccr diff --git a/devtools/test_ccr_many_rows.sh b/devtools/test_ccr_many_rows.sh new file mode 100755 index 00000000..cf001c7c --- /dev/null +++ b/devtools/test_ccr_many_rows.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "ccr_table_many_rows", + "src": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "ccr", + "table": "many" + }, + "dest": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "ccr", + "table": "many_alias" + } +}' http://127.0.0.1:9190/create_ccr diff --git a/devtools/test_ccr_table_alias.sh b/devtools/test_ccr_table_alias.sh index e11857d0..e3582def 100755 --- a/devtools/test_ccr_table_alias.sh +++ b/devtools/test_ccr_table_alias.sh @@ -19,5 +19,6 @@ curl -X POST -H "Content-Type: application/json" -d '{ "password": "", "database": "ccr", "table": "src_1_alias" - } + }, + "skip_error": false }' http://127.0.0.1:9190/create_ccr diff --git a/devtools/test_limit_speed.sh b/devtools/test_limit_speed.sh new file mode 100755 index 00000000..9d9306c3 --- /dev/null +++ b/devtools/test_limit_speed.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "test_speed_limit", + "src": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "ccr", + "table": "github_test_1" + }, + "dest": { + "host": "localhost", + "port": "9030", + "thrift_port": "9020", + "user": "root", + "password": "", + "database": "dccr", + "table": "github_test_1_sync" + } +}' http://127.0.0.1:9190/create_ccr diff --git a/devtools/update_job.sh b/devtools/update_job.sh new file mode 100755 index 00000000..3f0adc24 --- /dev/null +++ b/devtools/update_job.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "ccr_test", + "skip": true +}' http://127.0.0.1:9190/update_job diff --git a/doc/operations.md b/doc/operations.md index a202fda6..3007208f 100644 --- a/doc/operations.md +++ b/doc/operations.md @@ -1,39 +1,124 @@ # Syncer操作列表 + ### 请求的通用模板 + ```bash curl -X POST -H "Content-Type: application/json" -d {json_body} http://ccr_syncer_host:ccr_syncer_port/operator ``` -json_body: 以json的格式发送操作所需信息 -operator:对应Syncer的不同操作 +- json_body: 以json的格式发送操作所需信息 +- operator:对应Syncer的不同操作 + ### operators -- create_ccr - 创建CCR任务,详见[README](../README.md) -- get_lag + +- `create_ccr` + 创建CCR任务,详见[README](../README.md)。 +- `get_lag` 查看同步进度 ```bash - curl -X POST -H "Content-Type: application/json" -d '{ + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ "name": "job_name" }' http://ccr_syncer_host:ccr_syncer_port/get_lag ``` 其中job_name是create_ccr时创建的name -- pause +- `pause` 暂停同步任务 ```bash - curl -X POST -H "Content-Type: application/json" -d '{ + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ "name": "job_name" }' 
http://ccr_syncer_host:ccr_syncer_port/pause ``` -- resume +- `resume` 恢复同步任务 ```bash - curl -X POST -H "Content-Type: application/json" -d '{ + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ "name": "job_name" }' http://ccr_syncer_host:ccr_syncer_port/resume ``` -- delete +- `delete` 删除同步任务 ```bash - curl -X POST -H "Content-Type: application/json" -d '{ + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ "name": "job_name" }' http://ccr_syncer_host:ccr_syncer_port/delete - ``` \ No newline at end of file + ``` +- `list_jobs` + 列出所有job名称 + ```bash + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{}' http://ccr_syncer_host:ccr_syncer_port/list_jobs + ``` +- `job_detail` + 展示job的详细信息 + ```bash + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ + "name": "job_name" + }' http://ccr_syncer_host:ccr_syncer_port/job_detail + ``` +- `job_progress` + 展示job的详细进度信息 + ```bash + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ + "name": "job_name" + }' http://ccr_syncer_host:ccr_syncer_port/job_progress + ``` +- `metrics` + 获取golang以及ccr job的metrics信息 + ```bash + curl -L --post303 http://ccr_syncer_host:ccr_syncer_port/metrics + ``` +- `update_host_mapping` + 更新上游 FE/BE 集群 private ip 到 public ip 的映射;如果参数中的 public ip 为空,则删除该 private 的映射 + ```bash + curl -X POST -L --post303 -H "Content-Type: application/json" -d '{ + "name": "job_name", + "src_host_mapping": { + "172.168.1.1": "10.0.10.1", + "172.168.1.2": "10.0.10.2", + "172.168.1.3": "10.0.10.3", + "172.168.1.5": "" + }, + "dest_host_mapping": { + ... + } + }' http://ccr_syncer_host:ccr_syncer_port/add_host_mapping + ``` + 更新上游 172.168.1.1-3 的映射,同时删除 172.168.1.5 的映射。 + - `src_host_mapping`: 上游映射 + - `dest_host_mapping`: 下游映射 + +### 一些特殊场景 + +#### 上下游通过公网 IP 进行同步 + +ccr syncer 支持将上下游部署到不同的网络环境中,并通过公网 IP 进行数据同步。 + +具体方案:每个 job 会记录下上游 private IP 到 public IP 的映射关系(由用户提供),并在下游载入 binlog 前,将上游集群 FE/BE 的 private 转换成对应的 public IP。 + +使用方式:创建 ccr job 时增加一个参数: +```bash +curl -X POST -H "Content-Type: application/json" -d '{ + "name": "ccr_test", + "src": { + "host_mapping": { + "172.168.1.1": "10.0.10.1", + "172.168.1.2": "10.0.10.2", + "172.168.1.3": "10.0.10.3" + }, + ... + }, + "dest": { + "host_mapping": { + "172.168.2.3": "10.0.10.9", + "172.168.2.4": "" + }, + ... + }, +}' http://127.0.0.1:9190/create_ccr +``` + +`host_mapping` 用法与 `/update_host_mapping` 接口一致。 + +> 注意:即使增加了 host_mapping 字段,**src/dest 中的 host 字段仍需要设置为 public ip**。 + +相关操作: +- 修改/删除/增加新映射,使用 `/update_host_mapping` 接口 +- 查看 job 的所有映射,使用 `/job_detail` 接口 diff --git a/doc/pprof.md b/doc/pprof.md new file mode 100644 index 00000000..d3377fad --- /dev/null +++ b/doc/pprof.md @@ -0,0 +1,19 @@ +# pprof使用介绍 + +## pprof简介 +pprof是golang语言中,用来分析性能的工具,pprof有4种profling: +1. CPU Profiling : CPU 性能分析 +2. memory Profiling : 程序的内存占用情况 +3. Block Profiling : goroutine 在等待共享资源花费的时间 +4. Mutex Profiling : 只记录因为锁竞争导致的等待或延迟 +目前CCR已经集成了pprof,可以用来分析CCR的性能。 + +## CCR中使用pprof的步骤 +1. 启动CCR进程时,可以通过sh shell/start_syncer.sh --pprof true --pprof_port 8080 --host x.x.x.x --daemon的方式打开pprof +2. 在浏览器中打开 http://x.x.x.x:8080/debug/pprof/ 即可看到profiling +3. 
或者可以使用采样工具,通过更加图形化的方式来分析,此时可以在8080端口启动后,在ccr机器上执行 +``` go tool pprof -http=:9999 http://x.x.x.x:8080/debug/pprof/heap ``` +然后在浏览器打开 http://x.x.x.x:9999 即可看到采样图形化信息 +此处需要注意的是,如果无法开通端口,可以使用如下命令将采样信息保存到文件中,再将文件拉到本地使用浏览器打开: +``` curl http://localhost:8080/debug/pprof/heap?seconds=30 > heap.out ``` +``` go tool pprof heap.out ``` \ No newline at end of file diff --git a/doc/run-regression-test-en.md b/doc/run-regression-test-en.md new file mode 100644 index 00000000..72185fcd --- /dev/null +++ b/doc/run-regression-test-en.md @@ -0,0 +1,70 @@ +# Regression Test Considerations +## Steps to Run Tests +### 1. Copy Test and CCR Interface Libraries +The regression tests for CCR require the regression test framework from doris/regression-test. Therefore, when running tests, we need to move the tests and CCR interfaces to the doris/regression-test directory. + +Create a folder named ccr-syncer-test under the doris/regression-test/suites directory and copy the test files into this folder. Next, copy the files from ccr-syncer/regression-test/common to doris/regression-test/common. The framework for the tests is now set up. +### 2. Configure regression-conf.groovy (doris) +Add and configure the following in the configuration file based on the actual situation: +```bash +// JDBC configuration +jdbcUrl = "jdbc:mysql://127.0.0.1:9030/?" +targetJdbcUrl = "jdbc:mysql://127.0.0.1:9190/? +jdbcUser = "root" +jdbcPassword = "" + +feSourceThriftAddress = "127.0.0.1:9220" +feTargetThriftAddress = "127.0.0.1:9220" +syncerAddress = "127.0.0.1:9190" +feSyncerUser = "root" +feSyncerPassword = "" +feHttpAddress = "127.0.0.1:8330" + +// CCR configuration +ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?" + +ccrDownstreamUser = "root" + +ccrDownstreamPassword = "" + +ccrDownstreamFeThriftAddress = "127.0.0.1:9020" +``` +### 3. Run the Tests +Ensure that at least one BE and FE are deployed for Doris and that CCR-Syncer is deployed before running the tests. +```bash +Run the tests using the Doris script +# --Run test cases with suiteName sql_action, currently suiteName equals the prefix of the file name, the example corresponds to the test file sql_action.groovy +./run-regression-test.sh --run sql_action +``` +The steps to run the tests are now complete. +## Steps to Write Test Cases +### 1. Create Test Files +Navigate to the ccr-syncer/regression-test/suites directory and create folders based on the synchronization level. For example, for the DB level, go to the db_sync folder. Further divide the folders based on the synchronization object. For example, for the column object, go to the column folder. Divide the folders based on the actions on the object. For example, for the rename action, create a rename folder. Create the test file in this folder with a name prefixed by test followed by the sequence of directories entered, e.g., test_ds_col_rename represents the synchronization test for renaming a column at the DB level. + +**Ensure there is only one test file in each smallest folder.** +### 2. Write the Test +CCR Interface Explanation: +``` + // Enable Binlog + helper.enableDbBinlog() + + // Functions for creating, deleting, pausing, and resuming tasks support an optional parameter. + // For example, to create a task. If empty, it defaults to creating a DB-level synchronization task with the target database as context.dbName. + helper.ccrJobCreate() + + // If not empty, it creates a table-level synchronization task with the target database as context.dbName, target table as tableName. 
+ helper.ccrJobCreate(tableName) + + // Check if the SQL execution result matches the res_func function, where sql_type is "sql" (source cluster) or "target_sql" (target cluster), and time_out is the timeout duration. + helper.checkShowTimesOf(sql, res_func, time_out, sql_type) +``` +**注意事项** +``` +1. Two clusters will be created during the test: SQL is sent to the upstream cluster, and target_sql is sent to the downstream cluster. Use target_sql for operations involving the target cluster. + +2. Ensure the source database is not empty when creating a task, otherwise, the task creation will fail. + +3. Perform checks on both upstream and downstream before and after modifying objects to ensure correctness. + +4. Ensure the length of the automatically created dbName does not exceed 64. +``` \ No newline at end of file diff --git a/doc/run-regression-test-zh.md b/doc/run-regression-test-zh.md new file mode 100644 index 00000000..f2dcbda7 --- /dev/null +++ b/doc/run-regression-test-zh.md @@ -0,0 +1,68 @@ +# 回归测试注意事项 +## 运行测试的步骤 +### 1. 复制测试及 ccr 接口库 +CCR 的回归测试需要用到 doris/regression-test 的回归测试框架, 所以我们运行测试时需要将测试和 ccr 接口迁移到doris/regression-test 目录下
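+下面给出一个大致的复制示例(仅为示意,CCR_SYNCER_HOME、DORIS_HOME 等变量与相对路径均为假设,请按实际环境调整;具体说明见下文):
+```bash
+# 假设 ccr-syncer 与 doris 两个仓库位于同一目录下(路径仅为示例)
+CCR_SYNCER_HOME=./ccr-syncer
+DORIS_HOME=./doris
+
+# 在 doris 回归测试目录下创建 ccr-syncer-test 文件夹,并复制测试用例
+mkdir -p "$DORIS_HOME/regression-test/suites/ccr-syncer-test"
+cp -r "$CCR_SYNCER_HOME/regression-test/suites/." "$DORIS_HOME/regression-test/suites/ccr-syncer-test/"
+
+# 将 ccr 接口库复制到 doris/regression-test/common
+mkdir -p "$DORIS_HOME/regression-test/common"
+cp -r "$CCR_SYNCER_HOME/regression-test/common/." "$DORIS_HOME/regression-test/common/"
+```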
+在 doris/regression-test/suites 目录下建立文件夹 ccr-syncer-test, 将测试文件复制到此文件夹, 其次将 ccr-syncer/regression-test/common 下的文件复制到 doris/regression-test/common 目录下, 至此测试前的框架已经搭好
+### 2. 配置 regression-conf.groovy
+根据实际情况在配置文件中添加如下内容,并配置 jdbc、fe、ccr 相关参数
+```bash
+// Jdbc配置
+jdbcUrl = "jdbc:mysql://127.0.0.1:9030/?"
+targetJdbcUrl = "jdbc:mysql://127.0.0.1:9190/?"
+jdbcUser = "root"
+jdbcPassword = ""
+
+feSourceThriftAddress = "127.0.0.1:9020"
+feTargetThriftAddress = "127.0.0.1:9020"
+syncerAddress = "127.0.0.1:9190"
+feSyncerUser = "root"
+feSyncerPassword = ""
+feHttpAddress = "127.0.0.1:8030"
+
+// ccr配置
+ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?"
+
+ccrDownstreamUser = "root"
+
+ccrDownstreamPassword = ""
+
+ccrDownstreamFeThriftAddress = "127.0.0.1:9020"
+```
+### 3. 运行测试
+在运行测试前确保 doris 至少部署了一个 fe 和一个 be, 并确保 ccr-syncer 部署完成
+```bash
+# 使用 doris 脚本运行测试
+# --测试suiteName为sql_action的用例, 目前suiteName等于文件名前缀, 例子对应的用例文件是sql_action.groovy
+./run-regression-test.sh --run sql_action
+```
+至此运行测试的步骤已完成
+## 编写测试用例的步骤
+### 1. 创建测试文件
+进入 ccr-syncer/regression-test/suites 目录, 根据同步级别划分文件夹, 以db级别为例, 进入 db_sync 文件夹, 根据同步对象划分文件夹, 以 column 为例, 进入 column 文件夹, 根据对对象的行为划分文件夹, 以rename为例, 创建 rename 文件夹, 在此文件夹下创建测试, 文件名为 test 前缀加依次进入目录的顺序, 例如 test_ds_col_rename 代表在db级别下 rename column 的同步测试
+**确保在每个最小文件夹下只有一个测试文件**
+### 2. 编写测试
+ccr 接口说明
+```
+    // 开启Binlog
+    helper.enableDbBinlog()
+
+    // 创建、删除、暂停、恢复任务等函数支持一个可选参数。
+    // 以创建任务为例, 参数为 tableName, 参数为空时, 默认创建db级别同步任务, 目标数据库为context.dbName
+    helper.ccrJobCreate()
+
+    // 不为空时创建 tbl 级别同步任务, 目标数据库为context.dbName, 目标表为 tableName
+    helper.ccrJobCreate(tableName)
+
+    // 检测 sql 运行结果是否符合 res_func函数, sql_type 为 "sql" (源集群) 或 "target_sql" (目标集群), time_out 为超时时间
+    helper.checkShowTimesOf(sql, res_func, time_out, sql_type)
+```
+**注意事项**
+```
+1. 测试时会建两个集群, sql 发给上游集群, target_sql 发给下游集群, 涉及到目标集群的需要用 target_sql
+
+2. 创建任务时确保源数据库不为空, 否则创建任务会失败
+
+3. 在修改对象前后都需要对上下游进行 check 保证结果正确
+
+4. 确保测试自动创建的 dbName 的长度不超过 64
+```
\ No newline at end of file
diff --git a/doc/start_syncer.md b/doc/start_syncer.md
index e19cce8b..2a14b4fb 100644
--- a/doc/start_syncer.md
+++ b/doc/start_syncer.md
@@ -23,12 +23,12 @@ bash bin/start_syncer.sh --daemon
 ```
 ### --db_type
-Syncer目前能够使用两种数据库来保存自身的元数据,分别为`sqlite3`(对应本地存储)和`mysql`(本地或远端存储)
+Syncer目前能够使用三种数据库来保存自身的元数据,分别为`sqlite3`(对应本地存储)、`mysql` 或者 `postgresql`(本地或远端存储)
 ```bash
 bash bin/start_syncer.sh --db_type mysql
 ```
 默认值为sqlite3
-在使用mysql存储元数据时,Syncer会使用`CREATE IF NOT EXISTS`来创建一个名为`ccr`的库,ccr相关的元数据表都会保存在其中
+在使用mysql或者postgresql存储元数据时,Syncer会使用`CREATE IF NOT EXISTS`来创建一个名为`ccr`的库,ccr相关的元数据表都会保存在其中
 ### --db_dir
 **这个选项仅在db使用`sqlite3`时生效**
@@ -38,7 +38,7 @@ bash bin/start_syncer.sh --db_dir /path/to/ccr.db
 ```
 默认路径为`SYNCER_OUTPUT_DIR/db`,文件名为`ccr.db`
 ### --db_host & db_port & db_user & db_password
-**这个选项仅在db使用`mysql`时生效**
+**这个选项仅在db使用`mysql`或者`postgresql`时生效**
 ```bash
 bash bin/start_syncer.sh --db_host 127.0.0.1 --db_port 3306 --db_user root --db_password "qwe123456"
 ```
@@ -54,7 +54,7 @@ bash bin/start_syncer.sh --log_dir /path/to/ccr_syncer.log
 ```bash
 bash bin/start_syncer.sh --log_level info
 ```
-日志的格式如下,其中hook只会在`log_level > info`的时候打印:
+
 ```
 # time level msg hooks
 [2023-07-18 16:30:18] TRACE This is trace type.
ccrName=xxx line=xxx @@ -80,4 +80,32 @@ pid文件是stop_syncer.sh脚本用于关闭Syncer的凭据,里面保存了对 ```bash bash bin/start_syncer.sh --pid_dir /path/to/pids ``` -默认值为`SYNCER_OUTPUT_DIR/bin` \ No newline at end of file +默认值为`SYNCER_OUTPUT_DIR/bin` + +### --commit_txn_timeout +用于指定提交事务超时时间 +```bash +bash bin/start_syncer.sh --commit_txn_timeout 33s +``` +默认值为33s + +### --connect_timeout duration +用于指定连接超时时间 +```bash +bash bin/start_syncer.sh --connect_timeout 10s +``` +默认值为1s + +### --local_repo_name string +用于指定本地仓库名称 +```bash +bash bin/start_syncer.sh --local_repo_name "repo_name" +``` +默认值为"" + +### --rpc_timeout duration +用于指定rpc超时时间 +```bash +bash bin/start_syncer.sh --rpc_timeout 30s +``` +默认值为3s \ No newline at end of file diff --git a/go.mod b/go.mod index 5e4dbb67..9958b4dc 100644 --- a/go.mod +++ b/go.mod @@ -3,73 +3,80 @@ module github.com/selectdb/ccr_syncer go 1.20 require ( - github.com/apache/thrift v0.13.0 - github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585 - github.com/go-sql-driver/mysql v1.7.0 + github.com/apache/thrift v0.19.0 + github.com/cloudwego/kitex v0.8.0 + github.com/go-sql-driver/mysql v1.7.1 + github.com/hashicorp/go-metrics v0.5.3 github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1 - github.com/mattn/go-sqlite3 v1.14.17 + github.com/mattn/go-sqlite3 v1.14.22 github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5 - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.9.0 + github.com/prometheus/client_golang v1.18.0 + github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.8.4 github.com/t-tomalak/logrus-prefixed-formatter v0.5.2 - github.com/tidwall/btree v1.6.0 - go.uber.org/mock v0.2.0 - go.uber.org/zap v1.24.0 - golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df + github.com/tidwall/btree v1.7.0 + go.uber.org/mock v0.4.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) // dependabot -require golang.org/x/net v0.17.0 // indirect; https://github.com/selectdb/ccr-syncer/security/dependabot/2 +require golang.org/x/net v0.21.0 // indirect; https://github.com/selectdb/ccr-syncer/security/dependabot/2 require ( - github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/chenzhuoyu/iasm v0.9.0 // indirect - github.com/choleraehyq/pid v0.0.17 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bufbuild/protocompile v0.8.0 // indirect + github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57 // indirect + github.com/bytedance/sonic v1.11.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/choleraehyq/pid v0.0.18 // indirect github.com/cloudwego/configmanager v0.2.0 // indirect - github.com/cloudwego/dynamicgo v0.1.2 // indirect + github.com/cloudwego/dynamicgo v0.2.0 // indirect github.com/cloudwego/fastpb v0.0.4 // indirect - github.com/cloudwego/frugal v0.1.7 // indirect + github.com/cloudwego/frugal v0.1.13 // indirect github.com/cloudwego/localsession v0.0.2 // indirect - github.com/cloudwego/netpoll v0.4.1 // indirect - github.com/cloudwego/thriftgo v0.3.0 // indirect + github.com/cloudwego/netpoll v0.5.1 // indirect + github.com/cloudwego/thriftgo v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 
github.com/fatih/structtag v1.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 // indirect - github.com/iancoleman/strcase v0.2.0 // indirect - github.com/jhump/protoreflect v1.8.2 // indirect + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/jhump/protoreflect v1.15.6 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oleiade/lane v1.0.1 // indirect github.com/onsi/ginkgo v1.16.5 // indirect github.com/onsi/gomega v1.27.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/tidwall/gjson v1.9.3 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/tidwall/gjson v1.17.1 // indirect github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/x-cray/logrus-prefixed-formatter v0.5.2 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/arch v0.2.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384 // indirect - google.golang.org/protobuf v1.28.1 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 402b0b4a..8e6bddf0 100644 --- a/go.sum +++ b/go.sum @@ -4,80 +4,106 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate 
v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/brianvoe/gofakeit/v6 v6.16.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= +github.com/bufbuild/protocompile v0.8.0 h1:9Kp1q6OkS9L4nM3FYbr8vlJnEwtbpDPQlQOVXfR+78s= +github.com/bufbuild/protocompile v0.8.0/go.mod h1:+Etjg4guZoAqzVk2czwEQP12yaxLJ8DxuqCJ9qHdH94= github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= github.com/bytedance/gopkg v0.0.0-20220509134931-d1878f638986/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= github.com/bytedance/gopkg v0.0.0-20220531084716-665b4f21126f/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= github.com/bytedance/gopkg v0.0.0-20230531144706-a12972768317/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ= -github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b h1:R6PWoQtxEMpWJPHnpci+9LgFxCS7iJCfOGBvCgZeTKI= github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ= -github.com/bytedance/mockey v1.2.0 h1:847+X2fBSM4s/AIN4loO5d16PCgEj53j7Q8YVB+8P6c= +github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57 h1:lXHfN6aablmJUX76DO3BuathM5+9gftKx/iFv1RLqcg= +github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ= github.com/bytedance/mockey v1.2.0/go.mod h1:+Jm/fzWZAuhEDrPXVjDf/jLM2BlLXJkwk94zf2JZ3X4= +github.com/bytedance/mockey v1.2.7 h1:8j4yCqS5OmMe2dQCxPit4FVkwTK9nrykIgbOZN3s28o= +github.com/bytedance/mockey v1.2.7/go.mod h1:bNrUnI1u7+pAc0TYDgPATM+wF2yzHxmNH+iDXg4AOCU= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/bytedance/sonic 
v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.11.0 h1:FwNNv6Vu4z2Onf1++LNzxB/QhitD8wuTdpZzMTGITWo= +github.com/bytedance/sonic v1.11.0/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= github.com/chenzhuoyu/iasm v0.0.0-20220818063314-28c361dae733/go.mod h1:wOQ0nsbeOLa2awv8bUYFW/EHXbjQMlZ10fAlXDB2sz8= github.com/chenzhuoyu/iasm v0.0.0-20230222070914-0b1b64b0e762/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/choleraehyq/pid v0.0.13/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U= github.com/choleraehyq/pid v0.0.15/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U= github.com/choleraehyq/pid v0.0.16/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U= -github.com/choleraehyq/pid v0.0.17 h1:BLBfHTllp2nRRbZ/cOFHKlx9oWJuMwKmp7GqB5d58Hk= github.com/choleraehyq/pid v0.0.17/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U= +github.com/choleraehyq/pid v0.0.18 h1:O7LLxPoOyt3YtonlCC8BmNrF9P6Hc8B509UOqlPSVhw= +github.com/choleraehyq/pid v0.0.18/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/configmanager v0.2.0 h1:niVpVg+wQ+npNqnH3dup96SMbR02Pk+tNErubYCJqKo= github.com/cloudwego/configmanager v0.2.0/go.mod h1:FLIQTjxsZRGjnmDhTttWQTy6f6DghPTatfBVOs2gQLk= github.com/cloudwego/dynamicgo v0.1.0/go.mod h1:Mdsz0XGsIImi15vxhZaHZpspNChEmBMIiWkUfD6JDKg= -github.com/cloudwego/dynamicgo v0.1.2 h1:t5KMzo/UkT002n3EvGI0Y6+Me73NGDzFI/AQlT1LQME= -github.com/cloudwego/dynamicgo v0.1.2/go.mod 
h1:AdPqyFN+0+fc3iVSSWojDCnOGPkzH+T0rI65017GCUA= +github.com/cloudwego/dynamicgo v0.1.6/go.mod h1:WzbIYLbhR4tjUhEMmRZRNIQXZu5J18oPurGDj5UmU9I= +github.com/cloudwego/dynamicgo v0.2.0 h1:2mIqwYjS4TvjIov+dV5/y4OO33x/YMdfaeiRgXiineg= +github.com/cloudwego/dynamicgo v0.2.0/go.mod h1:zTbRLRyBdP+OLalvkiwWPnvg84v1UungzT7iuL/2Qgc= github.com/cloudwego/fastpb v0.0.3/go.mod h1:/V13XFTq2TUkxj2qWReV8MwfPC4NnPcy6FsrojnsSG0= github.com/cloudwego/fastpb v0.0.4 h1:/ROVVfoFtpfc+1pkQLzGs+azjxUbSOsAqSY4tAAx4mg= github.com/cloudwego/fastpb v0.0.4/go.mod h1:/V13XFTq2TUkxj2qWReV8MwfPC4NnPcy6FsrojnsSG0= github.com/cloudwego/frugal v0.1.3/go.mod h1:b981ViPYdhI56aFYsoMjl9kv6yeqYSO+iEz2jrhkCgI= github.com/cloudwego/frugal v0.1.6/go.mod h1:9ElktKsh5qd2zDBQ5ENhPSQV7F2dZ/mXlr1eaZGDBFs= -github.com/cloudwego/frugal v0.1.7 h1:Ggyk8mk0WrhBlM4g4RJxdOcVWJl/Hxbd8NJ19J8My6c= -github.com/cloudwego/frugal v0.1.7/go.mod h1:3VECBCSiTYwm3QApqHXjZB9NDH+8hUw7txxlr+6pPb4= +github.com/cloudwego/frugal v0.1.12/go.mod h1:zFBA63ne4+Tz4qayRZFZf+ZVwGqTzb+1Xe3ZDCq+Wfc= +github.com/cloudwego/frugal v0.1.13 h1:s2G93j/DqANEUnYpvdf3mz760yGdCGs5o3js7dNU4Ig= +github.com/cloudwego/frugal v0.1.13/go.mod h1:zFBA63ne4+Tz4qayRZFZf+ZVwGqTzb+1Xe3ZDCq+Wfc= github.com/cloudwego/kitex v0.3.2/go.mod h1:/XD07VpUD9VQWmmoepASgZ6iw//vgWikVA9MpzLC5i0= github.com/cloudwego/kitex v0.4.4/go.mod h1:3FcH5h9Qw+dhRljSzuGSpWuThttA8DvK0BsL7HUYydo= github.com/cloudwego/kitex v0.6.1/go.mod h1:zI1GBrjT0qloTikcCfQTgxg3Ws+yQMyaChEEOcGNUvA= -github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585 h1:PHWx7esQA/VEsVJEPuNL8jFigLIfHQdug62BkagS4xI= -github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585/go.mod h1:RVWi+MbiPzI0Gi7fz8KZp+zsxB1/pLJZkr4kEwAuX6k= +github.com/cloudwego/kitex v0.8.0 h1:eL6Xb2vnHfOjvDqmPsvCuheDo513lOc1HG6hSHGiFyM= +github.com/cloudwego/kitex v0.8.0/go.mod h1:5o98nYKp8GwauvA1hhJwTA3YQcPa8Nu5tx+2j+JjwoM= github.com/cloudwego/localsession v0.0.2 h1:N9/IDtCPj1fCL9bCTP+DbXx3f40YjVYWcwkJG0YhQkY= github.com/cloudwego/localsession v0.0.2/go.mod h1:kiJxmvAcy4PLgKtEnPS5AXed3xCiXcs7Z+KBHP72Wv8= github.com/cloudwego/netpoll v0.2.4/go.mod h1:1T2WVuQ+MQw6h6DpE45MohSvDTKdy2DlzCx2KsnPI4E= github.com/cloudwego/netpoll v0.3.1/go.mod h1:1T2WVuQ+MQw6h6DpE45MohSvDTKdy2DlzCx2KsnPI4E= github.com/cloudwego/netpoll v0.4.0/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ= -github.com/cloudwego/netpoll v0.4.1 h1:/pGsY7Rs09KqEXEniB9fcsEWfi1iY+66bKUO3/NO6hc= -github.com/cloudwego/netpoll v0.4.1/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ= +github.com/cloudwego/netpoll v0.5.1 h1:zDUF7xF0C97I10fGlQFJ4jg65khZZMUvSu/TWX44Ohc= +github.com/cloudwego/netpoll v0.5.1/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ= github.com/cloudwego/thriftgo v0.1.2/go.mod h1:LzeafuLSiHA9JTiWC8TIMIq64iadeObgRUhmVG1OC/w= github.com/cloudwego/thriftgo v0.2.4/go.mod h1:8i9AF5uDdWHGqzUhXDlubCjx4MEfKvWXGQlMWyR0tM4= github.com/cloudwego/thriftgo v0.2.7/go.mod h1:8i9AF5uDdWHGqzUhXDlubCjx4MEfKvWXGQlMWyR0tM4= github.com/cloudwego/thriftgo v0.2.11/go.mod h1:dAyXHEmKXo0LfMCrblVEY3mUZsdeuA5+i0vF5f09j7E= -github.com/cloudwego/thriftgo v0.3.0 h1:BBb9hVcqmu9p4iKUP/PSIaDB21Vfutgd7k2zgK37Q9Q= -github.com/cloudwego/thriftgo v0.3.0/go.mod h1:AvH0iEjvKHu3cdxG7JvhSAaffkS4h2f4/ZxpJbm48W4= +github.com/cloudwego/thriftgo v0.3.3/go.mod h1:29ukiySoAMd0vXMYIduAY9dph/7dmChvOS11YLotFb8= +github.com/cloudwego/thriftgo v0.3.6 h1:gHHW8Ag3cAEQ/awP4emTJiRPr5yQjbANhcsmV8/Epbw= +github.com/cloudwego/thriftgo v0.3.6/go.mod h1:29ukiySoAMd0vXMYIduAY9dph/7dmChvOS11YLotFb8= github.com/cncf/udpa/go 
v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= @@ -95,13 +121,19 @@ github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -109,6 +141,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -128,27 +161,44 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 h1:mpL/HvfIgIejhVwAfxBQkwEjlhP5o0O9RAeTAjpwzxc= github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= +github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= 
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/jhump/protoreflect v1.8.2 h1:k2xE7wcUomeqwY0LDCYA16y4WWfyTcMx5mKhk0d4ua0= github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jhump/protoreflect v1.15.6 h1:WMYJbw2Wo+KOWwZFvgY0jMoVHM6i4XIvRs2RcBj5VmI= +github.com/jhump/protoreflect v1.15.6/go.mod h1:jCHoyYQIJnaabEYnbGwyo9hUqfyUMTbJw/tAut5t97E= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1 h1:JL2rWnBX8jnbHHlLcLde3BBWs+jzqZvOmF+M3sXoNOE= @@ -157,30 +207,40 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16 
h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5 h1:uiS4zKYKJVj5F3ID+5iylfKPsEQmBEOucSD9Vgmn0i0= github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5/go.mod h1:I8AX+yW//L8Hshx6+a1m3bYkwXkpsVjA2795vP4f4oQ= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -195,30 +255,56 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -231,14 +317,17 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/t-tomalak/logrus-prefixed-formatter v0.5.2 h1:m4hdfSF9f2R5imvZJzEzit4Sm9i12JgXEZCIrTTrBL4= github.com/t-tomalak/logrus-prefixed-formatter v0.5.2/go.mod h1:koTBrtn4EvuRvh8ay81sCRdAqXhys32PXxMjJbe0FO0= github.com/thrift-iterator/go v0.0.0-20190402154806-9b5a67519118/go.mod h1:60PRwE/TCI1UqLvn8v2pwAf6+yzTPLP/Ji5xaesWDqk= -github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= -github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/v2pro/plz v0.0.0-20221028024117-e5f9aec5b631/go.mod h1:3gacX+hQo+xvl0vtLqCMufzxuNCwt4geAVOMt2LQYfE= @@ -252,35 +341,30 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= -go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.0.0-20220722155209-00200b7164a7/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.2.0 h1:W1sUEHXiJTfjaFJ5SLo0N6lZn+0eO5gWD1MFeTGqQEY= golang.org/x/arch v0.2.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image 
v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -310,9 +394,11 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -325,28 +411,33 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -370,20 +461,22 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -422,14 +515,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384 h1:z+j74wi4yV+P7EtK9gPLGukOk7mFOy9wMQaC0wNb7eY= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -443,11 +538,14 @@ google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX7 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= 
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -455,8 +553,10 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/ccr/base/backend.go b/pkg/ccr/base/backend.go index 2878de17..cca459d0 100644 --- a/pkg/ccr/base/backend.go +++ b/pkg/ccr/base/backend.go @@ -3,17 +3,16 @@ package base import "fmt" type Backend struct { - Id int64 - Host string - HeartbeatPort uint16 - BePort uint16 - HttpPort uint16 - BrpcPort uint16 + Id int64 + Host string + BePort uint16 + HttpPort uint16 + BrpcPort uint16 } // Backend Stringer func (b *Backend) String() string { - return fmt.Sprintf("Backend: {Id: %d, Host: %s, HeartbeatPort: %d, BePort: %d, HttpPort: %d, BrpcPort: %d}", b.Id, b.Host, b.HeartbeatPort, b.BePort, b.HttpPort, b.BrpcPort) + return fmt.Sprintf("Backend: {Id: %d, Host: %s, BePort: %d, HttpPort: %d, BrpcPort: %d}", b.Id, b.Host, b.BePort, b.HttpPort, b.BrpcPort) } func (b *Backend) GetHttpPortStr() string { diff --git a/pkg/ccr/base/pool.go b/pkg/ccr/base/pool.go index ddca4614..3ac77ad8 100644 --- a/pkg/ccr/base/pool.go +++ b/pkg/ccr/base/pool.go @@ -46,5 +46,3 @@ func GetMysqlDB(dsn string) (*sql.DB, error) { return db, nil } } - -// TODO: 添加超时和Ping检测 diff --git a/pkg/ccr/base/spec.go b/pkg/ccr/base/spec.go index 08ddfbae..94ddceba 100644 --- a/pkg/ccr/base/spec.go +++ b/pkg/ccr/base/spec.go @@ -3,21 +3,30 @@ package base import ( "database/sql" "fmt" + "regexp" "strconv" "strings" "time" _ "github.com/go-sql-driver/mysql" + "github.com/selectdb/ccr_syncer/pkg/ccr/record" "github.com/selectdb/ccr_syncer/pkg/utils" "github.com/selectdb/ccr_syncer/pkg/xerror" + log "github.com/sirupsen/logrus" - "go.uber.org/zap" ) +var 
ErrRestoreSignatureNotMatched = xerror.NewWithoutStack(xerror.Normal, "The signature is not matched, the table already exist but with different schema") +var ErrBackupTableNotFound = xerror.NewWithoutStack(xerror.Normal, "backup table not found") +var ErrBackupPartitionNotFound = xerror.NewWithoutStack(xerror.Normal, "backup partition not found") + const ( BACKUP_CHECK_DURATION = time.Second * 3 RESTORE_CHECK_DURATION = time.Second * 3 MAX_CHECK_RETRY_TIMES = 86400 // 3 day + SIGNATURE_NOT_MATCHED = "already exist but with different schema" + + FE_CONFIG_ENABLE_RESTORE_SNAPSHOT_COMPRESSION = "enable_restore_snapshot_rpc_compression" ) type BackupState int @@ -94,13 +103,109 @@ func _parseRestoreState(state string) RestoreState { } } +type RestoreInfo struct { + State RestoreState + StateStr string + Label string + Status string + Timestamp string + ReplicationNum int64 + CreateTime string // 2024-10-22 06:29:27 +} + +func parseRestoreInfo(parser *utils.RowParser) (*RestoreInfo, error) { + restoreStateStr, err := parser.GetString("State") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore State failed") + } + + label, err := parser.GetString("Label") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore Label failed") + } + + restoreStatus, err := parser.GetString("Status") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore Status failed") + } + + timestamp, err := parser.GetString("Timestamp") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore Timestamp failed") + } + + replicationNum, err := parser.GetInt64("ReplicationNum") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore ReplicationNum failed") + } + + createTime, err := parser.GetString("CreateTime") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse restore CreateTime failed") + } + + info := &RestoreInfo{ + State: _parseRestoreState(restoreStateStr), + StateStr: restoreStateStr, + Label: label, + Status: restoreStatus, + Timestamp: timestamp, + ReplicationNum: replicationNum, + CreateTime: createTime, + } + return info, nil +} + +type BackupInfo struct { + State BackupState + StateStr string + SnapshotName string + Status string + CreateTime string // 2024-10-22 06:27:06 +} + +func parseBackupInfo(parser *utils.RowParser) (*BackupInfo, error) { + stateStr, err := parser.GetString("State") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse backup State failed") + } + + snapshotName, err := parser.GetString("SnapshotName") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse backup SnapshotName failed") + } + + createTime, err := parser.GetString("CreateTime") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse backup CreateTime failed") + } + + status, err := parser.GetString("Status") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "parse backup Status failed") + } + + info := &BackupInfo{ + State: ParseBackupState(stateStr), + StateStr: stateStr, + SnapshotName: snapshotName, + CreateTime: createTime, + Status: status, + } + return info, nil +} + type Frontend struct { Host string `json:"host"` Port string `json:"port"` ThriftPort string `json:"thrift_port"` + IsMaster bool `json:"is_master"` +} + +func (f *Frontend) String() string { + return fmt.Sprintf("host: %s, port: %s, thrift_port: %s, is_master: %v", f.Host, f.Port, f.ThriftPort, f.IsMaster) } -// TODO(Drogon): timeout config type Spec struct { // 
embed Frontend as current master frontend Frontend @@ -115,6 +220,9 @@ type Spec struct { Table string `json:"table"` TableId int64 `json:"table_id"` + // The mapping of host private and public ip + HostMapping map[string]string `json:"host_mapping,omitempty"` + observers []utils.Observer[SpecEvent] } @@ -158,10 +266,6 @@ func (s *Spec) Valid() error { return nil } -func (s *Spec) IsSameHostDB(dest *Spec) bool { - return s.Host == dest.Host && s.Port == dest.Port && s.ThriftPort == dest.ThriftPort && s.Database == dest.Database -} - func (s *Spec) connect(dsn string) (*sql.DB, error) { return GetMysqlDB(dsn) } @@ -196,7 +300,7 @@ func (s *Spec) IsDatabaseEnableBinlog() (bool, error) { } var createDBString string - query := fmt.Sprintf("SHOW CREATE DATABASE %s", s.Database) + query := fmt.Sprintf("SHOW CREATE DATABASE %s", utils.FormatKeywordName(s.Database)) rows, err := db.Query(query) if err != nil { return false, xerror.Wrap(err, xerror.Normal, query) @@ -234,7 +338,7 @@ func (s *Spec) IsTableEnableBinlog() (bool, error) { } var createTableString string - query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", s.Database, s.Table) + query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", utils.FormatKeywordName(s.Database), utils.FormatKeywordName(s.Table)) rows, err := db.Query(query) if err != nil { return false, xerror.Wrap(err, xerror.Normal, query) @@ -256,13 +360,50 @@ func (s *Spec) IsTableEnableBinlog() (bool, error) { return false, xerror.Wrap(err, xerror.Normal, query) } - log.Infof("table %s.%s create string: %s", s.Database, s.Table, createTableString) + log.Tracef("table %s.%s create string: %s", s.Database, s.Table, createTableString) // check "binlog.enable" = "true" in create table string binlogEnableString := `"binlog.enable" = "true"` return strings.Contains(createTableString, binlogEnableString), nil } +func (s *Spec) IsEnableRestoreSnapshotCompression() (bool, error) { + log.Debugf("check frontend enable restore snapshot compression") + + db, err := s.Connect() + if err != nil { + return false, err + } + + sql := fmt.Sprintf("SHOW FRONTEND CONFIG LIKE '%s'", FE_CONFIG_ENABLE_RESTORE_SNAPSHOT_COMPRESSION) + rows, err := db.Query(sql) + if err != nil { + return false, xerror.Wrap(err, xerror.Normal, "show frontend config failed") + } + defer rows.Close() + + enableCompress := false + if rows.Next() { + rowParser := utils.NewRowParser() + if err := rowParser.Parse(rows); err != nil { + return false, xerror.Wrap(err, xerror.Normal, "parse show frontend config result failed") + } + value, err := rowParser.GetString("Value") + if err != nil { + return false, xerror.Wrap(err, xerror.Normal, "parse show frontend config Value failed") + } + enableCompress = strings.ToLower(value) == "true" + } + + if err := rows.Err(); err != nil { + return false, xerror.Wrapf(err, xerror.Normal, + "check frontend enable restore snapshot compress, sql: %s", sql) + } + + log.Debugf("frontend enable restore snapshot compression: %t", enableCompress) + return enableCompress, nil +} + func (s *Spec) GetAllTables() ([]string, error) { log.Debugf("get all tables in database %s", s.Database) @@ -289,10 +430,122 @@ func (s *Spec) GetAllTables() ([]string, error) { } tables = append(tables, table) } + + if err := rows.Err(); err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "SHOW TABLES") + } + return tables, nil } -func (s *Spec) dropTable(table string) error { +func (s *Spec) queryResult(querySQL string, queryColumn string, errMsg string) ([]string, error) { + db, err := s.ConnectDB() + if err != nil { + 
return nil, err + } + + rows, err := db.Query(querySQL) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, querySQL+" failed") + } + defer rows.Close() + + var results []string + for rows.Next() { + rowParser := utils.NewRowParser() + if err := rowParser.Parse(rows); err != nil { + return nil, xerror.Wrap(err, xerror.Normal, errMsg) + } + result, err := rowParser.GetString(queryColumn) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, errMsg) + } + results = append(results, result) + } + + if err := rows.Err(); err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "query result failed, sql: %s", querySQL) + } + + return results, nil +} + +func (s *Spec) GetAllViewsFromTable(tableName string) ([]string, error) { + log.Debugf("get all view from table %s", tableName) + + var results []string + // first, query information_schema.tables with table_schema and table_type, get all views' name + querySql := fmt.Sprintf("SELECT table_name FROM information_schema.tables WHERE table_schema = '%s' AND table_type = 'VIEW'", s.Database) + viewsFromQuery, err := s.queryResult(querySql, "table_name", "QUERY VIEWS") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "query views from information schema failed") + } + + // then query view's create sql, if create sql contains tableName, this view is wanted + viewRegex := regexp.MustCompile("(`internal`\\.`\\w+`|`default_cluster:\\w+`)\\.`" + strings.TrimSpace(tableName) + "`") + for _, eachViewName := range viewsFromQuery { + showCreateViewSql := fmt.Sprintf("SHOW CREATE VIEW %s", eachViewName) + createViewSqlList, err := s.queryResult(showCreateViewSql, "Create View", "SHOW CREATE VIEW") + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "show create view failed") + } + + // a view has only one create sql, so use createViewSqlList[0] as the only sql + if len(createViewSqlList) > 0 { + found := viewRegex.MatchString(createViewSqlList[0]) + if found { + results = append(results, eachViewName) + } + } + } + + log.Debugf("get view result is %s", results) + return results, nil +} + +func (s *Spec) RenameTable(destTableName string, renameTable *record.RenameTable) error { + destTableName = utils.FormatKeywordName(destTableName) + // rename table may be 'rename table', 'rename rollup', 'rename partition' + var sql string + // ALTER TABLE table1 RENAME table2; + if renameTable.NewTableName != "" && renameTable.OldTableName != "" { + oldName := utils.FormatKeywordName(renameTable.OldTableName) + newName := utils.FormatKeywordName(renameTable.NewTableName) + sql = fmt.Sprintf("ALTER TABLE %s RENAME %s", oldName, newName) + } + + // ALTER TABLE example_table RENAME ROLLUP rollup1 rollup2; + // if rename rollup, table name is unchanged + if renameTable.NewRollupName != "" && renameTable.OldRollupName != "" { + oldName := utils.FormatKeywordName(renameTable.OldRollupName) + newName := utils.FormatKeywordName(renameTable.NewRollupName) + sql = fmt.Sprintf("ALTER TABLE %s RENAME ROLLUP %s %s", destTableName, oldName, newName) + } + + // ALTER TABLE example_table RENAME PARTITION p1 p2; + // if rename partition, table name is unchanged + if renameTable.NewPartitionName != "" && renameTable.OldPartitionName != "" { + oldName := utils.FormatKeywordName(renameTable.OldPartitionName) + newName := utils.FormatKeywordName(renameTable.NewPartitionName) + sql = fmt.Sprintf("ALTER TABLE %s RENAME PARTITION %s %s", destTableName, oldName, newName) + } + if sql == "" { + return xerror.Errorf(xerror.Normal, "rename sql is 
empty") + } + + log.Infof("rename table sql: %s", sql) + return s.DbExec(sql) +} + +func (s *Spec) RenameTableWithName(oldName, newName string) error { + oldName = utils.FormatKeywordName(oldName) + newName = utils.FormatKeywordName(newName) + sql := fmt.Sprintf("ALTER TABLE %s RENAME %s", oldName, newName) + log.Infof("rename table sql: %s", sql) + return s.DbExec(sql) +} + +func (s *Spec) dropTable(table string, force bool) error { log.Infof("drop table %s.%s", s.Database, table) db, err := s.Connect() @@ -300,7 +553,11 @@ func (s *Spec) dropTable(table string) error { return err } - sql := fmt.Sprintf("DROP TABLE %s.%s", s.Database, table) + suffix := "" + if force { + suffix = "FORCE" + } + sql := fmt.Sprintf("DROP TABLE %s.%s %s", utils.FormatKeywordName(s.Database), utils.FormatKeywordName(table), suffix) _, err = db.Exec(sql) if err != nil { return xerror.Wrapf(err, xerror.Normal, "drop table %s.%s failed, sql: %s", s.Database, table, sql) @@ -316,13 +573,13 @@ func (s *Spec) ClearDB() error { return err } - sql := fmt.Sprintf("DROP DATABASE %s", s.Database) + sql := fmt.Sprintf("DROP DATABASE %s", utils.FormatKeywordName(s.Database)) _, err = db.Exec(sql) if err != nil { return xerror.Wrapf(err, xerror.Normal, "drop database %s failed", s.Database) } - if _, err = db.Exec("CREATE DATABASE " + s.Database); err != nil { + if _, err = db.Exec("CREATE DATABASE " + utils.FormatKeywordName(s.Database)); err != nil { return xerror.Wrapf(err, xerror.Normal, "create database %s failed", s.Database) } return nil @@ -336,26 +593,46 @@ func (s *Spec) CreateDatabase() error { return nil } - if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + s.Database); err != nil { + if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + utils.FormatKeywordName(s.Database)); err != nil { return xerror.Wrapf(err, xerror.Normal, "create database %s failed", s.Database) } return nil } -func (s *Spec) CreateTable(stmt string) error { - db, err := s.Connect() - if err != nil { - return nil +func (s *Spec) CreateTableOrView(createTable *record.CreateTable, srcDatabase string) error { + // Creating table will only occur when sync db. + // When create view, the db name of sql is source db name, we should use dest db name to create view + createSql := createTable.Sql + if createTable.IsCreateView() { + log.Debugf("create view, use dest db name to replace source db name") + + // replace `internal`.`source_db_name`. or `default_cluster:source_db_name`. to `internal`.`dest_db_name`. + originalNameNewStyle := "`internal`.`" + strings.TrimSpace(srcDatabase) + "`." + originalNameOldStyle := "`default_cluster:" + strings.TrimSpace(srcDatabase) + "`." // for Doris 2.0.x + replaceName := "`internal`.`" + strings.TrimSpace(s.Database) + "`." + createSql = strings.ReplaceAll( + strings.ReplaceAll(createSql, originalNameNewStyle, replaceName), originalNameOldStyle, replaceName) + log.Debugf("original create view sql is %s, after replace, now sql is %s", createTable.Sql, createSql) } - if _, err = db.Exec(stmt); err != nil { - return xerror.Wrapf(err, xerror.Normal, "create table %s.%s failed", s.Database, s.Table) + // Compatible with doris 2.1.x, see apache/doris#44834 for details. 
+ for strings.Contains(createSql, "MAXVALUEMAXVALUE") { + createSql = strings.Replace(createSql, "MAXVALUEMAXVALUE", "MAXVALUE, MAXVALUE", -1) } - return nil + + log.Infof("create table or view sql: %s", createSql) + + list := []string{} + if strings.Contains(createSql, "agg_state<") { + log.Infof("agg_state is exists in the create table sql, set enable_agg_state=true") + list = append(list, "SET enable_agg_state=true") + } + list = append(list, createSql) + return s.DbExec(list...) } func (s *Spec) CheckDatabaseExists() (bool, error) { - log.Debug("check database exist by spec", zap.String("spec", s.String())) + log.Debugf("check database exist by spec: %s", s.String()) db, err := s.Connect() if err != nil { return false, err @@ -389,14 +666,19 @@ func (s *Spec) CheckDatabaseExists() (bool, error) { // check table exits in database dir by spec func (s *Spec) CheckTableExists() (bool, error) { - log.Debug("check table exists by spec", zap.String("spec", s.String())) + log.Debugf("check table exist by spec: %s", s.String()) + return s.CheckTableExistsByName(s.Table) +} + +// check table exists in database dir by the specified table name. +func (s *Spec) CheckTableExistsByName(tableName string) (bool, error) { db, err := s.Connect() if err != nil { return false, err } - sql := fmt.Sprintf("SHOW TABLES FROM %s LIKE '%s'", s.Database, s.Table) + sql := fmt.Sprintf("SHOW TABLES FROM %s LIKE '%s'", utils.FormatKeywordName(s.Database), tableName) rows, err := db.Query(sql) if err != nil { return false, xerror.Wrapf(err, xerror.Normal, "show tables failed, sql: %s", sql) @@ -421,8 +703,35 @@ func (s *Spec) CheckTableExists() (bool, error) { return table != "", nil } +func (s *Spec) CancelRestoreIfExists(snapshotName string) error { + log.Debugf("cancel restore %s, db name: %s", snapshotName, s.Database) + + db, err := s.Connect() + if err != nil { + return err + } + + info, err := s.queryRestoreInfo(db, snapshotName) + if err != nil { + return err + } + + if info == nil || info.State == RestoreStateCancelled || info.State == RestoreStateFinished { + return nil + } + + sql := fmt.Sprintf("CANCEL RESTORE FROM %s", utils.FormatKeywordName(s.Database)) + log.Infof("cancel restore %s, sql: %s", snapshotName, sql) + _, err = db.Exec(sql) + if err != nil { + return xerror.Wrapf(err, xerror.Normal, "cancel restore failed, sql: %s", sql) + } + return nil +} + +// Create a full snapshot of the specified tables, if tables is empty, backup the entire database. 
// mysql> BACKUP SNAPSHOT ccr.snapshot_20230605 TO `__keep_on_local__` ON ( src_1 ) PROPERTIES ("type" = "full"); -func (s *Spec) CreateSnapshotAndWaitForDone(tables []string) (string, error) { +func (s *Spec) CreateSnapshot(snapshotName string, tables []string) error { if tables == nil { tables = make([]string, 0) } @@ -430,152 +739,346 @@ func (s *Spec) CreateSnapshotAndWaitForDone(tables []string) (string, error) { tables = append(tables, s.Table) } - var snapshotName string var tableRefs string if len(tables) == 1 { - // snapshot name format "ccrs_${table}_${timestamp}" // table refs = table - snapshotName = fmt.Sprintf("ccrs_%s_%s_%d", s.Database, s.Table, time.Now().Unix()) - tableRefs = tables[0] + tableRefs = utils.FormatKeywordName(tables[0]) } else { - // snapshot name format "ccrs_${db}_${timestamp}" // table refs = tables.join(", ") - snapshotName = fmt.Sprintf("ccrs_%s_%d", s.Database, time.Now().Unix()) - tableRefs = strings.Join(tables, ", ") + tableRefs = "`" + strings.Join(tables, "`,`") + "`" } - log.Infof("create snapshot %s.%s", s.Database, snapshotName) + // means source is a empty db, table number is 0, so backup the entire database + if tableRefs == "``" { + tableRefs = "" + } else { + tableRefs = fmt.Sprintf("ON ( %s )", tableRefs) + } db, err := s.Connect() if err != nil { - return "", err + return err } - backupSnapshotSql := fmt.Sprintf("BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` ON ( %s ) PROPERTIES (\"type\" = \"full\")", s.Database, snapshotName, tableRefs) - log.Debugf("backup snapshot sql: %s", backupSnapshotSql) + backupSnapshotSql := fmt.Sprintf("BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` %s PROPERTIES (\"type\" = \"full\")", + utils.FormatKeywordName(s.Database), utils.FormatKeywordName(snapshotName), tableRefs) + log.Infof("create snapshot %s.%s, backup snapshot sql: %s", s.Database, snapshotName, backupSnapshotSql) _, err = db.Exec(backupSnapshotSql) if err != nil { - return "", xerror.Wrapf(err, xerror.Normal, "backup snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql) + return xerror.Wrapf(err, xerror.Normal, "backup snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql) } - backupFinished, err := s.CheckBackupFinished(snapshotName) + return nil +} + +// mysql> BACKUP SNAPSHOT ccr.snapshot_20230605 TO `__keep_on_local__` ON (src_1 PARTITION (`p1`)) PROPERTIES ("type" = "full"); +func (s *Spec) CreatePartialSnapshot(snapshotName, table string, partitions []string) error { + if len(table) == 0 { + return xerror.Errorf(xerror.Normal, "source db is empty! 
you should have at least one table") + } + + // table refs = table + tableRef := utils.FormatKeywordName(table) + + log.Infof("create partial snapshot %s.%s", s.Database, snapshotName) + + db, err := s.Connect() if err != nil { - return "", err + return err } - if !backupFinished { - err = xerror.Errorf(xerror.Normal, "check backup state timeout, max try times: %d, sql: %s", MAX_CHECK_RETRY_TIMES, backupSnapshotSql) - return "", err + + partitionRefs := "" + if len(partitions) > 0 { + partitionRefs = " PARTITION (`" + strings.Join(partitions, "`,`") + "`)" + } + backupSnapshotSql := fmt.Sprintf( + "BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` ON (%s%s) PROPERTIES (\"type\" = \"full\")", + utils.FormatKeywordName(s.Database), snapshotName, tableRef, partitionRefs) + log.Debugf("backup partial snapshot sql: %s", backupSnapshotSql) + _, err = db.Exec(backupSnapshotSql) + if err != nil { + if strings.Contains(err.Error(), "Unknown table") { + return ErrBackupTableNotFound + } else if strings.Contains(err.Error(), "Unknown partition") { + return ErrBackupPartitionNotFound + } else { + return xerror.Wrapf(err, xerror.Normal, "backup partial snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql) + } } - return snapshotName, nil + return nil } // TODO: Add TaskErrMsg -func (s *Spec) checkBackupFinished(snapshotName string) (BackupState, error) { +func (s *Spec) checkBackupFinished(snapshotName string) (BackupState, string, error) { log.Debugf("check backup state of snapshot %s", snapshotName) db, err := s.Connect() if err != nil { - return BackupStateUnknown, err + return BackupStateUnknown, "", err } - sql := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName = \"%s\"", s.Database, snapshotName) + sql := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName = \"%s\"", utils.FormatKeywordName(s.Database), snapshotName) log.Debugf("check backup state sql: %s", sql) rows, err := db.Query(sql) if err != nil { - return BackupStateUnknown, xerror.Wrapf(err, xerror.Normal, "show backup failed, sql: %s", sql) + return BackupStateUnknown, "", xerror.Wrapf(err, xerror.Normal, "show backup failed, sql: %s", sql) } defer rows.Close() - var backupStateStr string if rows.Next() { rowParser := utils.NewRowParser() if err := rowParser.Parse(rows); err != nil { - return BackupStateUnknown, xerror.Wrap(err, xerror.Normal, sql) + return BackupStateUnknown, "", xerror.Wrap(err, xerror.Normal, sql) } - backupStateStr, err = rowParser.GetString("State") + + info, err := parseBackupInfo(rowParser) if err != nil { - return BackupStateUnknown, xerror.Wrap(err, xerror.Normal, sql) + return BackupStateUnknown, "", xerror.Wrap(err, xerror.Normal, sql) } - log.Infof("check snapshot %s backup state: [%v]", snapshotName, backupStateStr) - return ParseBackupState(backupStateStr), nil + log.Infof("check snapshot %s backup state: [%v]", snapshotName, info.StateStr) + return info.State, info.Status, nil } - return BackupStateUnknown, xerror.Errorf(xerror.Normal, "no backup state found, sql: %s", sql) + + if err := rows.Err(); err != nil { + return BackupStateUnknown, "", xerror.Wrapf(err, xerror.Normal, "check snapshot backup state, sql: %s", sql) + } + + return BackupStateUnknown, "", xerror.Errorf(xerror.Normal, "no backup state found, sql: %s", sql) } func (s *Spec) CheckBackupFinished(snapshotName string) (bool, error) { - log.Debug("check backup state", zap.String("database", s.Database)) + log.Debugf("check backup state, spec: %s, snapshot: %s", s.String(), snapshotName) - for i := 0; i < MAX_CHECK_RETRY_TIMES; i++ { 
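// Illustrative sketch (not part of this diff) of how the partial-snapshot statement above is
// assembled and how backup errors are classified. The sentinel errors below are hypothetical
// stand-ins for ErrBackupTableNotFound / ErrBackupPartitionNotFound referenced in the change.
package example

import (
	"errors"
	"fmt"
	"strings"
)

var (
	errTableNotFound     = errors.New("backup table not found")
	errPartitionNotFound = errors.New("backup partition not found")
)

// buildPartialBackupSQL restricts the backup to one table and, optionally, a partition list.
func buildPartialBackupSQL(database, snapshot, table string, partitions []string) string {
	partitionRefs := ""
	if len(partitions) > 0 {
		partitionRefs = " PARTITION (`" + strings.Join(partitions, "`,`") + "`)"
	}
	return fmt.Sprintf(
		"BACKUP SNAPSHOT `%s`.`%s` TO `__keep_on_local__` ON (`%s`%s) PROPERTIES (\"type\" = \"full\")",
		database, snapshot, table, partitionRefs)
}

// classifyBackupError maps FE error messages to sentinel errors so callers can fall back
// (e.g. to a full snapshot) with errors.Is instead of string-matching everywhere.
func classifyBackupError(err error) error {
	switch {
	case err == nil:
		return nil
	case strings.Contains(err.Error(), "Unknown table"):
		return errTableNotFound
	case strings.Contains(err.Error(), "Unknown partition"):
		return errPartitionNotFound
	default:
		return err
	}
}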
- if backupState, err := s.checkBackupFinished(snapshotName); err != nil { - return false, err - } else if backupState == BackupStateFinished { - return true, nil - } else if backupState == BackupStateCancelled { - return false, xerror.Errorf(xerror.Normal, "backup failed or canceled") - } else { - // BackupStatePending, BackupStateUnknown - time.Sleep(BACKUP_CHECK_DURATION) + // Retry network related error to avoid full sync when the target network is interrupted, process is restarted. + if backupState, status, err := s.checkBackupFinished(snapshotName); err != nil && !isNetworkRelated(err) { + return false, err + } else if err == nil && backupState == BackupStateFinished { + return true, nil + } else if err == nil && backupState == BackupStateCancelled { + return false, xerror.Errorf(xerror.Normal, "backup failed or canceled, backup status: %s", status) + } else { + // BackupStatePending, BackupStateUnknown or network related errors. + if err != nil { + log.Warnf("check backup state is failed, spec: %s, snapshot: %s, err: %v", s.String(), snapshotName, err) } + return false, nil + } +} + +// Get the valid (running or finished) backup job with a unique prefix to indicate +// if a backup job needs to be issued again. +func (s *Spec) GetValidBackupJob(snapshotNamePrefix string) (string, error) { + log.Debugf("get valid backup job if exists, database: %s, label prefix: %s", s.Database, snapshotNamePrefix) + + db, err := s.Connect() + if err != nil { + return "", err } - return false, xerror.Errorf(xerror.Normal, "check backup state timeout, max try times: %d", MAX_CHECK_RETRY_TIMES) + query := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName LIKE \"%s%%\"", + utils.FormatKeywordName(s.Database), snapshotNamePrefix) + log.Infof("show backup state sql: %s", query) + rows, err := db.Query(query) + if err != nil { + return "", xerror.Wrap(err, xerror.Normal, "query backup state failed") + } + defer rows.Close() + + labels := make([]string, 0) + for rows.Next() { + rowParser := utils.NewRowParser() + if err := rowParser.Parse(rows); err != nil { + return "", xerror.Wrap(err, xerror.Normal, "scan backup state failed") + } + + info, err := parseBackupInfo(rowParser) + if err != nil { + return "", xerror.Wrap(err, xerror.Normal, "scan backup state failed") + } + + log.Infof("check snapshot %s backup state [%v], create time: %s", + info.SnapshotName, info.StateStr, info.CreateTime) + + if info.State == BackupStateCancelled { + continue + } + + labels = append(labels, info.SnapshotName) + } + + if err := rows.Err(); err != nil { + return "", xerror.Wrapf(err, xerror.Normal, "get valid backup job, sql: %s", query) + } + + // Return the last one. Assume that the result of `SHOW BACKUP` is ordered by CreateTime in ascending order. + if len(labels) != 0 { + return labels[len(labels)-1], nil + } + + return "", nil } -// TODO: Add TaskErrMsg -func (s *Spec) checkRestoreFinished(snapshotName string) (RestoreState, error) { - log.Debugf("check restore state %s", snapshotName) +// Get the valid (running or finished) restore job with a unique prefix to indicate +// if a restore job needs to be issued again. 
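// Illustrative sketch (not part of this diff) of the retry policy introduced in
// CheckBackupFinished above: transient network failures while polling SHOW BACKUP no longer
// fail the job (which would trigger a full sync); they are logged and the check simply reports
// "not finished yet" so the caller polls again. The state names mirror those in the diff.
package example

import (
	"fmt"
	"log"
	"strings"
)

type BackupState int

const (
	BackupStateUnknown BackupState = iota
	BackupStatePending
	BackupStateFinished
	BackupStateCancelled
)

// isNetworkRelated is a simplified version of the helper added in this change.
func isNetworkRelated(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, "connection refused") ||
		strings.Contains(msg, "connection reset by peer") ||
		strings.Contains(msg, "i/o timeout")
}

// backupFinished converts one polling result into (finished, fatal error).
func backupFinished(state BackupState, status string, err error) (bool, error) {
	switch {
	case err != nil && !isNetworkRelated(err):
		return false, err // real failure, surface it
	case err != nil:
		log.Printf("transient error while checking backup state, will retry: %v", err)
		return false, nil // let the caller poll again
	case state == BackupStateFinished:
		return true, nil
	case state == BackupStateCancelled:
		return false, fmt.Errorf("backup failed or canceled, status: %s", status)
	default: // pending or unknown
		return false, nil
	}
}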
+func (s *Spec) GetValidRestoreJob(snapshotNamePrefix string) (string, error) { + log.Debugf("get valid restore job if exists, label prefix: %s", snapshotNamePrefix) db, err := s.Connect() if err != nil { - return RestoreStateUnknown, err + return "", err + } + + query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label LIKE \"%s%%\"", + utils.FormatKeywordName(s.Database), snapshotNamePrefix) + log.Infof("show restore state sql: %s", query) + rows, err := db.Query(query) + if err != nil { + return "", xerror.Wrap(err, xerror.Normal, "query restore state failed") + } + defer rows.Close() + + labels := make([]string, 0) + for rows.Next() { + rowParser := utils.NewRowParser() + if err := rowParser.Parse(rows); err != nil { + return "", xerror.Wrap(err, xerror.Normal, "scan restore state failed") + } + + info, err := parseRestoreInfo(rowParser) + if err != nil { + return "", xerror.Wrap(err, xerror.Normal, "scan restore state failed") + } + + log.Infof("check snapshot %s restore state: [%v], create time: %s", + info.Label, info.StateStr, info.CreateTime) + + if info.State == RestoreStateCancelled { + continue + } + + labels = append(labels, info.Label) } - query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label = \"%s\"", s.Database, snapshotName) + if err := rows.Err(); err != nil { + return "", xerror.Wrapf(err, xerror.Normal, "get valid restore job, sql: %s", query) + } - log.Debugf("check restore state sql: %s", query) + // Return the last one. Assume that the result of `SHOW BACKUP` is ordered by CreateTime in ascending order. + if len(labels) != 0 { + return labels[len(labels)-1], nil + } + + return "", nil +} + +// query restore info, return nil if not found +func (s *Spec) queryRestoreInfo(db *sql.DB, snapshotName string) (*RestoreInfo, error) { + query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label = \"%s\"", + utils.FormatKeywordName(s.Database), snapshotName) + + log.Debugf("query restore info sql: %s", query) rows, err := db.Query(query) if err != nil { - return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "query restore state failed") + return nil, xerror.Wrap(err, xerror.Normal, "query restore state failed") } defer rows.Close() - var restoreStateStr string if rows.Next() { rowParser := utils.NewRowParser() if err := rowParser.Parse(rows); err != nil { - return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "scan restore state failed") + return nil, xerror.Wrap(err, xerror.Normal, "scan restore state failed") } - restoreStateStr, err = rowParser.GetString("State") + + info, err := parseRestoreInfo(rowParser) if err != nil { - return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "scan restore state failed") + return nil, xerror.Wrap(err, xerror.Normal, "scan restore state failed") } - log.Infof("check snapshot %s restore state: [%v]", snapshotName, restoreStateStr) + log.Infof("query snapshot %s restore state: [%v], restore status: %s", + snapshotName, info.StateStr, info.Status) - return _parseRestoreState(restoreStateStr), nil + return info, nil } - return RestoreStateUnknown, xerror.Errorf(xerror.Normal, "no restore state found") + + if err := rows.Err(); err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "query restore info, sql: %s", query) + } + + return nil, nil +} + +func (s *Spec) checkRestoreFinished(snapshotName string) (RestoreState, string, error) { + log.Debugf("check restore state %s", snapshotName) + + db, err := s.Connect() + if err != nil { + return RestoreStateUnknown, "", err + } + + info, err := s.queryRestoreInfo(db, snapshotName) 
+ if err != nil { + return RestoreStateUnknown, "", err + } + + if info == nil { + return RestoreStateUnknown, "", xerror.Errorf(xerror.Normal, "no restore state found") + } + + return info.State, info.Status, nil } func (s *Spec) CheckRestoreFinished(snapshotName string) (bool, error) { - log.Debug("check restore is finished", zap.String("spec", s.String()), zap.String("snapshot", snapshotName)) + log.Debugf("check restore state is finished, spec: %s, snapshot: %s", s.String(), snapshotName) + + // Retry network related error to avoid full sync when the target network is interrupted, process is restarted. + if restoreState, status, err := s.checkRestoreFinished(snapshotName); err != nil && !isNetworkRelated(err) { + return false, err + } else if err == nil && restoreState == RestoreStateFinished { + return true, nil + } else if err == nil && restoreState == RestoreStateCancelled && strings.Contains(status, SIGNATURE_NOT_MATCHED) { + return false, xerror.XWrapf(ErrRestoreSignatureNotMatched, "restore failed, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status) + } else if err == nil && restoreState == RestoreStateCancelled { + return false, xerror.Errorf(xerror.Normal, "restore failed or canceled, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status) + } else { + // RestoreStatePending, RestoreStateUnknown or network error. + if err != nil { + log.Warnf("check restore state is failed, spec: %s, snapshot: %s, err: %v", s.String(), snapshotName, err) + } + return false, nil + } +} + +func (s *Spec) GetRestoreSignatureNotMatchedTableOrView(snapshotName string) (string, bool, error) { + log.Debugf("get restore signature not matched table, spec: %s, snapshot: %s", s.String(), snapshotName) for i := 0; i < MAX_CHECK_RETRY_TIMES; i++ { - if backupState, err := s.checkRestoreFinished(snapshotName); err != nil { - return false, err - } else if backupState == RestoreStateFinished { - return true, nil - } else if backupState == RestoreStateCancelled { - return false, xerror.Errorf(xerror.Normal, "backup failed or canceled, spec: %s, snapshot: %s", s.String(), snapshotName) + if restoreState, status, err := s.checkRestoreFinished(snapshotName); err != nil { + return "", false, err + } else if restoreState == RestoreStateFinished { + return "", false, nil + } else if restoreState == RestoreStateCancelled && strings.Contains(status, SIGNATURE_NOT_MATCHED) { + pattern := regexp.MustCompile("(?P<tableOrView>Table|View) (?P<tableName>.*) already exist but with different schema") + matches := pattern.FindStringSubmatch(status) + index := pattern.SubexpIndex("tableName") + if len(matches) == 0 || index == -1 || len(matches[index]) == 0 { + return "", false, xerror.Errorf(xerror.Normal, "match table name from restore status failed, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status) + } + + resource := matches[pattern.SubexpIndex("tableOrView")] + tableOrView := resource == "Table" + return matches[index], tableOrView, nil + } else if restoreState == RestoreStateCancelled { + return "", false, xerror.Errorf(xerror.Normal, "restore failed or canceled, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status) + } else { // RestoreStatePending, RestoreStateUnknown time.Sleep(RESTORE_CHECK_DURATION) } } - return false, xerror.Errorf(xerror.Normal, "check restore state timeout, max try times: %d, spec: %s, snapshot: %s", MAX_CHECK_RETRY_TIMES, s.String(), snapshotName) + log.Warnf("get restore signature not matched timeout, max try times: %d, spec: %s, snapshot: %s",
MAX_CHECK_RETRY_TIMES, s, snapshotName) + return "", false, nil } func (s *Spec) waitTransactionDone(txnId int64) error { @@ -589,7 +1092,7 @@ func (s *Spec) waitTransactionDone(txnId int64) error { // WHERE // [id=transaction_id] // [label = label_name]; - query := fmt.Sprintf("SHOW TRANSACTION FROM %s WHERE id = %d", s.Database, txnId) + query := fmt.Sprintf("SHOW TRANSACTION FROM %s WHERE id = %d", utils.FormatKeywordName(s.Database), txnId) log.Debugf("wait transaction done sql: %s", query) rows, err := db.Query(query) @@ -617,6 +1120,11 @@ func (s *Spec) waitTransactionDone(txnId int64) error { return xerror.Errorf(xerror.Normal, "transaction %d status: %s", txnId, transactionStatus) } } + + if err := rows.Err(); err != nil { + return xerror.Wrapf(err, xerror.Normal, "get transaction status failed, sql: %s", query) + } + return xerror.Errorf(xerror.Normal, "no transaction status found") } @@ -646,15 +1154,17 @@ func (s *Spec) Exec(sql string) error { } // Db Exec sql -func (s *Spec) DbExec(sql string) error { +func (s *Spec) DbExec(sqls ...string) error { db, err := s.ConnectDB() if err != nil { return err } - _, err = db.Exec(sql) - if err != nil { - return xerror.Wrapf(err, xerror.Normal, "exec sql %s failed", sql) + for _, sql := range sqls { + _, err = db.Exec(sql) + if err != nil { + return xerror.Wrapf(err, xerror.Normal, "exec sql %s failed", sql) + } } return nil } @@ -701,3 +1211,294 @@ func (s *Spec) Update(event SpecEvent) { break } } + +func (s *Spec) LightningSchemaChange(srcDatabase, tableAlias string, lightningSchemaChange *record.ModifyTableAddOrDropColumns) error { + log.Debugf("lightningSchemaChange %v", lightningSchemaChange) + + rawSql := lightningSchemaChange.RawSql + + // 1. remove database prefix + // "rawSql": "ALTER TABLE `default_cluster:ccr`.`test_ddl` ADD COLUMN `nid1` int(11) NULL COMMENT \"\"" + // replace `default_cluster:${Src.Database}`.`test_ddl` to `test_ddl` + var sql string + if strings.Contains(rawSql, fmt.Sprintf("`default_cluster:%s`.", srcDatabase)) { + sql = strings.Replace(rawSql, fmt.Sprintf("`default_cluster:%s`.", srcDatabase), "", 1) + } else { + sql = strings.Replace(rawSql, fmt.Sprintf("`%s`.", srcDatabase), "", 1) + } + + // 2. handle alias + if tableAlias != "" { + re := regexp.MustCompile("ALTER TABLE `[^`]*`") + sql = re.ReplaceAllString(sql, fmt.Sprintf("ALTER TABLE `%s`", tableAlias)) + } + + // 3. 
compatible REPLACE_IF_NOT_NULL NULL DEFAULT "null" + // See https://github.com/apache/doris/pull/41205 for details + sql = strings.Replace(sql, "REPLACE_IF_NOT_NULL NULL DEFAULT \"null\"", + "REPLACE_IF_NOT_NULL NULL DEFAULT NULL", 1) + + log.Infof("lighting schema change sql, rawSql: %s, sql: %s", rawSql, sql) + return s.DbExec(sql) +} + +func (s *Spec) RenameColumn(destTableName string, renameColumn *record.RenameColumn) error { + renameSql := fmt.Sprintf("ALTER TABLE `%s` RENAME COLUMN `%s` `%s`", + destTableName, renameColumn.ColName, renameColumn.NewColName) + log.Infof("rename column sql: %s", renameSql) + return s.DbExec(renameSql) +} + +func (s *Spec) ModifyComment(destTableName string, modifyComment *record.ModifyComment) error { + var modifySql string + if modifyComment.Type == "COLUMN" { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("ALTER TABLE `%s` ", destTableName)) + first := true + for col, comment := range modifyComment.ColToComment { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("MODIFY COLUMN `%s` COMMENT '%s'", col, utils.EscapeStringValue(comment))) + first = false + } + modifySql = sb.String() + } else if modifyComment.Type == "TABLE" { + modifySql = fmt.Sprintf("ALTER TABLE `%s` MODIFY COMMENT '%s'", destTableName, utils.EscapeStringValue(modifyComment.TblComment)) + } else { + return xerror.Errorf(xerror.Normal, "unsupported modify comment type: %s", modifyComment.Type) + } + + log.Infof("modify comment sql: %s", modifySql) + return s.DbExec(modifySql) +} + +func (s *Spec) TruncateTable(destTableName string, truncateTable *record.TruncateTable) error { + var sql string + if truncateTable.RawSql == "" { + sql = fmt.Sprintf("TRUNCATE TABLE %s", utils.FormatKeywordName(destTableName)) + } else { + sql = fmt.Sprintf("TRUNCATE TABLE %s %s", utils.FormatKeywordName(destTableName), truncateTable.RawSql) + } + + log.Infof("truncate table sql: %s", sql) + + return s.DbExec(sql) +} + +func (s *Spec) ReplaceTable(fromName, toName string, swap bool) error { + sql := fmt.Sprintf("ALTER TABLE %s REPLACE WITH TABLE %s PROPERTIES(\"swap\"=\"%t\")", + utils.FormatKeywordName(toName), utils.FormatKeywordName(fromName), swap) + + log.Infof("replace table sql: %s", sql) + + return s.DbExec(sql) +} + +func (s *Spec) DropTable(tableName string, force bool) error { + sqlSuffix := "" + if force { + sqlSuffix = "FORCE" + } + dropSql := fmt.Sprintf("DROP TABLE %s %s", utils.FormatKeywordName(tableName), sqlSuffix) + log.Infof("drop table sql: %s", dropSql) + return s.DbExec(dropSql) +} + +func (s *Spec) DropView(viewName string) error { + dropView := fmt.Sprintf("DROP VIEW IF EXISTS %s ", utils.FormatKeywordName(viewName)) + log.Infof("drop view sql: %s", dropView) + return s.DbExec(dropView) +} + +func (s *Spec) AlterViewDef(srcDatabase, viewName string, alterView *record.AlterView) error { + // 1. 
remove database prefix + // CREATE VIEW `view_test_1159493057` AS + // SELECT + // `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`user_id` AS `k1`, + // `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`name` AS `name`, + // MAX(`internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`age`) AS `v1` + // FROM `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057` + var def string + prefix := fmt.Sprintf("`internal`.`%s`.", srcDatabase) + if strings.Contains(alterView.InlineViewDef, prefix) { + def = strings.ReplaceAll(alterView.InlineViewDef, prefix, "") + } else { + prefix = fmt.Sprintf(" `%s`.", srcDatabase) + def = strings.ReplaceAll(alterView.InlineViewDef, prefix, " ") + } + + viewName = utils.FormatKeywordName(viewName) + alterViewSql := fmt.Sprintf("ALTER VIEW %s AS %s", viewName, def) + log.Infof("alter view sql: %s", alterViewSql) + return s.DbExec(alterViewSql) +} + +func (s *Spec) AddPartition(destTableName string, addPartition *record.AddPartition) error { + addPartitionSql := addPartition.GetSql(destTableName) + addPartitionSql = correctAddPartitionSql(addPartitionSql, addPartition) + log.Infof("add partition sql: %s, original sql: %s", addPartitionSql, addPartition.Sql) + return s.DbExec(addPartitionSql) +} + +func (s *Spec) DropPartition(destTableName string, dropPartition *record.DropPartition) error { + destTableName = utils.FormatKeywordName(destTableName) + dropPartitionSql := fmt.Sprintf("ALTER TABLE %s %s", destTableName, dropPartition.Sql) + log.Infof("drop partition sql: %s", dropPartitionSql) + return s.DbExec(dropPartitionSql) +} + +func (s *Spec) RenamePartition(destTableName, oldPartition, newPartition string) error { + destTableName = utils.FormatKeywordName(destTableName) + oldPartition = utils.FormatKeywordName(oldPartition) + newPartition = utils.FormatKeywordName(newPartition) + renamePartitionSql := fmt.Sprintf("ALTER TABLE %s RENAME PARTITION %s %s", + destTableName, oldPartition, newPartition) + log.Infof("rename partition sql: %s", renamePartitionSql) + return s.DbExec(renamePartitionSql) +} + +func (s *Spec) LightningIndexChange(alias string, record *record.ModifyTableAddOrDropInvertedIndices) error { + rawSql := record.GetRawSql() + if len(record.AlternativeIndexes) == 0 { + return xerror.Errorf(xerror.Normal, "lightning index change job is empty, should not be here") + } + + sql := fmt.Sprintf("ALTER TABLE %s", utils.FormatKeywordName(alias)) + if record.IsDropInvertedIndex { + dropIndexes := []string{} + for _, index := range record.AlternativeIndexes { + if !index.IsInvertedIndex() { + return xerror.Errorf(xerror.Normal, "lightning index change job is not inverted index, should not be here") + } + indexName := utils.FormatKeywordName(index.GetIndexName()) + dropIndexes = append(dropIndexes, fmt.Sprintf("DROP INDEX %s", indexName)) + } + sql = fmt.Sprintf("%s %s", sql, strings.Join(dropIndexes, ", ")) + } else { + addIndexes := []string{} + for _, index := range record.AlternativeIndexes { + if !index.IsInvertedIndex() { + return xerror.Errorf(xerror.Normal, "lightning index change job is not inverted index, should not be here") + } + columns := index.GetColumns() + columnsRef := fmt.Sprintf("(`%s`)", strings.Join(columns, "`,`")) + indexName := utils.FormatKeywordName(index.GetIndexName()) + addIndex := fmt.Sprintf("ADD INDEX %s %s USING INVERTED COMMENT '%s'", + indexName, columnsRef, index.GetComment()) + addIndexes = append(addIndexes, addIndex) + 
} + sql = fmt.Sprintf("%s %s", sql, strings.Join(addIndexes, ", ")) + } + + log.Infof("lighting index change sql, rawSql: %s, sql: %s", rawSql, sql) + return s.DbExec(sql) +} + +func (s *Spec) BuildIndex(tableAlias string, buildIndex *record.IndexChangeJob) error { + if buildIndex.IsDropOp { + return xerror.Errorf(xerror.Normal, "build index job is drop op, should not be here") + } + + if len(buildIndex.Indexes) != 1 { + return xerror.Errorf(xerror.Normal, "build index job has more than one index, should not be here") + } + + index := buildIndex.Indexes[0] + indexName := index.GetIndexName() + sql := fmt.Sprintf("BUILD INDEX %s ON %s", + utils.FormatKeywordName(indexName), utils.FormatKeywordName(tableAlias)) + + if buildIndex.PartitionName != "" { + sqlWithPart := fmt.Sprintf("%s PARTITION (%s)", sql, utils.FormatKeywordName(buildIndex.PartitionName)) + + log.Infof("build index sql: %s", sqlWithPart) + err := s.DbExec(sqlWithPart) + if err == nil { + return nil + } else if !strings.Contains(err.Error(), "is not partitioned, cannot build index with partitions") { + return err + } + + log.Infof("table %s is not partitioned, try to build index without partition", tableAlias) + } + + log.Infof("build index sql: %s", sql) + return s.DbExec(sql) +} + +func (s *Spec) RenameRollup(destTableName, oldRollup, newRollup string) error { + destTableName = utils.FormatKeywordName(destTableName) + oldRollup = utils.FormatKeywordName(oldRollup) + newRollup = utils.FormatKeywordName(newRollup) + renameRollupSql := fmt.Sprintf("ALTER TABLE %s RENAME ROLLUP %s %s", + destTableName, oldRollup, newRollup) + log.Infof("rename rollup sql: %s", renameRollupSql) + return s.DbExec(renameRollupSql) +} + +func (s *Spec) DropRollup(destTableName, rollup string) error { + destTableName = utils.FormatKeywordName(destTableName) + rollup = utils.FormatKeywordName(rollup) + dropRollupSql := fmt.Sprintf("ALTER TABLE %s DROP ROLLUP %s", destTableName, rollup) + log.Infof("drop rollup sql: %s", dropRollupSql) + return s.DbExec(dropRollupSql) +} + +func (s *Spec) DesyncTables(tables ...string) error { + var err error + + failedTables := []string{} + for _, table := range tables { + desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", utils.FormatKeywordName(table)) + log.Debugf("db exec sql: %s", desyncSql) + if err = s.DbExec(desyncSql); err != nil { + failedTables = append(failedTables, table) + } + } + + if len(failedTables) > 0 { + return xerror.Wrapf(err, xerror.FE, "failed tables: %s", strings.Join(failedTables, ",")) + } + + return nil +} + +// Determine whether the error are network related, eg connection refused, connection reset, exposed from net packages. +func isNetworkRelated(err error) bool { + msg := err.Error() + + // The below errors are exposed from net packages. + // See https://github.com/golang/go/issues/23827 for details. + return strings.Contains(msg, "timeout awaiting response headers") || + strings.Contains(msg, "connection refused") || + strings.Contains(msg, "connection reset by peer") || + strings.Contains(msg, "connection timeouted") || + strings.Contains(msg, "i/o timeout") +} + +func correctAddPartitionSql(addPartitionSql string, addPartition *record.AddPartition) string { + // HACK: + // + // The doris version before 2.1.3 and 2.0.10 did not handle unpartitioned and temporary + // partitions correctly, see https://github.com/apache/doris/pull/35461 for details. + // + // 1. fix unpartitioned add partition sql + // 2. 
support add temporary partition + if strings.Contains(addPartitionSql, "VALUES [(), ())") { + re := regexp.MustCompile(`VALUES \[\(\), \(\)\) \([^\)]+\)`) + addPartitionSql = re.ReplaceAllString(addPartitionSql, "") + } + if strings.Contains(addPartitionSql, "VALUES IN (((") { + re := regexp.MustCompile(`VALUES IN \(\(\((.*)\)\)\)`) + matches := re.FindStringSubmatch(addPartitionSql) + if len(matches) > 1 { + replace := fmt.Sprintf("VALUES IN ((%s))", matches[1]) + addPartitionSql = re.ReplaceAllString(addPartitionSql, replace) + } + } + if addPartition.IsTemp && !strings.Contains(addPartitionSql, "ADD TEMPORARY PARTITION") { + addPartitionSql = strings.ReplaceAll(addPartitionSql, "ADD PARTITION", "ADD TEMPORARY PARTITION") + } + return addPartitionSql +} diff --git a/pkg/ccr/base/specer.go b/pkg/ccr/base/specer.go index fcfb55de..d90a2064 100644 --- a/pkg/ccr/base/specer.go +++ b/pkg/ccr/base/specer.go @@ -1,8 +1,7 @@ package base import ( - "database/sql" - + "github.com/selectdb/ccr_syncer/pkg/ccr/record" "github.com/selectdb/ccr_syncer/pkg/utils" ) @@ -13,24 +12,52 @@ const ( httpNotFoundEvent SpecEvent = 1 ) +// this interface is used to for spec operation, treat it as a mysql dao type Specer interface { Valid() error - Connect() (*sql.DB, error) - ConnectDB() (*sql.DB, error) IsDatabaseEnableBinlog() (bool, error) IsTableEnableBinlog() (bool, error) + IsEnableRestoreSnapshotCompression() (bool, error) GetAllTables() ([]string, error) + GetAllViewsFromTable(tableName string) ([]string, error) ClearDB() error CreateDatabase() error - CreateTable(stmt string) error + CreateTableOrView(createTable *record.CreateTable, srcDatabase string) error CheckDatabaseExists() (bool, error) CheckTableExists() (bool, error) - CreateSnapshotAndWaitForDone(tables []string) (string, error) + CheckTableExistsByName(tableName string) (bool, error) + GetValidBackupJob(snapshotNamePrefix string) (string, error) + GetValidRestoreJob(snapshotNamePrefix string) (string, error) + CancelRestoreIfExists(snapshotName string) error + CreatePartialSnapshot(snapshotName, table string, partitions []string) error + CreateSnapshot(snapshotName string, tables []string) error + CheckBackupFinished(snapshotName string) (bool, error) CheckRestoreFinished(snapshotName string) (bool, error) + GetRestoreSignatureNotMatchedTableOrView(snapshotName string) (string, bool, error) WaitTransactionDone(txnId int64) // busy wait - Exec(sql string) error - DbExec(sql string) error + LightningSchemaChange(srcDatabase string, tableAlias string, changes *record.ModifyTableAddOrDropColumns) error + RenameColumn(destTableName string, renameColumn *record.RenameColumn) error + RenameTable(destTableName string, renameTable *record.RenameTable) error + RenameTableWithName(destTableName, newName string) error + ModifyComment(destTableName string, modifyComment *record.ModifyComment) error + TruncateTable(destTableName string, truncateTable *record.TruncateTable) error + ReplaceTable(fromName, toName string, swap bool) error + DropTable(tableName string, force bool) error + DropView(viewName string) error + AlterViewDef(srcDatabase, viewName string, alterView *record.AlterView) error + + AddPartition(destTableName string, addPartition *record.AddPartition) error + DropPartition(destTableName string, dropPartition *record.DropPartition) error + RenamePartition(destTableName, oldPartition, newPartition string) error + + LightningIndexChange(tableAlias string, changes *record.ModifyTableAddOrDropInvertedIndices) error + BuildIndex(tableAlias 
string, buildIndex *record.IndexChangeJob) error + + RenameRollup(destTableName, oldRollup, newRollup string) error + DropRollup(destTableName, rollupName string) error + + DesyncTables(tables ...string) error utils.Subject[SpecEvent] } diff --git a/pkg/ccr/base/specer_factory.go b/pkg/ccr/base/specer_factory.go index 7b6bddbe..574d4f49 100644 --- a/pkg/ccr/base/specer_factory.go +++ b/pkg/ccr/base/specer_factory.go @@ -4,8 +4,7 @@ type SpecerFactory interface { NewSpecer(tableSpec *Spec) Specer } -type SpecFactory struct { -} +type SpecFactory struct{} func NewSpecerFactory() SpecerFactory { return &SpecFactory{} diff --git a/pkg/ccr/checker.go b/pkg/ccr/checker.go index 0ae1dbed..43320fbf 100644 --- a/pkg/ccr/checker.go +++ b/pkg/ccr/checker.go @@ -132,7 +132,7 @@ func (c *Checker) check() error { c.reset() for { - log.Debugf("checker state: %s", c.state.String()) + log.Tracef("checker state: %s", c.state) switch c.state { case checkerStateRefresh: c.handleRefresh() diff --git a/pkg/ccr/errors.go b/pkg/ccr/errors.go index 78d475dc..2dd5ea31 100644 --- a/pkg/ccr/errors.go +++ b/pkg/ccr/errors.go @@ -2,6 +2,4 @@ package ccr import "github.com/selectdb/ccr_syncer/pkg/xerror" -var ( - errBackendNotFound = xerror.XNew(xerror.Meta, "backend not found") -) +var errBackendNotFound = xerror.NewWithoutStack(xerror.Meta, "backend not found") diff --git a/pkg/ccr/factory.go b/pkg/ccr/factory.go index 5c2f7d4b..13800c16 100644 --- a/pkg/ccr/factory.go +++ b/pkg/ccr/factory.go @@ -6,15 +6,17 @@ import ( ) type Factory struct { - RpcFactory rpc.IRpcFactory - MetaFactory MetaerFactory - ISpecFactory base.SpecerFactory + rpc.IRpcFactory + MetaerFactory + base.SpecerFactory + ThriftMetaFactory } -func NewFactory(rpcFactory rpc.IRpcFactory, metaFactory MetaerFactory, ISpecFactory base.SpecerFactory) *Factory { +func NewFactory(rpcFactory rpc.IRpcFactory, metaFactory MetaerFactory, ISpecFactory base.SpecerFactory, thriftMetaFactory ThriftMetaFactory) *Factory { return &Factory{ - RpcFactory: rpcFactory, - MetaFactory: metaFactory, - ISpecFactory: ISpecFactory, + IRpcFactory: rpcFactory, + MetaerFactory: metaFactory, + SpecerFactory: ISpecFactory, + ThriftMetaFactory: thriftMetaFactory, } } diff --git a/pkg/ccr/fe_mock.go b/pkg/ccr/fe_mock.go index dc180c11..57ab1674 100644 --- a/pkg/ccr/fe_mock.go +++ b/pkg/ccr/fe_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: rpc/fe.go +// Source: pkg/rpc/fe.go // // Generated by this command: // -// mockgen -source=rpc/fe.go -destination=ccr/fe_mock.go -package=ccr +// mockgen -source=pkg/rpc/fe.go -destination=pkg/ccr/fe_mock.go -package=ccr // // Package ccr is a generated GoMock package. package ccr @@ -13,6 +13,7 @@ import ( base "github.com/selectdb/ccr_syncer/pkg/ccr/base" frontendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" + status "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" types "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" gomock "go.uber.org/mock/gomock" ) @@ -40,6 +41,20 @@ func (m *MockIFeRpc) EXPECT() *MockIFeRpcMockRecorder { return m.recorder } +// Address mocks base method. +func (m *MockIFeRpc) Address() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Address") + ret0, _ := ret[0].(string) + return ret0 +} + +// Address indicates an expected call of Address. 
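// Illustrative sketch (not part of this diff) of the embedding change in pkg/ccr/factory.go
// above: by embedding the factory interfaces instead of holding named fields, their methods
// are promoted onto Factory, so callers can write f.NewSpecer(...) or f.NewMeta(...) directly.
// The tiny interfaces below are simplified stand-ins, not the real project interfaces.
package example

type SpecerFactory interface {
	NewSpecer(name string) string
}

type MetaerFactory interface {
	NewMeta(name string) string
}

// Factory embeds the interfaces; NewSpecer and NewMeta become methods of Factory itself.
type Factory struct {
	SpecerFactory
	MetaerFactory
}

func NewFactory(s SpecerFactory, m MetaerFactory) *Factory {
	// Usage (assumed): f := NewFactory(specers, metas); spec := f.NewSpecer("src")
	return &Factory{SpecerFactory: s, MetaerFactory: m}
}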
+func (mr *MockIFeRpcMockRecorder) Address() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockIFeRpc)(nil).Address)) +} + // BeginTransaction mocks base method. func (m *MockIFeRpc) BeginTransaction(arg0 *base.Spec, arg1 string, arg2 []int64) (*frontendservice.TBeginTxnResult_, error) { m.ctrl.T.Helper() @@ -70,6 +85,21 @@ func (mr *MockIFeRpcMockRecorder) CommitTransaction(arg0, arg1, arg2 any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitTransaction", reflect.TypeOf((*MockIFeRpc)(nil).CommitTransaction), arg0, arg1, arg2) } +// GetBackends mocks base method. +func (m *MockIFeRpc) GetBackends(spec *base.Spec) (*frontendservice.TGetBackendMetaResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBackends", spec) + ret0, _ := ret[0].(*frontendservice.TGetBackendMetaResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBackends indicates an expected call of GetBackends. +func (mr *MockIFeRpcMockRecorder) GetBackends(spec any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackends", reflect.TypeOf((*MockIFeRpc)(nil).GetBackends), spec) +} + // GetBinlog mocks base method. func (m *MockIFeRpc) GetBinlog(arg0 *base.Spec, arg1 int64) (*frontendservice.TGetBinlogResult_, error) { m.ctrl.T.Helper() @@ -100,11 +130,26 @@ func (mr *MockIFeRpcMockRecorder) GetBinlogLag(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBinlogLag", reflect.TypeOf((*MockIFeRpc)(nil).GetBinlogLag), arg0, arg1) } +// GetDbMeta mocks base method. +func (m *MockIFeRpc) GetDbMeta(spec *base.Spec) (*frontendservice.TGetMetaResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDbMeta", spec) + ret0, _ := ret[0].(*frontendservice.TGetMetaResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDbMeta indicates an expected call of GetDbMeta. +func (mr *MockIFeRpcMockRecorder) GetDbMeta(spec any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDbMeta", reflect.TypeOf((*MockIFeRpc)(nil).GetDbMeta), spec) +} + // GetMasterToken mocks base method. -func (m *MockIFeRpc) GetMasterToken(arg0 *base.Spec) (string, error) { +func (m *MockIFeRpc) GetMasterToken(arg0 *base.Spec) (*frontendservice.TGetMasterTokenResult_, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetMasterToken", arg0) - ret0, _ := ret[0].(string) + ret0, _ := ret[0].(*frontendservice.TGetMasterTokenResult_) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -130,6 +175,21 @@ func (mr *MockIFeRpcMockRecorder) GetSnapshot(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshot", reflect.TypeOf((*MockIFeRpc)(nil).GetSnapshot), arg0, arg1) } +// GetTableMeta mocks base method. +func (m *MockIFeRpc) GetTableMeta(spec *base.Spec, tableIds []int64) (*frontendservice.TGetMetaResult_, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTableMeta", spec, tableIds) + ret0, _ := ret[0].(*frontendservice.TGetMetaResult_) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTableMeta indicates an expected call of GetTableMeta. +func (mr *MockIFeRpcMockRecorder) GetTableMeta(spec, tableIds any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTableMeta", reflect.TypeOf((*MockIFeRpc)(nil).GetTableMeta), spec, tableIds) +} + // RestoreSnapshot mocks base method. 
func (m *MockIFeRpc) RestoreSnapshot(arg0 *base.Spec, arg1 []*frontendservice.TTableRef, arg2 string, arg3 *frontendservice.TGetSnapshotResult_) (*frontendservice.TRestoreSnapshotResult_, error) { m.ctrl.T.Helper() @@ -160,6 +220,71 @@ func (mr *MockIFeRpcMockRecorder) RollbackTransaction(spec, txnId any) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackTransaction", reflect.TypeOf((*MockIFeRpc)(nil).RollbackTransaction), spec, txnId) } +// MockresultType is a mock of resultType interface. +type MockresultType struct { + ctrl *gomock.Controller + recorder *MockresultTypeMockRecorder +} + +// MockresultTypeMockRecorder is the mock recorder for MockresultType. +type MockresultTypeMockRecorder struct { + mock *MockresultType +} + +// NewMockresultType creates a new mock instance. +func NewMockresultType(ctrl *gomock.Controller) *MockresultType { + mock := &MockresultType{ctrl: ctrl} + mock.recorder = &MockresultTypeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockresultType) EXPECT() *MockresultTypeMockRecorder { + return m.recorder +} + +// GetMasterAddress mocks base method. +func (m *MockresultType) GetMasterAddress() *types.TNetworkAddress { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMasterAddress") + ret0, _ := ret[0].(*types.TNetworkAddress) + return ret0 +} + +// GetMasterAddress indicates an expected call of GetMasterAddress. +func (mr *MockresultTypeMockRecorder) GetMasterAddress() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMasterAddress", reflect.TypeOf((*MockresultType)(nil).GetMasterAddress)) +} + +// GetStatus mocks base method. +func (m *MockresultType) GetStatus() *status.TStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatus") + ret0, _ := ret[0].(*status.TStatus) + return ret0 +} + +// GetStatus indicates an expected call of GetStatus. +func (mr *MockresultTypeMockRecorder) GetStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockresultType)(nil).GetStatus)) +} + +// IsSetMasterAddress mocks base method. +func (m *MockresultType) IsSetMasterAddress() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsSetMasterAddress") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsSetMasterAddress indicates an expected call of IsSetMasterAddress. +func (mr *MockresultTypeMockRecorder) IsSetMasterAddress() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSetMasterAddress", reflect.TypeOf((*MockresultType)(nil).IsSetMasterAddress)) +} + // MockRequest is a mock of Request interface. 
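// Illustrative sketch (not part of this diff) of how the regenerated MockIFeRpc might be used
// in an in-package test. It assumes the standard mockgen constructor NewMockIFeRpc and the
// go.uber.org/mock/gomock API; the expected address value is hypothetical.
package ccr

import (
	"testing"

	"go.uber.org/mock/gomock"
)

func TestAddressMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Expect a single Address() call and stub its return value.
	feRpc := NewMockIFeRpc(ctrl)
	feRpc.EXPECT().Address().Return("172.16.0.1:9030")

	if addr := feRpc.Address(); addr != "172.16.0.1:9030" {
		t.Fatalf("unexpected address: %s", addr)
	}
}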
type MockRequest struct { ctrl *gomock.Controller diff --git a/pkg/ccr/ingest_binlog_job.go b/pkg/ccr/ingest_binlog_job.go index bac78a2b..0d96ab7b 100644 --- a/pkg/ccr/ingest_binlog_job.go +++ b/pkg/ccr/ingest_binlog_job.go @@ -18,17 +18,30 @@ import ( log "github.com/sirupsen/logrus" ) +var errNotFoundDestMappingTableId = xerror.NewWithoutStack(xerror.Meta, "not found dest mapping table id") + type commitInfosCollector struct { commitInfos []*ttypes.TTabletCommitInfo commitInfosLock sync.Mutex } +type subTxnInfosCollector struct { + subTxnidToCommitInfos map[int64]([]*ttypes.TTabletCommitInfo) + subTxnInfosLock sync.Mutex +} + func newCommitInfosCollector() *commitInfosCollector { return &commitInfosCollector{ commitInfos: make([]*ttypes.TTabletCommitInfo, 0), } } +func newSubTxnInfosCollector() *subTxnInfosCollector { + return &subTxnInfosCollector{ + subTxnidToCommitInfos: make(map[int64]([]*ttypes.TTabletCommitInfo)), + } +} + func (cic *commitInfosCollector) appendCommitInfos(commitInfo ...*ttypes.TTabletCommitInfo) { cic.commitInfosLock.Lock() defer cic.commitInfosLock.Unlock() @@ -36,6 +49,23 @@ func (cic *commitInfosCollector) appendCommitInfos(commitInfo ...*ttypes.TTablet cic.commitInfos = append(cic.commitInfos, commitInfo...) } +func (stic *subTxnInfosCollector) appendSubTxnCommitInfos(stid int64, commitInfo ...*ttypes.TTabletCommitInfo) { + stic.subTxnInfosLock.Lock() + defer stic.subTxnInfosLock.Unlock() + + if stic.subTxnidToCommitInfos == nil { + stic.subTxnidToCommitInfos = make(map[int64]([]*ttypes.TTabletCommitInfo)) + } + + tabletCommitInfos := stic.subTxnidToCommitInfos[stid] + if tabletCommitInfos == nil { + tabletCommitInfos = make([]*ttypes.TTabletCommitInfo, 0) + } + + tabletCommitInfos = append(tabletCommitInfos, commitInfo...) 
+ stic.subTxnidToCommitInfos[stid] = tabletCommitInfos +} + func (cic *commitInfosCollector) CommitInfos() []*ttypes.TTabletCommitInfo { cic.commitInfosLock.Lock() defer cic.commitInfosLock.Unlock() @@ -43,38 +73,31 @@ func (cic *commitInfosCollector) CommitInfos() []*ttypes.TTabletCommitInfo { return cic.commitInfos } +func (stic *subTxnInfosCollector) SubTxnToCommitInfos() map[int64]([]*ttypes.TTabletCommitInfo) { + stic.subTxnInfosLock.Lock() + defer stic.subTxnInfosLock.Unlock() + + return stic.subTxnidToCommitInfos +} + type tabletIngestBinlogHandler struct { ingestJob *IngestBinlogJob binlogVersion int64 + stid int64 srcTablet *TabletMeta destTablet *TabletMeta destPartitionId int64 + destTableId int64 *commitInfosCollector - - err error - errLock sync.Mutex + *subTxnInfosCollector cancel atomic.Bool wg sync.WaitGroup } -func (h *tabletIngestBinlogHandler) setError(err error) { - h.errLock.Lock() - defer h.errLock.Unlock() - - h.err = err -} - -func (h *tabletIngestBinlogHandler) error() error { - h.errLock.Lock() - defer h.errLock.Unlock() - - return h.err -} - // handle Replica -func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool { +func (h *tabletIngestBinlogHandler) handleReplica(srcReplica, destReplica *ReplicaMeta) bool { destReplicaId := destReplica.Id log.Debugf("handle dest replica id: %d", destReplicaId) @@ -84,6 +107,7 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool } j := h.ingestJob + destStid := h.stid binlogVersion := h.binlogVersion srcTablet := h.srcTablet destPartitionId := h.destPartitionId @@ -95,30 +119,28 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool } destTabletId := destReplica.TabletId - destRpc, err := h.ingestJob.ccrJob.rpcFactory.NewBeRpc(destBackend) + destRpc, err := h.ingestJob.ccrJob.factory.NewBeRpc(destBackend) if err != nil { j.setError(err) return false } - loadId := ttypes.NewTUniqueId() - loadId.SetHi(-1) - loadId.SetLo(-1) - - srcReplicas := srcTablet.ReplicaMetas - // srcBackendIds := make([]int64, 0, srcReplicas.Len()) - iter := srcReplicas.Iter() - if ok := iter.First(); !ok { - j.setError(xerror.Errorf(xerror.Meta, "src replicas is empty")) - return false - } - srcBackendId := iter.Value().BackendId + srcBackendId := srcReplica.BackendId srcBackend := j.GetSrcBackend(srcBackendId) if srcBackend == nil { j.setError(xerror.XWrapf(errBackendNotFound, "backend id: %d", srcBackendId)) return false } + loadId := ttypes.NewTUniqueId() + loadId.SetHi(-1) + loadId.SetLo(-1) + + // for txn insert + txnId := j.txnId + if destStid != 0 { + txnId = destStid + } req := &bestruct.TIngestBinlogRequest{ - TxnId: utils.ThriftValueWrapper(j.txnId), + TxnId: utils.ThriftValueWrapper(txnId), RemoteTabletId: utils.ThriftValueWrapper[int64](srcTablet.Id), BinlogVersion: utils.ThriftValueWrapper(binlogVersion), RemoteHost: utils.ThriftValueWrapper(srcBackend.Host), @@ -131,6 +153,7 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool TabletId: destTabletId, BackendId: destBackend.Id, } + cwind := h.ingestJob.ccrJob.concurrencyManager.GetWindow(destBackend.Id) h.wg.Add(1) go func() { @@ -140,6 +163,9 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool gls.Set("job", j.ccrJob.Name) defer gls.ResetGls(gls.GoID(), map[interface{}]interface{}{}) + cwind.Acquire() + defer cwind.Release() + resp, err := destRpc.IngestBinlog(req) if err != nil { j.setError(err) @@ -148,15 +174,20 @@ func (h 
*tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool log.Debugf("ingest resp: %v", resp) if !resp.IsSetStatus() { - err = xerror.Errorf(xerror.BE, "ingest resp status not set") + err = xerror.Errorf(xerror.BE, "ingest resp status not set, req: %+v", req) j.setError(err) return } else if resp.Status.StatusCode != tstatus.TStatusCode_OK { - err = xerror.Errorf(xerror.BE, "ingest resp status code: %v, msg: %v", resp.Status.StatusCode, resp.Status.ErrorMsgs) + err = xerror.Errorf(xerror.BE, "ingest error, req %v, resp status code: %v, msg: %v", req, resp.Status.StatusCode, resp.Status.ErrorMsgs) j.setError(err) return } else { h.appendCommitInfos(commitInfo) + + // for txn insert + if destStid != 0 { + h.appendSubTxnCommitInfos(destStid, commitInfo) + } } }() @@ -166,30 +197,74 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool func (h *tabletIngestBinlogHandler) handle() { log.Debugf("handle tablet ingest binlog, src tablet id: %d, dest tablet id: %d", h.srcTablet.Id, h.destTablet.Id) + // all src replicas version > binlogVersion + srcReplicas := make([]*ReplicaMeta, 0, h.srcTablet.ReplicaMetas.Len()) + h.srcTablet.ReplicaMetas.Scan(func(srcReplicaId int64, srcReplica *ReplicaMeta) bool { + if srcReplica.Version >= h.binlogVersion { + srcReplicas = append(srcReplicas, srcReplica) + } + return true + }) + + if len(srcReplicas) == 0 { + h.ingestJob.setError(xerror.Errorf(xerror.Meta, "no src replica version > %d", h.binlogVersion)) + return + } + + srcReplicaIndex := 0 h.destTablet.ReplicaMetas.Scan(func(destReplicaId int64, destReplica *ReplicaMeta) bool { - return h.handleReplica(destReplica) + // round robbin + srcReplica := srcReplicas[srcReplicaIndex%len(srcReplicas)] + srcReplicaIndex++ + return h.handleReplica(srcReplica, destReplica) }) h.wg.Wait() h.ingestJob.appendCommitInfos(h.CommitInfos()...) + // for txn insert + if h.stid != 0 { + commitInfos := h.SubTxnToCommitInfos()[h.stid] + h.ingestJob.appendSubTxnCommitInfos(h.stid, commitInfos...) 
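// Illustrative sketch (not part of this diff) of the replica pairing done in handle() above:
// only source replicas that already contain the binlog version are eligible, and they are
// handed out to destination replicas round-robin so ingest load spreads across source
// backends. The Replica type is a simplified stand-in for the project's ReplicaMeta.
package example

import "fmt"

type Replica struct {
	Id      int64
	Version int64
}

// pairReplicas returns, for each destination replica id, the source replica it should
// pull the binlog from.
func pairReplicas(srcReplicas, destReplicas []Replica, binlogVersion int64) (map[int64]Replica, error) {
	eligible := make([]Replica, 0, len(srcReplicas))
	for _, r := range srcReplicas {
		if r.Version >= binlogVersion {
			eligible = append(eligible, r)
		}
	}
	if len(eligible) == 0 {
		return nil, fmt.Errorf("no src replica has version >= %d", binlogVersion)
	}

	pairs := make(map[int64]Replica, len(destReplicas))
	for i, dest := range destReplicas {
		pairs[dest.Id] = eligible[i%len(eligible)] // round robin over eligible sources
	}
	return pairs, nil
}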
+ } } type IngestContext struct { context.Context txnId int64 tableRecords []*record.TableRecord + tableMapping map[int64]int64 + stidMapping map[int64]int64 +} + +func NewIngestContext(txnId int64, tableRecords []*record.TableRecord, tableMapping map[int64]int64) *IngestContext { + return &IngestContext{ + Context: context.Background(), + txnId: txnId, + tableRecords: tableRecords, + tableMapping: tableMapping, + } } -func NewIngestContext(txnId int64, tableRecords []*record.TableRecord) *IngestContext { +func NewIngestContextForTxnInsert(txnId int64, tableRecords []*record.TableRecord, + tableMapping map[int64]int64, stidMapping map[int64]int64) *IngestContext { return &IngestContext{ Context: context.Background(), txnId: txnId, tableRecords: tableRecords, + tableMapping: tableMapping, + stidMapping: stidMapping, } } type IngestBinlogJob struct { - ccrJob *Job // ccr job + ccrJob *Job // ccr job + factory *Factory + + tableMapping map[int64]int64 + srcMeta IngestBinlogMetaer + destMeta IngestBinlogMetaer + stidMap map[int64]int64 + txnId int64 tableRecords []*record.TableRecord @@ -199,6 +274,7 @@ type IngestBinlogJob struct { tabletIngestJobs []*tabletIngestBinlogHandler *commitInfosCollector + *subTxnInfosCollector err error errLock sync.RWMutex @@ -214,11 +290,16 @@ func NewIngestBinlogJob(ctx context.Context, ccrJob *Job) (*IngestBinlogJob, err } return &IngestBinlogJob{ - ccrJob: ccrJob, + ccrJob: ccrJob, + factory: ccrJob.factory, + + tableMapping: ingestCtx.tableMapping, txnId: ingestCtx.txnId, tableRecords: ingestCtx.tableRecords, + stidMap: ingestCtx.stidMapping, commitInfosCollector: newCommitInfosCollector(), + subTxnInfosCollector: newSubTxnInfosCollector(), }, nil } @@ -259,6 +340,7 @@ func (j *IngestBinlogJob) Error() error { type prepareIndexArg struct { binlogVersion int64 srcTableId int64 + stid int64 srcPartitionId int64 destTableId int64 destPartitionId int64 @@ -271,14 +353,13 @@ func (j *IngestBinlogJob) prepareIndex(arg *prepareIndexArg) { // Step 1: check tablets log.Debugf("arg %+v", arg) - job := j.ccrJob - srcTablets, err := job.srcMeta.GetTablets(arg.srcTableId, arg.srcPartitionId, arg.srcIndexMeta.Id) + srcTablets, err := j.srcMeta.GetTablets(arg.srcTableId, arg.srcPartitionId, arg.srcIndexMeta.Id) if err != nil { j.setError(err) return } - destTablets, err := job.destMeta.GetTablets(arg.destTableId, arg.destPartitionId, arg.destIndexMeta.Id) + destTablets, err := j.destMeta.GetTablets(arg.destTableId, arg.destPartitionId, arg.destIndexMeta.Id) if err != nil { j.setError(err) return @@ -312,12 +393,15 @@ func (j *IngestBinlogJob) prepareIndex(arg *prepareIndexArg) { destTablet := destIter.Value() tabletIngestBinlogHandler := &tabletIngestBinlogHandler{ ingestJob: j, + stid: arg.stid, binlogVersion: arg.binlogVersion, srcTablet: srcTablet, destTablet: destTablet, destPartitionId: arg.destPartitionId, + destTableId: arg.destTableId, commitInfosCollector: newCommitInfosCollector(), + subTxnInfosCollector: newSubTxnInfosCollector(), } j.tabletIngestJobs = append(j.tabletIngestJobs, tabletIngestBinlogHandler) @@ -335,7 +419,6 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit // 还是要求一下和下游对齐的index length,这个是不可以recover的 // 思考那些是recover用的,主要就是tablet那块的 - // TODO(Drogon): add use Backup/Restore to handle this if len(indexIds) == 0 { j.setError(xerror.Errorf(xerror.Meta, "index ids is empty")) return @@ -345,19 +428,21 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit srcPartitionId := partitionRecord.Id 
srcPartitionRange := partitionRecord.Range - destPartitionId, err := job.destMeta.GetPartitionIdByRange(destTableId, srcPartitionRange) + sourceStid := partitionRecord.Stid + stidMap := j.stidMap + destPartitionId, err := j.destMeta.GetPartitionIdByRange(destTableId, srcPartitionRange) if err != nil { j.setError(err) return } // Step 1: check index id - srcIndexIdMap, err := j.ccrJob.srcMeta.GetIndexIdMap(srcTableId, srcPartitionId) + srcIndexIdMap, err := j.srcMeta.GetIndexIdMap(srcTableId, srcPartitionId) if err != nil { j.setError(err) return } - destIndexNameMap, err := j.ccrJob.destMeta.GetIndexNameMap(destTableId, destPartitionId) + destIndexNameMap, destBaseIndex, err := j.destMeta.GetIndexNameMap(destTableId, destPartitionId) if err != nil { j.setError(err) return @@ -367,12 +452,22 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit srcIndexName := srcIndexMeta.Name if ccrJob.SyncType == TableSync && srcIndexName == ccrJob.Src.Table { return ccrJob.Dest.Table + } else if srcIndexMeta.IsBaseIndex { + return destBaseIndex.Name } else { return srcIndexName } } for _, indexId := range indexIds { + if j.srcMeta.IsIndexDropped(indexId) { + continue + } + if featureFilterShadowIndexesUpsert { + if _, ok := j.ccrJob.progress.ShadowIndexes[indexId]; ok { + continue + } + } srcIndexMeta, ok := srcIndexIdMap[indexId] if !ok { j.setError(xerror.Errorf(xerror.Meta, "index id %v not found in src meta", indexId)) @@ -380,8 +475,11 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit } srcIndexName := getSrcIndexName(job, srcIndexMeta) + log.Debugf("src idx id %d, name %s", indexId, srcIndexName) if _, ok := destIndexNameMap[srcIndexName]; !ok { - j.setError(xerror.Errorf(xerror.Meta, "index name %v not found in dest meta", srcIndexName)) + j.setError(xerror.Errorf(xerror.Meta, + "index name %v not found in dest meta, is base index: %t, src index id: %d", + srcIndexName, srcIndexMeta.IsBaseIndex, indexId)) return } } @@ -390,11 +488,23 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit prepareIndexArg := prepareIndexArg{ binlogVersion: partitionRecord.Version, srcTableId: srcTableId, + stid: stidMap[sourceStid], srcPartitionId: srcPartitionId, destTableId: destTableId, destPartitionId: destPartitionId, } for _, indexId := range indexIds { + if j.srcMeta.IsIndexDropped(indexId) { + log.Infof("skip the dropped index %d", indexId) + continue + } + if featureFilterShadowIndexesUpsert { + if _, ok := j.ccrJob.progress.ShadowIndexes[indexId]; ok { + log.Infof("skip the shadow index %d", indexId) + continue + } + } + srcIndexMeta := srcIndexIdMap[indexId] destIndexMeta := destIndexNameMap[getSrcIndexName(job, srcIndexMeta)] prepareIndexArg.srcIndexMeta = srcIndexMeta @@ -405,6 +515,10 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) { log.Debugf("tableRecord: %v", tableRecord) + if j.srcMeta.IsTableDropped(tableRecord.Id) { + log.Infof("skip the dropped table %d", tableRecord.Id) + return + } if len(tableRecord.PartitionRecords) == 0 { j.setError(xerror.Errorf(xerror.Meta, "partition records is empty")) @@ -437,19 +551,21 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) { } // Step 1: check all partitions in partition records are in src/dest cluster - srcPartitionMap, err := job.srcMeta.GetPartitionRangeMap(srcTableId) + srcPartitionMap, err := 
j.srcMeta.GetPartitionRangeMap(srcTableId) if err != nil { j.setError(err) return } - destPartitionMap, err := job.destMeta.GetPartitionRangeMap(destTableId) + destPartitionMap, err := j.destMeta.GetPartitionRangeMap(destTableId) if err != nil { j.setError(err) return } for _, partitionRecord := range tableRecord.PartitionRecords { + if partitionRecord.IsTemp || j.srcMeta.IsPartitionDropped(partitionRecord.Id) { + continue + } rangeKey := partitionRecord.Range - // TODO(Improvment, Fix): this may happen after drop partition, can seek partition for more time, check from recycle bin if _, ok := srcPartitionMap[rangeKey]; !ok { err = xerror.Errorf(xerror.Meta, "partition range: %v not in src cluster", rangeKey) j.setError(err) @@ -464,6 +580,16 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) { // Step 2: prepare partitions for _, partitionRecord := range tableRecord.PartitionRecords { + if partitionRecord.IsTemp { + log.Debugf("skip ingest binlog to an temp partition, id: %d range: %s, version: %d", + partitionRecord.Id, partitionRecord.Range, partitionRecord.Version) + continue + } + if j.srcMeta.IsPartitionDropped(partitionRecord.Id) { + log.Infof("skip the dropped partition %d, range: %s, version: %d", + partitionRecord.Id, partitionRecord.Range, partitionRecord.Version) + continue + } j.preparePartition(srcTableId, destTableId, partitionRecord, tableRecord.IndexIds) } } @@ -471,16 +597,14 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) { func (j *IngestBinlogJob) prepareBackendMap() { log.Debug("prepareBackendMap") - job := j.ccrJob - var err error - j.srcBackendMap, err = job.srcMeta.GetBackendMap() + j.srcBackendMap, err = j.srcMeta.GetBackendMap() if err != nil { j.setError(err) return } - j.destBackendMap, err = job.destMeta.GetBackendMap() + j.destBackendMap, err = j.destMeta.GetBackendMap() if err != nil { j.setError(err) return @@ -512,8 +636,68 @@ func (j *IngestBinlogJob) runTabletIngestJobs() { j.wg.Wait() } +func (j *IngestBinlogJob) prepareMeta() { + log.Debug("prepareMeta") + srcTableIds := make([]int64, 0, len(j.tableRecords)) + job := j.ccrJob + factory := j.factory + + switch job.SyncType { + case DBSync: + for _, tableRecord := range j.tableRecords { + srcTableIds = append(srcTableIds, tableRecord.Id) + } + case TableSync: + srcTableIds = append(srcTableIds, job.Src.TableId) + default: + err := xerror.Panicf(xerror.Normal, "invalid sync type: %s", job.SyncType) + j.setError(err) + return + } + + srcMeta, err := factory.NewThriftMeta(&job.Src, j.ccrJob.factory, srcTableIds) + if err != nil { + j.setError(err) + return + } + + destTableIds := make([]int64, 0, len(j.tableRecords)) + switch job.SyncType { + case DBSync: + for _, srcTableId := range srcTableIds { + if destTableId, ok := j.tableMapping[srcTableId]; ok { + destTableIds = append(destTableIds, destTableId) + } else { + err := xerror.XWrapf(errNotFoundDestMappingTableId, "src table id: %d", srcTableId) + j.setError(err) + return + } + } + case TableSync: + destTableIds = append(destTableIds, job.Dest.TableId) + default: + err := xerror.Panicf(xerror.Normal, "invalid sync type: %s", job.SyncType) + j.setError(err) + return + } + + destMeta, err := factory.NewThriftMeta(&job.Dest, j.ccrJob.factory, destTableIds) + if err != nil { + j.setError(err) + return + } + + j.srcMeta = srcMeta + j.destMeta = destMeta +} + // TODO(Drogon): use monad error handle func (j *IngestBinlogJob) Run() { + j.prepareMeta() + if err := j.Error(); err != nil { + return + } + 
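// Illustrative sketch (not part of this diff) of the table-id mapping step in prepareMeta()
// above: under DB sync every upstream table id must resolve to a downstream id through the
// job's tableMapping, otherwise the ingest cannot proceed (errNotFoundDestMappingTableId in
// the diff). The error text below is a plain stand-in for that sentinel.
package example

import "fmt"

func mapDestTableIds(srcTableIds []int64, tableMapping map[int64]int64) ([]int64, error) {
	destTableIds := make([]int64, 0, len(srcTableIds))
	for _, srcId := range srcTableIds {
		destId, ok := tableMapping[srcId]
		if !ok {
			return nil, fmt.Errorf("not found dest mapping table id, src table id: %d", srcId)
		}
		destTableIds = append(destTableIds, destId)
	}
	return destTableIds, nil
}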
j.prepareBackendMap() if err := j.Error(); err != nil { return diff --git a/pkg/ccr/job.go b/pkg/ccr/job.go index d47f3317..cb35ee68 100644 --- a/pkg/ccr/job.go +++ b/pkg/ccr/job.go @@ -1,16 +1,17 @@ package ccr -// TODO: rewrite by state machine, such as first sync, full/incremental sync - import ( "context" "encoding/json" "errors" + "flag" "fmt" "math" "math/rand" + "regexp" "strings" "sync" + "sync/atomic" "time" "github.com/selectdb/ccr_syncer/pkg/ccr/base" @@ -19,6 +20,7 @@ import ( "github.com/selectdb/ccr_syncer/pkg/storage" utils "github.com/selectdb/ccr_syncer/pkg/utils" "github.com/selectdb/ccr_syncer/pkg/xerror" + "github.com/selectdb/ccr_syncer/pkg/xmetrics" festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" @@ -27,13 +29,50 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/modern-go/gls" log "github.com/sirupsen/logrus" - "go.uber.org/zap" ) const ( SYNC_DURATION = time.Second * 3 ) +var ( + featureSchemaChangePartialSync bool + featureCleanTableAndPartitions bool + featureAtomicRestore bool + featureCreateViewDropExists bool + featureReplaceNotMatchedWithAlias bool + featureFilterShadowIndexesUpsert bool + featureReuseRunningBackupRestoreJob bool + featureCompressedSnapshot bool + featureSkipRollupBinlogs bool + featureTxnInsert bool +) + +func init() { + flag.BoolVar(&featureSchemaChangePartialSync, "feature_schema_change_partial_sync", true, + "use partial sync when working with schema change") + + // The default value is false, since clean tables will erase views unexpectedly. + flag.BoolVar(&featureCleanTableAndPartitions, "feature_clean_table_and_partitions", false, + "clean non restored tables and partitions during fullsync") + flag.BoolVar(&featureAtomicRestore, "feature_atomic_restore", true, + "replace tables in atomic during fullsync (otherwise the dest table will not be able to read).") + flag.BoolVar(&featureCreateViewDropExists, "feature_create_view_drop_exists", true, + "drop the exists view if exists, when sync the creating view binlog") + flag.BoolVar(&featureReplaceNotMatchedWithAlias, "feature_replace_not_matched_with_alias", true, + "replace signature not matched tables with table alias during the full sync") + flag.BoolVar(&featureFilterShadowIndexesUpsert, "feature_filter_shadow_indexes_upsert", true, + "filter the upsert to the shadow indexes") + flag.BoolVar(&featureReuseRunningBackupRestoreJob, "feature_reuse_running_backup_restore_job", true, + "reuse the running backup/restore issued by the job self") + flag.BoolVar(&featureCompressedSnapshot, "feature_compressed_snapshot", true, + "compress the snapshot job info and meta") + flag.BoolVar(&featureSkipRollupBinlogs, "feature_skip_rollup_binlogs", false, + "skip the rollup related binlogs") + flag.BoolVar(&featureTxnInsert, "feature_txn_insert", false, + "enable txn insert support") +} + type SyncType int const ( @@ -71,42 +110,44 @@ func (j JobState) String() string { } } -// TODO: refactor merge Src && Isrc, Dest && IDest type Job struct { - SyncType SyncType `json:"sync_type"` - Name string `json:"name"` - Src base.Spec `json:"src"` - ISrc base.Specer `json:"-"` - srcMeta Metaer `json:"-"` - Dest base.Spec `json:"dest"` - IDest base.Specer `json:"-"` - destMeta Metaer `json:"-"` - State JobState `json:"state"` - destSrcTableIdMap map[int64]int64 `json:"-"` - progress *JobProgress `json:"-"` - db storage.DB `json:"-"` - jobFactory *JobFactory `json:"-"` - rpcFactory rpc.IRpcFactory `json:"-"` - stop 
chan struct{} `json:"-"` - lock sync.Mutex `json:"-"` + SyncType SyncType `json:"sync_type"` + Name string `json:"name"` + Src base.Spec `json:"src"` + ISrc base.Specer `json:"-"` + srcMeta Metaer `json:"-"` + Dest base.Spec `json:"dest"` + IDest base.Specer `json:"-"` + destMeta Metaer `json:"-"` + SkipError bool `json:"skip_error"` + State JobState `json:"state"` + + factory *Factory `json:"-"` + + allowTableExists bool `json:"-"` // Only for FirstRun(), don't need to persist. + forceFullsync bool `json:"-"` // Force job step fullsync, for test only. + + progress *JobProgress `json:"-"` + db storage.DB `json:"-"` + jobFactory *JobFactory `json:"-"` + rawStatus RawJobStatus `json:"-"` + + stop chan struct{} `json:"-"` + isDeleted atomic.Bool `json:"-"` + + concurrencyManager *rpc.ConcurrencyManager `json:"-"` + + lock sync.Mutex `json:"-"` } type JobContext struct { context.Context - src base.Spec - dest base.Spec - db storage.DB - factory *Factory -} - -func NewJobContext(src, dest base.Spec, db storage.DB, factory *Factory) *JobContext { - return &JobContext{ - Context: context.Background(), - src: src, - dest: dest, - db: db, - factory: factory, - } + Src base.Spec + Dest base.Spec + Db storage.DB + SkipError bool + AllowTableExists bool + Factory *Factory } // new job @@ -116,23 +157,29 @@ func NewJobFromService(name string, ctx context.Context) (*Job, error) { return nil, xerror.Errorf(xerror.Normal, "invalid context type: %T", ctx) } - metaFactory := jobContext.factory.MetaFactory - iSpecFactory := jobContext.factory.ISpecFactory - src := jobContext.src - dest := jobContext.dest + factory := jobContext.Factory + src := jobContext.Src + dest := jobContext.Dest job := &Job{ - Name: name, - Src: src, - ISrc: iSpecFactory.NewSpecer(&src), - srcMeta: metaFactory.NewMeta(&jobContext.src), - Dest: dest, - IDest: iSpecFactory.NewSpecer(&dest), - destMeta: metaFactory.NewMeta(&jobContext.dest), - State: JobRunning, - destSrcTableIdMap: make(map[int64]int64), - progress: nil, - db: jobContext.db, - stop: make(chan struct{}), + Name: name, + Src: src, + ISrc: factory.NewSpecer(&src), + srcMeta: factory.NewMeta(&jobContext.Src), + Dest: dest, + IDest: factory.NewSpecer(&dest), + destMeta: factory.NewMeta(&jobContext.Dest), + SkipError: jobContext.SkipError, + State: JobRunning, + + allowTableExists: jobContext.AllowTableExists, + factory: factory, + forceFullsync: false, + + progress: nil, + db: jobContext.Db, + stop: make(chan struct{}), + + concurrencyManager: rpc.NewConcurrencyManager(), } if err := job.valid(); err != nil { @@ -146,7 +193,6 @@ func NewJobFromService(name string, ctx context.Context) (*Job, error) { } job.jobFactory = NewJobFactory() - job.rpcFactory = jobContext.factory.RpcFactory return job, nil } @@ -157,16 +203,18 @@ func NewJobFromJson(jsonData string, db storage.DB, factory *Factory) (*Job, err if err != nil { return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal json failed, json: %s", jsonData) } - job.ISrc = factory.ISpecFactory.NewSpecer(&job.Src) - job.IDest = factory.ISpecFactory.NewSpecer(&job.Dest) - job.srcMeta = factory.MetaFactory.NewMeta(&job.Src) - job.destMeta = factory.MetaFactory.NewMeta(&job.Dest) - job.destSrcTableIdMap = make(map[int64]int64) + + // recover all not json fields + job.factory = factory + job.ISrc = factory.NewSpecer(&job.Src) + job.IDest = factory.NewSpecer(&job.Dest) + job.srcMeta = factory.NewMeta(&job.Src) + job.destMeta = factory.NewMeta(&job.Dest) job.progress = nil job.db = db job.stop = make(chan struct{}) job.jobFactory = 
NewJobFactory() - job.rpcFactory = factory.RpcFactory + job.concurrencyManager = rpc.NewConcurrencyManager() return &job, nil } @@ -200,13 +248,11 @@ func (j *Job) valid() error { } func (j *Job) RecoverDatabaseSync() error { - // TODO(Drogon): impl return nil } // database old data sync func (j *Job) DatabaseOldDataSync() error { - // TODO(Drogon): impl // Step 1: drop all tables err := j.IDest.ClearDB() if err != nil { @@ -220,16 +266,16 @@ func (j *Job) DatabaseOldDataSync() error { // database sync func (j *Job) DatabaseSync() error { - // TODO(Drogon): impl return nil } func (j *Job) genExtraInfo() (*base.ExtraInfo, error) { meta := j.srcMeta - masterToken, err := meta.GetMasterToken(j.rpcFactory) + masterToken, err := meta.GetMasterToken(j.factory) if err != nil { return nil, err } + log.Infof("gen extra info with master token %s", masterToken) backends, err := meta.GetBackends() if err != nil { @@ -240,7 +286,7 @@ func (j *Job) genExtraInfo() (*base.ExtraInfo, error) { beNetworkMap := make(map[int64]base.NetworkAddr) for _, backend := range backends { - log.Infof("backend: %v", backend) + log.Infof("gen extra info with backend: %v", backend) addr := base.NetworkAddr{ Ip: backend.Host, Port: backend.HttpPort, @@ -255,166 +301,297 @@ func (j *Job) genExtraInfo() (*base.ExtraInfo, error) { } func (j *Job) isIncrementalSync() bool { - return j.progress.SyncState == DBIncrementalSync || j.progress.SyncState == TableIncrementalSync + switch j.progress.SyncState { + case TableIncrementalSync, DBIncrementalSync, DBTablesIncrementalSync: + return true + default: + return false + } } -func (j *Job) fullSync() error { +func (j *Job) isTableSyncWithAlias() bool { + return j.SyncType == TableSync && j.Src.Table != j.Dest.Table +} + +func (j *Job) isTableDropped(tableId int64) (bool, error) { + // Keep compatible with the old version, which doesn't have the table id in partial sync data. 
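The zero-id guard that follows keeps older persisted progress working: partial-sync data written by earlier syncer versions carries no table id, so an id of 0 has to be treated as "not dropped" rather than looked up. A condensed sketch of that rule, with a hypothetical droppedChecker interface standing in for the point-in-time thrift meta (names here are illustrative, not part of this patch):

// droppedChecker models the single call the guard needs from the meta snapshot.
type droppedChecker interface {
    IsTableDropped(tableId int64) bool
}

// legacyAwareTableDropped treats id 0 as "not dropped" for progress records
// persisted before the table id was added, and otherwise asks the snapshot.
func legacyAwareTableDropped(meta droppedChecker, tableId int64) bool {
    if tableId == 0 { // legacy partial-sync progress without a table id
        return false
    }
    return meta.IsTableDropped(tableId)
}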
+ if tableId == 0 { + return false, nil + } + + var tableIds = []int64{tableId} + srcMeta, err := j.factory.NewThriftMeta(&j.Src, j.factory, tableIds) + if err != nil { + return false, err + } + + return srcMeta.IsTableDropped(tableId), nil +} + +func (j *Job) addExtraInfo(jobInfo []byte) ([]byte, error) { + var jobInfoMap map[string]interface{} + err := json.Unmarshal(jobInfo, &jobInfoMap) + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal jobInfo failed, jobInfo: %s", string(jobInfo)) + } + + extraInfo, err := j.genExtraInfo() + if err != nil { + return nil, err + } + log.Debugf("extraInfo: %v", extraInfo) + jobInfoMap["extra_info"] = extraInfo + + jobInfoBytes, err := json.Marshal(jobInfoMap) + if err != nil { + return nil, xerror.Errorf(xerror.Normal, "marshal jobInfo failed, jobInfo: %v", jobInfoMap) + } + + return jobInfoBytes, nil +} + +func (j *Job) handlePartialSyncTableNotFound() error { + tableId := j.progress.PartialSyncData.TableId + table := j.progress.PartialSyncData.Table + + if dropped, err := j.isTableDropped(tableId); err != nil { + return err + } else if dropped { + // skip this partial sync because table has been dropped + log.Warnf("skip this partial sync because table %s has been dropped, table id: %d", table, tableId) + nextCommitSeq := j.progress.CommitSeq + if j.SyncType == DBSync { + j.progress.NextWithPersist(nextCommitSeq, DBIncrementalSync, Done, "") + } else { + j.progress.NextWithPersist(nextCommitSeq, TableIncrementalSync, Done, "") + } + return nil + } else if newTableName, err := j.srcMeta.GetTableNameById(tableId); err != nil { + return err + } else if j.SyncType == DBSync { + // The table might be renamed, so we need to update the table name. + log.Warnf("force new partial snapshot, since table %d has renamed from %s to %s", tableId, table, newTableName) + replace := true // replace the old data to avoid blocking reading + return j.newPartialSnapshot(tableId, newTableName, nil, replace) + } else { + return xerror.Errorf(xerror.Normal, "table sync but table has renamed from %s to %s, table id %d", + table, newTableName, tableId) + } +} + +// Like fullSync, but only backup and restore partial of the partitions of a table. 
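partialSync below is a resumable state machine keyed on progress.SubSyncState: each invocation handles one state, records either a volatile step or a persisted checkpoint, and the polling states may be re-entered many times while the cluster-side job is still running. A minimal sketch of the step order, using a hypothetical step type only to make the flow explicit:

// The states a partial sync walks through; each constant mirrors a case in
// the switch below. Done starts a new partial snapshot; WaitBackupDone and
// WaitRestoreDone are re-entered until the backup/restore job finishes.
type partialSyncStep int

const (
    stepBeginCreateSnapshot partialSyncStep = iota // issue BACKUP for the table/partitions
    stepWaitBackupDone                             // poll the source until the backup job is done
    stepGetSnapshotInfo                            // fetch snapshot meta and job info from the source FE
    stepAddExtraInfo                               // inject master token and BE network addresses
    stepRestoreSnapshot                            // issue RESTORE against the destination FE
    stepWaitRestoreDone                            // poll the destination until the restore job is done
    stepPersistRestoreInfo                         // update table mapping / commit seq and persist progress
)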
+func (j *Job) partialSync() error { type inMemoryData struct { SnapshotName string `json:"snapshot_name"` SnapshotResp *festruct.TGetSnapshotResult_ `json:"snapshot_resp"` TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` + TableNameMapping map[int64]string `json:"table_name_mapping"` + RestoreLabel string `json:"restore_label"` + } + + if j.progress.PartialSyncData == nil { + return xerror.Errorf(xerror.Normal, "run partial sync but data is nil") } - // TODO: snapshot machine, not need create snapshot each time - // TODO(Drogon): check last snapshot commitSeq > first commitSeq, maybe we can reuse this snapshot + tableId := j.progress.PartialSyncData.TableId + table := j.progress.PartialSyncData.Table + partitions := j.progress.PartialSyncData.Partitions switch j.progress.SubSyncState { case Done: - if err := j.newSnapshot(j.progress.CommitSeq); err != nil { + log.Infof("partial sync status: done") + withAlias := len(j.progress.TableAliases) > 0 + if err := j.newPartialSnapshot(tableId, table, partitions, withAlias); err != nil { return err } case BeginCreateSnapshot: // Step 1: Create snapshot - log.Infof("fullsync status: create snapshot") + prefix := NewPartialSnapshotLabelPrefix(j.Name, j.progress.SyncId) + log.Infof("partial sync status: create snapshot with prefix %s", prefix) - backupTableList := make([]string, 0) - switch j.SyncType { - case DBSync: - tables, err := j.srcMeta.GetTables() + if featureReuseRunningBackupRestoreJob { + snapshotName, err := j.ISrc.GetValidBackupJob(prefix) if err != nil { return err } - for _, table := range tables { - backupTableList = append(backupTableList, table.Name) + if snapshotName != "" { + log.Infof("partial sync status: find a valid backup job %s", snapshotName) + j.progress.NextSubVolatile(WaitBackupDone, snapshotName) + return nil } - case TableSync: - backupTableList = append(backupTableList, j.Src.Table) - default: - return xerror.Errorf(xerror.Normal, "invalid sync type %s", j.SyncType) } - snapshotName, err := j.ISrc.CreateSnapshotAndWaitForDone(backupTableList) + + snapshotName := NewLabelWithTs(prefix) + err := j.ISrc.CreatePartialSnapshot(snapshotName, table, partitions) + if err != nil && err == base.ErrBackupPartitionNotFound { + log.Warnf("partial sync status: partition not found in the upstream, step to table partial sync") + replace := true // replace the old data to avoid blocking reading + return j.newPartialSnapshot(tableId, table, nil, replace) + } else if err != nil && err == base.ErrBackupTableNotFound { + return j.handlePartialSyncTableNotFound() + } else if err != nil { + return err + } + + j.progress.NextSubVolatile(WaitBackupDone, snapshotName) + return nil + + case WaitBackupDone: + // Step 2: Wait backup job done + snapshotName := j.progress.InMemoryData.(string) + backupFinished, err := j.ISrc.CheckBackupFinished(snapshotName) if err != nil { + j.progress.NextSubVolatile(BeginCreateSnapshot, snapshotName) return err } + if !backupFinished { + log.Infof("partial sync status: backup job %s is running", snapshotName) + return nil + } + j.progress.NextSubCheckpoint(GetSnapshotInfo, snapshotName) case GetSnapshotInfo: - // Step 2: Get snapshot info - log.Infof("fullsync status: get snapshot info") + // Step 3: Get snapshot info + log.Infof("partial sync status: get snapshot info") snapshotName := j.progress.PersistData src := &j.Src - srcRpc, err := j.rpcFactory.NewFeRpc(src) + srcRpc, err := j.factory.NewFeRpc(src) if err != nil { return err } - log.Debugf("begin get snapshot %s", snapshotName) - 
snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName) + log.Debugf("partial sync begin get snapshot %s", snapshotName) + compress := false // partial snapshot no need to compress + snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName, compress) if err != nil { return err } - if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK { - log.Errorf("get snapshot failed, status: %v", snapshotResp.Status) + if snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_NOT_EXIST || + snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_EXPIRED { + log.Warnf("get snapshot %s: %s (%s), retry with new partial sync", snapshotName, + utils.FirstOr(snapshotResp.Status.GetErrorMsgs(), "unknown"), + snapshotResp.Status.GetStatusCode()) + replace := len(j.progress.TableAliases) > 0 + return j.newPartialSnapshot(tableId, table, partitions, replace) + } else if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK { + err = xerror.Errorf(xerror.FE, "get snapshot failed, status: %v", snapshotResp.Status) + return err } - log.Debugf("job: %s", string(snapshotResp.GetJobInfo())) if !snapshotResp.IsSetJobInfo() { return xerror.New(xerror.Normal, "jobInfo is not set") } - tableCommitSeqMap, err := ExtractTableCommitSeqMap(snapshotResp.GetJobInfo()) + log.Tracef("job: %.128s", snapshotResp.GetJobInfo()) + + backupJobInfo, err := NewBackupJobInfoFromJson(snapshotResp.GetJobInfo()) if err != nil { return err } - if j.SyncType == TableSync { - if _, ok := tableCommitSeqMap[j.Src.TableId]; !ok { - return xerror.Errorf(xerror.Normal, "tableid %d, commit seq not found", j.Src.TableId) + tableCommitSeqMap := backupJobInfo.TableCommitSeqMap + tableNameMapping := backupJobInfo.TableNameMapping() + log.Debugf("table commit seq map: %v, table name mapping: %v", tableCommitSeqMap, tableNameMapping) + if backupObject, ok := backupJobInfo.BackupObjects[table]; !ok { + return xerror.Errorf(xerror.Normal, "table %s not found in backup objects", table) + } else if backupObject.Id != tableId { + log.Warnf("partial sync table %s id not match, force full sync. 
table id %d, backup object id %d", + table, tableId, backupObject.Id) + if j.SyncType == TableSync { + log.Infof("reset src table id from %d to %d, table %s", j.Src.TableId, backupObject.Id, table) + j.Src.TableId = backupObject.Id } + return j.newSnapshot(j.progress.CommitSeq) + } else if _, ok := tableCommitSeqMap[backupObject.Id]; !ok { + return xerror.Errorf(xerror.Normal, "commit seq not found, table id %d, table name: %s", backupObject.Id, table) } inMemoryData := &inMemoryData{ SnapshotName: snapshotName, SnapshotResp: snapshotResp, TableCommitSeqMap: tableCommitSeqMap, + TableNameMapping: tableNameMapping, } j.progress.NextSubVolatile(AddExtraInfo, inMemoryData) case AddExtraInfo: - // Step 3: Add extra info - log.Infof("fullsync status: add extra info") + // Step 4: Add extra info + log.Infof("partial sync status: add extra info") inMemoryData := j.progress.InMemoryData.(*inMemoryData) snapshotResp := inMemoryData.SnapshotResp jobInfo := snapshotResp.GetJobInfo() - tableCommitSeqMap := inMemoryData.TableCommitSeqMap - var jobInfoMap map[string]interface{} - err := json.Unmarshal(jobInfo, &jobInfoMap) - if err != nil { - return xerror.Wrapf(err, xerror.Normal, "unmarshal jobInfo failed, jobInfo: %s", string(jobInfo)) - } - log.Debugf("jobInfo: %v", jobInfoMap) + log.Infof("partial sync snapshot response meta size: %d, job info size: %d, expired at: %d", + len(snapshotResp.Meta), len(snapshotResp.JobInfo), snapshotResp.GetExpiredAt()) - extraInfo, err := j.genExtraInfo() + jobInfoBytes, err := j.addExtraInfo(jobInfo) if err != nil { return err } - log.Debugf("extraInfo: %v", extraInfo) - jobInfoMap["extra_info"] = extraInfo - jobInfoBytes, err := json.Marshal(jobInfoMap) - if err != nil { - return xerror.Errorf(xerror.Normal, "marshal jobInfo failed, jobInfo: %v", jobInfoMap) - } - log.Debugf("jobInfoBytes: %s", string(jobInfoBytes)) + log.Debugf("partial sync job info size: %d, bytes: %.128s", len(jobInfoBytes), string(jobInfoBytes)) snapshotResp.SetJobInfo(jobInfoBytes) - var commitSeq int64 = math.MaxInt64 - switch j.SyncType { - case DBSync: - for _, seq := range tableCommitSeqMap { - commitSeq = utils.Min(commitSeq, seq) - } - j.progress.TableCommitSeqMap = tableCommitSeqMap // persist in CommitNext - case TableSync: - commitSeq = tableCommitSeqMap[j.Src.TableId] - } - j.progress.CommitNextSubWithPersist(commitSeq, RestoreSnapshot, inMemoryData) + j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData) case RestoreSnapshot: - // Step 4: Restore snapshot - log.Infof("fullsync status: restore snapshot") + // Step 5: Restore snapshot + log.Infof("partial sync status: restore snapshot") if j.progress.InMemoryData == nil { persistData := j.progress.PersistData inMemoryData := &inMemoryData{} if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil { - // TODO: return to snapshot return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData) } j.progress.InMemoryData = inMemoryData } - // Step 4.1: start a new fullsync && persist + // Step 5.1: try reuse the exists restore job. 
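With feature_reuse_running_backup_restore_job enabled, the syncer first looks for a backup or restore job it issued itself (recognised by the label prefix) before submitting a new one, so a restarted syncer attaches to work already in flight instead of repeating it. A small sketch of that decision, assuming a jobFinder interface that loosely mirrors GetValidBackupJob/GetValidRestoreJob (illustrative names):

// jobFinder reports a still-usable backup/restore job label for the given
// prefix, or "" when nothing can be reused.
type jobFinder interface {
    GetValidRunningJob(labelPrefix string) (string, error)
}

// reuseOrCreate returns an existing job label when one can be reused and
// otherwise calls create to submit a fresh, timestamp-labelled job.
func reuseOrCreate(finder jobFinder, labelPrefix string, reuseEnabled bool,
    create func() (string, error)) (string, error) {
    if reuseEnabled {
        if label, err := finder.GetValidRunningJob(labelPrefix); err != nil {
            return "", err
        } else if label != "" {
            return label, nil // attach to the job issued before the restart
        }
    }
    return create()
}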
inMemoryData := j.progress.InMemoryData.(*inMemoryData) snapshotName := inMemoryData.SnapshotName + if featureReuseRunningBackupRestoreJob { + name, err := j.IDest.GetValidRestoreJob(snapshotName) + if err != nil { + return nil + } + if name != "" { + log.Infof("partial sync status: find a valid restore job %s", name) + inMemoryData.RestoreLabel = name + j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData) + break + } + } + + // Step 5.2: start a new fullsync & restore snapshot to dest + restoreSnapshotName := NewRestoreLabel(snapshotName) snapshotResp := inMemoryData.SnapshotResp - // Step 4.2: restore snapshot to dest dest := &j.Dest - destRpc, err := j.rpcFactory.NewFeRpc(dest) + destRpc, err := j.factory.NewFeRpc(dest) if err != nil { return err } - log.Debugf("begin restore snapshot %s", snapshotName) + log.Debugf("partial sync begin restore snapshot %s to %s", snapshotName, restoreSnapshotName) var tableRefs []*festruct.TTableRef - if j.Src.IsSameHostDB(&j.Dest) { - log.Debugf("same host db, table: %s, dest table: %s", j.Src.Table, j.Dest.Table) + + // ATTN: The table name of the alias is from the source cluster. + if aliasName, ok := j.progress.TableAliases[table]; ok { + log.Infof("partial sync with table alias, table: %s, alias: %s", table, aliasName) + tableRefs = make([]*festruct.TTableRef, 0) + tableRef := &festruct.TTableRef{ + Table: &table, + AliasName: &aliasName, + } + tableRefs = append(tableRefs, tableRef) + } else if j.isTableSyncWithAlias() { + log.Infof("table sync snapshot not same name, table: %s, dest table: %s", j.Src.Table, j.Dest.Table) tableRefs = make([]*festruct.TTableRef, 0) tableRef := &festruct.TTableRef{ Table: &j.Src.Table, @@ -422,618 +599,2175 @@ func (j *Job) fullSync() error { } tableRefs = append(tableRefs, tableRef) } - restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, snapshotName, snapshotResp) + + restoreReq := rpc.RestoreSnapshotRequest{ + TableRefs: tableRefs, + SnapshotName: restoreSnapshotName, + SnapshotResult: snapshotResp, + + // DO NOT drop exists tables and partitions + CleanPartitions: false, + CleanTables: false, + AtomicRestore: false, + Compress: false, + } + restoreResp, err := destRpc.RestoreSnapshot(dest, &restoreReq) if err != nil { return err } - log.Infof("resp: %v", restoreResp) + if restoreResp.Status.GetStatusCode() != tstatus.TStatusCode_OK { + return xerror.Errorf(xerror.Normal, "restore snapshot failed, status: %v", restoreResp.Status) + } + log.Infof("partial sync restore snapshot resp: %v", restoreResp) + inMemoryData.RestoreLabel = restoreSnapshotName + + j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData) + return nil + + case WaitRestoreDone: + // Step 6: Wait restore job done + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + restoreSnapshotName := inMemoryData.RestoreLabel + snapshotResp := inMemoryData.SnapshotResp - // TODO: impl wait for done, use show restore - restoreFinished, err := j.IDest.CheckRestoreFinished(snapshotName) + if snapshotResp.GetExpiredAt() > 0 && time.Now().UnixMilli() > snapshotResp.GetExpiredAt() { + log.Infof("partial sync snapshot %s is expired, cancel and retry with new partial sync", restoreSnapshotName) + if err := j.IDest.CancelRestoreIfExists(restoreSnapshotName); err != nil { + return err + } + replace := len(j.progress.TableAliases) > 0 + return j.newPartialSnapshot(tableId, table, partitions, replace) + } + + restoreFinished, err := j.IDest.CheckRestoreFinished(restoreSnapshotName) if err != nil { + j.progress.NextSubVolatile(RestoreSnapshot, 
inMemoryData) return err } + if !restoreFinished { - err = xerror.Errorf(xerror.Normal, "check restore state timeout, max try times: %d", base.MAX_CHECK_RETRY_TIMES) - return err + log.Infof("partial sync status: restore job %s is running", restoreSnapshotName) + return nil } - j.progress.NextSubCheckpoint(PersistRestoreInfo, snapshotName) + + // save the entire commit seq map, this value will be used in PersistRestoreInfo. + j.progress.TableCommitSeqMap = utils.MergeMap( + j.progress.TableCommitSeqMap, inMemoryData.TableCommitSeqMap) + j.progress.TableNameMapping = utils.MergeMap( + j.progress.TableNameMapping, inMemoryData.TableNameMapping) + j.progress.NextSubCheckpoint(PersistRestoreInfo, restoreSnapshotName) case PersistRestoreInfo: - // Step 5: Update job progress && dest table id + // Step 7: Update job progress && dest table id // update job info, only for dest table id - log.Infof("fullsync status: persist restore info") + var targetName = table + if j.isTableSyncWithAlias() { + targetName = j.Dest.Table + } + if alias, ok := j.progress.TableAliases[table]; ok { + // check table exists to ensure the idempotent + if exist, err := j.IDest.CheckTableExistsByName(alias); err != nil { + return err + } else if exist { + if exists, err := j.IDest.CheckTableExistsByName(targetName); err != nil { + return err + } else if exists { + log.Infof("partial sync swap table with alias, table: %s, alias: %s", targetName, alias) + swap := false // drop the old table + if err := j.IDest.ReplaceTable(alias, targetName, swap); err != nil { + return err + } + } else { + log.Infof("partial sync rename table alias %s to %s", alias, targetName) + if err := j.IDest.RenameTableWithName(alias, targetName); err != nil { + return err + } + } + // Since the meta of dest table has been changed, refresh it. 
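The alias handling just above is deliberately idempotent: this step may be re-run after a crash, so it first checks whether the alias table still exists and only then decides between replacing the stale target and a plain rename; afterwards the cached destination meta is cleared so later lookups see the new table id. A condensed sketch of that decision, with an illustrative destTableOps interface:

// destTableOps models the three destination calls the swap needs.
type destTableOps interface {
    CheckTableExistsByName(name string) (bool, error)
    ReplaceTable(from, to string, swap bool) error
    RenameTableWithName(from, to string) error
}

// resolveAlias promotes the restored alias table to its target name. If the
// alias is already gone, the swap happened in a previous attempt and the call
// is a no-op, so it stays safe to retry.
func resolveAlias(dest destTableOps, alias, target string) error {
    exists, err := dest.CheckTableExistsByName(alias)
    if err != nil || !exists {
        return err // already swapped, or the lookup failed
    }
    if targetExists, err := dest.CheckTableExistsByName(target); err != nil {
        return err
    } else if targetExists {
        return dest.ReplaceTable(alias, target, false) // drop the stale target
    }
    return dest.RenameTableWithName(alias, target)
}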
+ j.destMeta.ClearTablesCache() + } else { + log.Infof("partial sync the table alias has been swapped, table: %s, alias: %s", targetName, alias) + } - // TODO: retry && mark it for not start a new full sync + // Save the replace result + j.progress.TableAliases = nil + j.progress.NextSubCheckpoint(PersistRestoreInfo, j.progress.PersistData) + } + + log.Infof("partial sync status: persist restore info") + destTable, err := j.destMeta.UpdateTable(targetName, 0) + if err != nil { + return err + } switch j.SyncType { case DBSync: + j.progress.TableMapping[tableId] = destTable.Id j.progress.NextWithPersist(j.progress.CommitSeq, DBTablesIncrementalSync, Done, "") case TableSync: - if destTable, err := j.destMeta.UpdateTable(j.Dest.Table, 0); err != nil { - return err - } else { - j.Dest.TableId = destTable.Id - } - - // TODO: reload check job table id - if err := j.persistJob(); err != nil { - return err + commitSeq, ok := j.progress.TableCommitSeqMap[j.Src.TableId] + if !ok { + return xerror.Errorf(xerror.Normal, "table id %d, commit seq not found", j.Src.TableId) } - + j.Dest.TableId = destTable.Id + j.progress.TableMapping = nil j.progress.TableCommitSeqMap = nil - j.progress.NextWithPersist(j.progress.CommitSeq, TableIncrementalSync, Done, "") + j.progress.NextWithPersist(commitSeq, TableIncrementalSync, Done, "") default: return xerror.Errorf(xerror.Normal, "invalid sync type %d", j.SyncType) } return nil + default: return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState) } - return j.fullSync() -} - -func (j *Job) persistJob() error { - data, err := json.Marshal(j) - if err != nil { - return xerror.Errorf(xerror.Normal, "marshal job failed, job: %v", j) - } - - if err := j.db.UpdateJob(j.Name, string(data)); err != nil { - return err - } - - return nil + return j.partialSync() } -// FIXME: label will conflict when commitSeq equal -func (j *Job) newLabel(commitSeq int64) string { - src := &j.Src - dest := &j.Dest - randNum := rand.Intn(65536) // hex 4 chars - if j.SyncType == DBSync { - // label "ccrj-rand:${sync_type}:${src_db_id}:${dest_db_id}:${commit_seq}" - return fmt.Sprintf("ccrj-%x:%s:%d:%d:%d", randNum, j.SyncType, src.DbId, dest.DbId, commitSeq) - } else { - // TableSync - // label "ccrj-rand:${sync_type}:${src_db_id}_${src_table_id}:${dest_db_id}_${dest_table_id}:${commit_seq}" - return fmt.Sprintf("ccrj-%x:%s:%d_%d:%d_%d:%d", randNum, j.SyncType, src.DbId, src.TableId, dest.DbId, dest.TableId, commitSeq) +func (j *Job) fullSync() error { + type inMemoryData struct { + SnapshotName string `json:"snapshot_name"` + SnapshotResp *festruct.TGetSnapshotResult_ `json:"snapshot_resp"` + TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` + TableNameMapping map[int64]string `json:"table_name_mapping"` + Views []string `json:"views"` + RestoreLabel string `json:"restore_label"` } -} -// only called by DBSync, TableSync tableId is in Src/Dest Spec -// TODO: [Performance] improve by cache -func (j *Job) getDestTableIdBySrc(srcTableId int64) (int64, error) { - if destTableId, ok := j.destSrcTableIdMap[srcTableId]; ok { - return destTableId, nil - } + switch j.progress.SubSyncState { + case Done: + log.Infof("fullsync status: done") + if err := j.newSnapshot(j.progress.CommitSeq); err != nil { + return err + } - srcTableName, err := j.srcMeta.GetTableNameById(srcTableId) - if err != nil { - return 0, err - } + case BeginCreateSnapshot: + // Step 1: Create snapshot + prefix := NewSnapshotLabelPrefix(j.Name, j.progress.SyncId) + 
log.Infof("fullsync status: create snapshot with prefix %s", prefix) - if destTableId, err := j.destMeta.GetTableId(srcTableName); err != nil { - return 0, err - } else { - j.destSrcTableIdMap[srcTableId] = destTableId - return destTableId, nil - } -} + if featureReuseRunningBackupRestoreJob { + snapshotName, err := j.ISrc.GetValidBackupJob(prefix) + if err != nil { + return err + } + if snapshotName != "" { + log.Infof("fullsync status: find a valid backup job %s", snapshotName) + j.progress.NextSubVolatile(WaitBackupDone, snapshotName) + return nil + } + } -func (j *Job) getDbSyncTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) { - commitSeq := upsert.CommitSeq - tableCommitSeqMap := j.progress.TableCommitSeqMap - tableRecords := make([]*record.TableRecord, 0, len(upsert.TableRecords)) + backupTableList := make([]string, 0) + switch j.SyncType { + case DBSync: + tables, err := j.srcMeta.GetTables() + if err != nil { + return err + } + if len(tables) == 0 { + log.Warnf("full sync but source db is empty! retry later") + return nil + } + case TableSync: + backupTableList = append(backupTableList, j.Src.Table) + default: + return xerror.Errorf(xerror.Normal, "invalid sync type %s", j.SyncType) + } - for tableId, tableRecord := range upsert.TableRecords { - // DBIncrementalSync - if tableCommitSeqMap == nil { - tableRecords = append(tableRecords, tableRecord) - continue + snapshotName := NewLabelWithTs(prefix) + if err := j.ISrc.CreateSnapshot(snapshotName, backupTableList); err != nil { + return err } + j.progress.NextSubVolatile(WaitBackupDone, snapshotName) + return nil - if tableCommitSeq, ok := tableCommitSeqMap[tableId]; ok { - if commitSeq > tableCommitSeq { - tableRecords = append(tableRecords, tableRecord) - } - } else { - // TODO: check + case WaitBackupDone: + // Step 2: Wait backup job done + snapshotName := j.progress.InMemoryData.(string) + backupFinished, err := j.ISrc.CheckBackupFinished(snapshotName) + if err != nil { + j.progress.NextSubVolatile(BeginCreateSnapshot, snapshotName) + return err + } + if !backupFinished { + log.Infof("fullsync status: backup job %s is running", snapshotName) + return nil } - } - return tableRecords, nil -} + j.progress.NextSubCheckpoint(GetSnapshotInfo, snapshotName) -func (j *Job) getReleatedTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) { - var tableRecords []*record.TableRecord //, 0, len(upsert.TableRecords)) + case GetSnapshotInfo: + // Step 3: Get snapshot info + log.Infof("fullsync status: get snapshot info") - switch j.SyncType { - case DBSync: - records, err := j.getDbSyncTableRecords(upsert) + snapshotName := j.progress.PersistData + src := &j.Src + srcRpc, err := j.factory.NewFeRpc(src) if err != nil { - return nil, err + return err } - if len(records) == 0 { - return nil, nil + log.Debugf("fullsync begin get snapshot %s", snapshotName) + compress := false + snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName, compress) + if err != nil { + return err } - tableRecords = records - case TableSync: - tableRecord, ok := upsert.TableRecords[j.Src.TableId] - if !ok { - return nil, xerror.Errorf(xerror.Normal, "table record not found, table: %s", j.Src.Table) + + if snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_NOT_EXIST || + snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_EXPIRED { + log.Warnf("get snapshot %s: %s (%s), retry with new full sync", snapshotName, + utils.FirstOr(snapshotResp.Status.GetErrorMsgs(), "unknown"), + 
snapshotResp.Status.GetStatusCode()) + return j.newSnapshot(j.progress.CommitSeq) + } else if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK { + err = xerror.Errorf(xerror.FE, "get snapshot failed, status: %v", snapshotResp.Status) + return err } + + if !snapshotResp.IsSetJobInfo() { + return xerror.New(xerror.Normal, "jobInfo is not set") + } + + if snapshotResp.GetCompressed() { + if bytes, err := utils.GZIPDecompress(snapshotResp.GetJobInfo()); err != nil { + return xerror.Wrap(err, xerror.Normal, "decompress snapshot job info failed") + } else { + snapshotResp.SetJobInfo(bytes) + } + if bytes, err := utils.GZIPDecompress(snapshotResp.GetMeta()); err != nil { + return xerror.Wrap(err, xerror.Normal, "decompress snapshot meta failed") + } else { + snapshotResp.SetMeta(bytes) + } + } + + log.Tracef("fullsync snapshot job: %.128s", snapshotResp.GetJobInfo()) + backupJobInfo, err := NewBackupJobInfoFromJson(snapshotResp.GetJobInfo()) + if err != nil { + return err + } + + tableCommitSeqMap := backupJobInfo.TableCommitSeqMap + tableNameMapping := backupJobInfo.TableNameMapping() + views := backupJobInfo.Views() + + if j.SyncType == TableSync { + if backupObject, ok := backupJobInfo.BackupObjects[j.Src.Table]; !ok { + return xerror.Errorf(xerror.Normal, "table %s not found in backup objects", j.Src.Table) + } else if backupObject.Id != j.Src.TableId { + // Might be the table has been replace. + log.Warnf("full sync table %s id not match, force full sync and reset table id from %d to %d", + j.Src.Table, j.Src.TableId, backupObject.Id) + j.Src.TableId = backupObject.Id + return j.newSnapshot(j.progress.CommitSeq) + } else if _, ok := tableCommitSeqMap[j.Src.TableId]; !ok { + return xerror.Errorf(xerror.Normal, "table id %d, commit seq not found", j.Src.TableId) + } + } else { + // save the view ids in the table commit seq map, to build the view mapping latter. 
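Recording the view ids here, with the snapshot-level commit seq (which may be zero), lets the later mapping step resolve views by name as well; the zero placeholders are then skipped when the job's overall commit seq is computed, so a view never drags the starting point back. A small sketch of that minimum computation under this assumption:

// minTableCommitSeq picks the smallest per-table commit seq while ignoring
// entries recorded as 0 (view placeholders without a real commit seq); the
// snapshot-level commit seq, when present, is folded in as an upper bound.
func minTableCommitSeq(tableCommitSeqMap map[int64]int64, snapshotSeq int64) int64 {
    min := int64(1<<63 - 1) // math.MaxInt64
    for _, seq := range tableCommitSeqMap {
        if seq == 0 {
            continue // view placeholder, not a table commit seq
        }
        if seq < min {
            min = seq
        }
    }
    if snapshotSeq > 0 && snapshotSeq < min {
        min = snapshotSeq
    }
    return min
}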
+ for _, view := range backupJobInfo.NewBackupObjects.Views { + tableNameMapping[view.Id] = view.Name + tableCommitSeqMap[view.Id] = snapshotResp.GetCommitSeq() // zero if not exists + } + } + + inMemoryData := &inMemoryData{ + SnapshotName: snapshotName, + SnapshotResp: snapshotResp, + TableCommitSeqMap: tableCommitSeqMap, + TableNameMapping: tableNameMapping, + Views: views, + } + j.progress.NextSubVolatile(AddExtraInfo, inMemoryData) + + case AddExtraInfo: + // Step 4: Add extra info + log.Infof("fullsync status: add extra info") + + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + snapshotResp := inMemoryData.SnapshotResp + jobInfo := snapshotResp.GetJobInfo() + + log.Infof("snapshot response meta size: %d, job info size: %d, expired at: %d, commit seq: %d", + len(snapshotResp.Meta), len(snapshotResp.JobInfo), snapshotResp.GetExpiredAt(), snapshotResp.GetCommitSeq()) + + jobInfoBytes, err := j.addExtraInfo(jobInfo) + if err != nil { + return err + } + log.Debugf("job info size: %d, bytes: %.128s", len(jobInfoBytes), string(jobInfoBytes)) + snapshotResp.SetJobInfo(jobInfoBytes) + + j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData) + + case RestoreSnapshot: + // Step 5: Restore snapshot + log.Infof("fullsync status: restore snapshot") + + if j.progress.InMemoryData == nil { + persistData := j.progress.PersistData + inMemoryData := &inMemoryData{} + if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil { + return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData) + } + j.progress.InMemoryData = inMemoryData + } + + // Step 5.1: cancel the running restore job which by the former process, if exists + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + snapshotName := inMemoryData.SnapshotName + if featureReuseRunningBackupRestoreJob { + restoreSnapshotName, err := j.IDest.GetValidRestoreJob(snapshotName) + if err != nil { + return nil + } + if restoreSnapshotName != "" { + log.Infof("fullsync status: find a valid restore job %s", restoreSnapshotName) + inMemoryData.RestoreLabel = restoreSnapshotName + j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData) + break + } + } + + // Step 5.2: start a new fullsync & restore snapshot to dest + restoreSnapshotName := NewRestoreLabel(snapshotName) + snapshotResp := inMemoryData.SnapshotResp + tableNameMapping := inMemoryData.TableNameMapping + + dest := &j.Dest + destRpc, err := j.factory.NewFeRpc(dest) + if err != nil { + return err + } + log.Debugf("begin restore snapshot %s to %s", snapshotName, restoreSnapshotName) + + var tableRefs []*festruct.TTableRef + if j.isTableSyncWithAlias() { + log.Debugf("table sync snapshot not same name, table: %s, dest table: %s", j.Src.Table, j.Dest.Table) + tableRefs = make([]*festruct.TTableRef, 0) + tableRef := &festruct.TTableRef{ + Table: &j.Src.Table, + AliasName: &j.Dest.Table, + } + tableRefs = append(tableRefs, tableRef) + } + if len(j.progress.TableAliases) > 0 { + tableRefs = make([]*festruct.TTableRef, 0) + viewMap := make(map[string]interface{}) + for _, viewName := range inMemoryData.Views { + log.Debugf("fullsync alias with view ref %s", viewName) + viewMap[viewName] = nil + tableRef := &festruct.TTableRef{Table: utils.ThriftValueWrapper(viewName)} + tableRefs = append(tableRefs, tableRef) + } + for _, tableName := range tableNameMapping { + if alias, ok := j.progress.TableAliases[tableName]; ok { + log.Debugf("fullsync alias skip table ref %s because it has alias %s", tableName, alias) + continue + } + if _, ok := 
viewMap[tableName]; ok { + continue + } + log.Debugf("fullsync alias with table ref %s", tableName) + tableRef := &festruct.TTableRef{Table: utils.ThriftValueWrapper(tableName)} + tableRefs = append(tableRefs, tableRef) + } + for table, alias := range j.progress.TableAliases { + log.Infof("fullsync alias table from %s to %s", table, alias) + tableRef := &festruct.TTableRef{ + Table: utils.ThriftValueWrapper(table), + AliasName: utils.ThriftValueWrapper(alias), + } + tableRefs = append(tableRefs, tableRef) + } + } + + compress := false + if featureCompressedSnapshot { + if enable, err := j.IDest.IsEnableRestoreSnapshotCompression(); err != nil { + return xerror.Wrap(err, xerror.Normal, "check enable restore snapshot compression failed") + } else { + compress = enable + } + } + restoreReq := rpc.RestoreSnapshotRequest{ + TableRefs: tableRefs, + SnapshotName: restoreSnapshotName, + SnapshotResult: snapshotResp, + CleanPartitions: false, + CleanTables: false, + AtomicRestore: false, + Compress: compress, + } + if featureCleanTableAndPartitions { + // drop exists partitions, and drop tables if in db sync. + restoreReq.CleanPartitions = true + if j.SyncType == DBSync { + restoreReq.CleanTables = true + } + } + if featureAtomicRestore { + restoreReq.AtomicRestore = true + } + restoreResp, err := destRpc.RestoreSnapshot(dest, &restoreReq) + if err != nil { + return err + } + if restoreResp.Status.GetStatusCode() != tstatus.TStatusCode_OK { + return xerror.Errorf(xerror.Normal, "restore snapshot failed, status: %v", restoreResp.Status) + } + log.Infof("resp: %v", restoreResp) + + inMemoryData.RestoreLabel = restoreSnapshotName + j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData) + return nil + + case WaitRestoreDone: + // Step 6: Wait restore job done + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + restoreSnapshotName := inMemoryData.RestoreLabel + tableNameMapping := inMemoryData.TableNameMapping + snapshotResp := inMemoryData.SnapshotResp + + if snapshotResp.GetExpiredAt() > 0 && time.Now().UnixMilli() > snapshotResp.GetExpiredAt() { + log.Infof("fullsync snapshot %s is expired, cancel and retry with new full sync", restoreSnapshotName) + if err := j.IDest.CancelRestoreIfExists(restoreSnapshotName); err != nil { + return err + } + return j.newSnapshot(j.progress.CommitSeq) + } + + for { + restoreFinished, err := j.IDest.CheckRestoreFinished(restoreSnapshotName) + if err != nil && errors.Is(err, base.ErrRestoreSignatureNotMatched) { + // We need rebuild the exists table. 
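A signature mismatch reported by the restore means the destination object no longer matches the schema in the snapshot. With feature_replace_not_matched_with_alias the table is restored under an alias and swapped in afterwards, so downstream reads are not interrupted; views, and tables when the feature is off, take the drop-and-retry path instead. A compressed sketch of that branch (types are illustrative):

// mismatchAction describes how to recover from a signature-not-matched error.
type mismatchAction int

const (
    restoreUnderAlias mismatchAction = iota // restore into an alias, swap it in later
    dropAndRetry                            // drop the stale object and restore again
)

// onSignatureMismatch mirrors the decision below: only tables can go through
// the alias path, and only when the replace-with-alias feature is enabled.
func onSignatureMismatch(isTable, replaceWithAlias bool) mismatchAction {
    if isTable && replaceWithAlias {
        return restoreUnderAlias
    }
    return dropAndRetry
}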
+ var tableName string + var tableOrView bool = true + if j.SyncType == TableSync { + tableName = j.Dest.Table + } else { + tableName, tableOrView, err = j.IDest.GetRestoreSignatureNotMatchedTableOrView(restoreSnapshotName) + if err != nil || len(tableName) == 0 { + continue + } + } + + resource := "table" + if !tableOrView { + resource = "view" + } + log.Infof("the signature of %s %s is not matched with the target table in snapshot", resource, tableName) + if tableOrView && featureReplaceNotMatchedWithAlias { + if j.progress.TableAliases == nil { + j.progress.TableAliases = make(map[string]string) + } + j.progress.TableAliases[tableName] = TableAlias(tableName) + j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData) + break + } + for { + if tableOrView { + if err := j.IDest.DropTable(tableName, false); err == nil { + break + } + } else { + if err := j.IDest.DropView(tableName); err == nil { + break + } + } + } + log.Infof("the restore is cancelled, the unmatched %s %s is dropped, restore snapshot again", resource, tableName) + break + } else if err != nil { + j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData) + return err + } + + if !restoreFinished { + log.Infof("fullsync status: restore job %s is running", restoreSnapshotName) + return nil + } + + tableCommitSeqMap := inMemoryData.TableCommitSeqMap + var commitSeq int64 = math.MaxInt64 + switch j.SyncType { + case DBSync: + for tableId, seq := range tableCommitSeqMap { + if seq == 0 { + // Skip the views + continue + } + commitSeq = utils.Min(commitSeq, seq) + log.Debugf("fullsync table commit seq, table id: %d, commit seq: %d", tableId, seq) + } + if snapshotResp.GetCommitSeq() > 0 { + commitSeq = utils.Min(commitSeq, snapshotResp.GetCommitSeq()) + } + j.progress.TableCommitSeqMap = tableCommitSeqMap // persist in CommitNext + j.progress.TableNameMapping = tableNameMapping + case TableSync: + commitSeq = tableCommitSeqMap[j.Src.TableId] + } + + j.progress.CommitNextSubWithPersist(commitSeq, PersistRestoreInfo, restoreSnapshotName) + break + } + + case PersistRestoreInfo: + // Step 7: Update job progress && dest table id + // update job info, only for dest table id + + if len(j.progress.TableAliases) > 0 { + log.Infof("fullsync swap %d tables with aliases", len(j.progress.TableAliases)) + + var tables []string + for table := range j.progress.TableAliases { + tables = append(tables, table) + } + for _, table := range tables { + alias := j.progress.TableAliases[table] + targetName := table + if j.isTableSyncWithAlias() { + targetName = j.Dest.Table + } + + // check table exists to ensure the idempotent + if exist, err := j.IDest.CheckTableExistsByName(alias); err != nil { + return err + } else if exist { + log.Infof("fullsync swap table with alias, table: %s, alias: %s", targetName, alias) + swap := false // drop the old table + if err := j.IDest.ReplaceTable(alias, targetName, swap); err != nil { + return err + } + } else { + log.Infof("fullsync the table alias has been swapped, table: %s, alias: %s", targetName, alias) + } + } + // Since the meta of dest table has been changed, refresh it. + j.destMeta.ClearTablesCache() + + // Save the replace result + j.progress.TableAliases = nil + j.progress.NextSubCheckpoint(PersistRestoreInfo, j.progress.PersistData) + } + + log.Infof("fullsync status: persist restore info") + + switch j.SyncType { + case DBSync: + // refresh dest meta cache before building table mapping. 
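The mapping built below pairs every upstream table id in the snapshot with the restored downstream table id by name: the name is taken from the snapshot's TableNameMapping when available (so upstream renames after the backup do not matter) and only falls back to the live upstream meta for older snapshots that lack the mapping. A condensed sketch of that resolution order, with illustrative lookup interfaces:

// nameByIdLookup / idByNameLookup model the two meta calls the mapping needs.
type nameByIdLookup interface {
    GetTableNameById(id int64) (string, error)
}
type idByNameLookup interface {
    GetTableId(name string) (int64, error)
}

// buildTableMapping resolves src table id -> dest table id for every table in
// the snapshot, preferring the names recorded at backup time and skipping
// tables whose upstream name can no longer be found (already dropped).
func buildTableMapping(srcIds []int64, snapshotNames map[int64]string,
    srcMeta nameByIdLookup, destMeta idByNameLookup) (map[int64]int64, error) {
    mapping := make(map[int64]int64, len(srcIds))
    for _, srcId := range srcIds {
        name, ok := snapshotNames[srcId]
        if !ok {
            var err error
            if name, err = srcMeta.GetTableNameById(srcId); err != nil {
                return nil, err
            }
            if name == "" { // dropped upstream in the meantime, nothing to map
                continue
            }
        }
        destId, err := destMeta.GetTableId(name)
        if err != nil {
            return nil, err
        }
        mapping[srcId] = destId
    }
    return mapping, nil
}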
+ j.destMeta.ClearTablesCache() + tableMapping := make(map[int64]int64) + for srcTableId := range j.progress.TableCommitSeqMap { + var srcTableName string + if name, ok := j.progress.TableNameMapping[srcTableId]; ok { + srcTableName = name + } else { + // Keep compatible, but once the upstream table is renamed, the + // downstream table id will not be found here. + name, err := j.srcMeta.GetTableNameById(srcTableId) + if err != nil { + return err + } + srcTableName = name + + // If srcTableName is empty, it may be deleted. + // No need to map it to dest table + if srcTableName == "" { + log.Warnf("the name of source table id: %d is empty, no need to map it to dest table", srcTableId) + continue + } + } + + destTableId, err := j.destMeta.GetTableId(srcTableName) + if err != nil { + return err + } + + log.Debugf("fullsync table mapping, src: %d, dest: %d, name: %s", + srcTableId, destTableId, srcTableName) + tableMapping[srcTableId] = destTableId + } + + j.progress.TableMapping = tableMapping + j.progress.ShadowIndexes = nil + j.progress.NextWithPersist(j.progress.CommitSeq, DBTablesIncrementalSync, Done, "") + case TableSync: + if destTable, err := j.destMeta.UpdateTable(j.Dest.Table, 0); err != nil { + return err + } else { + j.Dest.TableId = destTable.Id + } + + if err := j.persistJob(); err != nil { + return err + } + + j.progress.TableCommitSeqMap = nil + j.progress.TableMapping = nil + j.progress.ShadowIndexes = nil + j.progress.NextWithPersist(j.progress.CommitSeq, TableIncrementalSync, Done, "") + default: + return xerror.Errorf(xerror.Normal, "invalid sync type %d", j.SyncType) + } + + return nil + default: + return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState) + } + + return j.fullSync() +} + +func (j *Job) persistJob() error { + data, err := json.Marshal(j) + if err != nil { + return xerror.Errorf(xerror.Normal, "marshal job failed, job: %v", j) + } + + if err := j.db.UpdateJob(j.Name, string(data)); err != nil { + return err + } + + return nil +} + +func (j *Job) newLabel(commitSeq int64) string { + src := &j.Src + dest := &j.Dest + randNum := rand.Intn(65536) // hex 4 chars + if j.SyncType == DBSync { + // label "ccrj-rand:${sync_type}:${src_db_id}:${dest_db_id}:${commit_seq}" + return fmt.Sprintf("ccrj-%x:%s:%d:%d:%d", randNum, j.SyncType, src.DbId, dest.DbId, commitSeq) + } else { + // TableSync + // label "ccrj-rand:${sync_type}:${src_db_id}_${src_table_id}:${dest_db_id}_${dest_table_id}:${commit_seq}" + return fmt.Sprintf("ccrj-%x:%s:%d_%d:%d_%d:%d", randNum, j.SyncType, src.DbId, src.TableId, dest.DbId, dest.TableId, commitSeq) + } +} + +// only called by DBSync, TableSync tableId is in Src/Dest Spec +func (j *Job) getDestTableIdBySrc(srcTableId int64) (int64, error) { + if j.progress.TableMapping != nil { + if destTableId, ok := j.progress.TableMapping[srcTableId]; ok { + return destTableId, nil + } + log.Warnf("table mapping not found, src table id: %d", srcTableId) + } else { + log.Warnf("table mapping not found, src table id: %d", srcTableId) + j.progress.TableMapping = make(map[int64]int64) + } + + // WARNING: the table name might be changed, and the TableMapping has been updated in time, + // only keep this for compatible. 
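The fallback that follows resolves the destination id through the current upstream table name and memoizes the answer into TableMapping, so the name lookup is paid at most once per table; as the warning says, the result can be wrong if the upstream table was renamed after the binlog was written, which is why the mapping persisted with the progress is the preferred source. A short sketch of that cache-then-fallback shape, reusing the illustrative lookup interfaces from the sketch above:

// destIdWithFallback returns the cached mapping entry when present, otherwise
// resolves the dest id via the current upstream name and caches the result.
func destIdWithFallback(mapping map[int64]int64, srcId int64,
    srcMeta nameByIdLookup, destMeta idByNameLookup) (int64, error) {
    if destId, ok := mapping[srcId]; ok {
        return destId, nil
    }
    name, err := srcMeta.GetTableNameById(srcId)
    if err != nil {
        return 0, err
    }
    destId, err := destMeta.GetTableId(name)
    if err != nil {
        return 0, err
    }
    mapping[srcId] = destId // memoize for later binlogs
    return destId, nil
}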
+ srcTableName, err := j.srcMeta.GetTableNameById(srcTableId) + if err != nil { + return 0, err + } + + if destTableId, err := j.destMeta.GetTableId(srcTableName); err != nil { + return 0, err + } else { + j.progress.TableMapping[srcTableId] = destTableId + return destTableId, nil + } +} + +func (j *Job) getDestNameBySrcId(srcTableId int64) (string, error) { + destTableId, err := j.getDestTableIdBySrc(srcTableId) + if err != nil { + return "", err + } + + name, err := j.destMeta.GetTableNameById(destTableId) + if err != nil { + return "", err + } + + if name == "" { + return "", xerror.Errorf(xerror.Normal, "dest table name not found, dest table id: %d", destTableId) + } + + return name, nil +} + +func (j *Job) isBinlogCommitted(tableId int64, binlogCommitSeq int64) bool { + if j.progress.SyncState == DBTablesIncrementalSync { + tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId] + if ok && binlogCommitSeq <= tableCommitSeq { + log.Infof("filter the already committed binlog %d, table commit seq: %d, table: %d", + binlogCommitSeq, tableCommitSeq, tableId) + return true + } + } + return false +} + +func (j *Job) getDbSyncTableRecords(upsert *record.Upsert) []*record.TableRecord { + commitSeq := upsert.CommitSeq + tableCommitSeqMap := j.progress.TableCommitSeqMap + tableRecords := make([]*record.TableRecord, 0, len(upsert.TableRecords)) + + for tableId, tableRecord := range upsert.TableRecords { + // DBIncrementalSync + if tableCommitSeqMap == nil { + tableRecords = append(tableRecords, tableRecord) + continue + } + + if tableCommitSeq, ok := tableCommitSeqMap[tableId]; ok { + if commitSeq > tableCommitSeq { + tableRecords = append(tableRecords, tableRecord) + } + } else { + // for db partial sync + tableRecords = append(tableRecords, tableRecord) + } + } + + return tableRecords +} + +func (j *Job) getRelatedTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) { + var tableRecords []*record.TableRecord //, 0, len(upsert.TableRecords)) + + switch j.SyncType { + case DBSync: + records := j.getDbSyncTableRecords(upsert) + if len(records) == 0 { + return nil, nil + } + tableRecords = records + case TableSync: + tableRecord, ok := upsert.TableRecords[j.Src.TableId] + if !ok { + return nil, xerror.Errorf(xerror.Normal, "table record not found, table: %s", j.Src.Table) + } + tableRecords = make([]*record.TableRecord, 0, 1) tableRecords = append(tableRecords, tableRecord) default: - return nil, xerror.Errorf(xerror.Normal, "invalid sync type: %s", j.SyncType) + return nil, xerror.Errorf(xerror.Normal, "invalid sync type: %s", j.SyncType) + } + + return tableRecords, nil +} + +// Table ingestBinlog +func (j *Job) ingestBinlog(txnId int64, tableRecords []*record.TableRecord) ([]*ttypes.TTabletCommitInfo, error) { + log.Infof("ingestBinlog, txnId: %d", txnId) + + job, err := j.jobFactory.CreateJob(NewIngestContext(txnId, tableRecords, j.progress.TableMapping), j, "IngestBinlog") + if err != nil { + return nil, err + } + + ingestBinlogJob, ok := job.(*IngestBinlogJob) + if !ok { + return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job) + } + + job.Run() + if err := job.Error(); err != nil { + return nil, err + } + return ingestBinlogJob.CommitInfos(), nil +} + +// Table ingestBinlog for txn insert +func (j *Job) ingestBinlogForTxnInsert(txnId int64, tableRecords []*record.TableRecord, stidMap map[int64]int64, destTableId int64) ([]*festruct.TSubTxnInfo, error) { + log.Infof("ingestBinlogForTxnInsert, txnId: %d", txnId) + + job, err := 
j.jobFactory.CreateJob(NewIngestContextForTxnInsert(txnId, tableRecords, j.progress.TableMapping, stidMap), j, "IngestBinlog") + if err != nil { + return nil, err + } + + ingestBinlogJob, ok := job.(*IngestBinlogJob) + if !ok { + return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job) + } + + job.Run() + if err := job.Error(); err != nil { + return nil, err + } + + stidToCommitInfos := ingestBinlogJob.SubTxnToCommitInfos() + subTxnInfos := make([]*festruct.TSubTxnInfo, 0, len(stidMap)) + for sourceStid, destStid := range stidMap { + destStid := destStid // if no this line, every element in subTxnInfos is the last tSubTxnInfo + commitInfos := stidToCommitInfos[destStid] + if commitInfos == nil { + log.Warnf("no commit infos from source stid: %d; dest stid %d, just skip", sourceStid, destStid) + continue + } + + tSubTxnInfo := &festruct.TSubTxnInfo{ + SubTxnId: &destStid, + TableId: &destTableId, + TabletCommitInfos: commitInfos, + } + + subTxnInfos = append(subTxnInfos, tSubTxnInfo) + } + + return subTxnInfos, nil +} + +func (j *Job) handleUpsertWithRetry(binlog *festruct.TBinlog) error { + err := j.handleUpsert(binlog) + if !xerror.IsCategory(err, xerror.Meta) { + return err + } + + log.Warnf("a meta error occurred, retry to handle upsert binlog again, commitSeq: %d", binlog.GetCommitSeq()) + return j.handleUpsert(binlog) +} + +func (j *Job) handleUpsert(binlog *festruct.TBinlog) error { + log.Infof("handle upsert binlog, sub sync state: %s, prevCommitSeq: %d, commitSeq: %d", + j.progress.SubSyncState, j.progress.PrevCommitSeq, j.progress.CommitSeq) + + // inMemory will be update in state machine, but progress keep any, so progress.inMemory is also latest, well call NextSubCheckpoint don't need to upate inMemory in progress + type inMemoryData struct { + CommitSeq int64 `json:"commit_seq"` + TxnId int64 `json:"txn_id"` + DestTableIds []int64 `json:"dest_table_ids"` + TableRecords []*record.TableRecord `json:"table_records"` + CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"` + IsTxnInsert bool `json:"is_txn_insert"` + SourceStids []int64 `json:"source_stid"` + DestStids []int64 `json:"desc_stid"` + SubTxnInfos []*festruct.TSubTxnInfo `json:"sub_txn_infos"` + } + + updateInMemory := func() error { + if j.progress.InMemoryData == nil { + persistData := j.progress.PersistData + inMemoryData := &inMemoryData{} + if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil { + return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData) + } + j.progress.InMemoryData = inMemoryData + } + return nil + } + + rollback := func(err error, inMemoryData *inMemoryData) { + log.Errorf("need rollback, err: %+v", err) + j.progress.NextSubCheckpoint(RollbackTransaction, inMemoryData) + } + + committed := func() { + log.Infof("txn committed, commitSeq: %d, cleanup", j.progress.CommitSeq) + + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + commitSeq := j.progress.CommitSeq + destTableIds := inMemoryData.DestTableIds + if j.SyncType == DBSync && len(j.progress.TableCommitSeqMap) > 0 { + for _, tableId := range destTableIds { + tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId] + if !ok { + continue + } + + if tableCommitSeq < commitSeq { + j.progress.TableCommitSeqMap[tableId] = commitSeq + } + } + + j.progress.Persist() + } + j.progress.Done() + } + + dest := &j.Dest + switch j.progress.SubSyncState { + case Done: + if binlog == nil { + log.Errorf("binlog is nil, %+v", xerror.Errorf(xerror.Normal, "handle nil 
upsert binlog")) + return nil + } + + data := binlog.GetData() + upsert, err := record.NewUpsertFromJson(data) + if err != nil { + return err + } + log.Debugf("upsert: %v", upsert) + + // Step 1: get related tableRecords + var isTxnInsert bool = false + if len(upsert.Stids) > 0 { + if !featureTxnInsert { + log.Warnf("The txn insert is not supported yet") + return xerror.Errorf(xerror.Normal, "The txn insert is not supported yet") + } + if j.SyncType == DBSync { + log.Warnf("Txn insert is NOT supported when DBSync") + return xerror.Errorf(xerror.Normal, "Txn insert is NOT supported when DBSync") + } + isTxnInsert = true + } + + tableRecords, err := j.getRelatedTableRecords(upsert) + if err != nil { + log.Errorf("get related table records failed, err: %+v", err) + } + if len(tableRecords) == 0 { + log.Debug("no related table records") + return nil + } + + log.Debugf("tableRecords: %v", tableRecords) + destTableIds := make([]int64, 0, len(tableRecords)) + if j.SyncType == DBSync { + for _, tableRecord := range tableRecords { + if destTableId, err := j.getDestTableIdBySrc(tableRecord.Id); err != nil { + return err + } else { + destTableIds = append(destTableIds, destTableId) + } + } + } else { + destTableIds = append(destTableIds, j.Dest.TableId) + } + inMemoryData := &inMemoryData{ + CommitSeq: upsert.CommitSeq, + DestTableIds: destTableIds, + TableRecords: tableRecords, + IsTxnInsert: isTxnInsert, + SourceStids: upsert.Stids, + } + j.progress.NextSubVolatile(BeginTransaction, inMemoryData) + + case BeginTransaction: + // Step 2: begin txn + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + commitSeq := inMemoryData.CommitSeq + sourceStids := inMemoryData.SourceStids + isTxnInsert := inMemoryData.IsTxnInsert + log.Debugf("begin txn, dest: %v, commitSeq: %d", dest, commitSeq) + + destRpc, err := j.factory.NewFeRpc(dest) + if err != nil { + return err + } + + label := j.newLabel(commitSeq) + + var beginTxnResp *festruct.TBeginTxnResult_ + if isTxnInsert { + // when txn insert, give an array length in BeginTransaction, it will return a list of stid + beginTxnResp, err = destRpc.BeginTransactionForTxnInsert(dest, label, inMemoryData.DestTableIds, int64(len(sourceStids))) + } else { + beginTxnResp, err = destRpc.BeginTransaction(dest, label, inMemoryData.DestTableIds) + } + + if err != nil { + return err + } + log.Debugf("resp: %v", beginTxnResp) + if beginTxnResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK { + if isTableNotFound(beginTxnResp.GetStatus()) && j.SyncType == DBSync { + // It might caused by the staled TableMapping entries. + // In order to rebuild the dest table ids, this progress should be rollback. 
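A table-not-found answer from beginTransaction under DB sync usually means the cached src-to-dest table ids went stale (for example, the destination table was dropped and re-created by a later restore), so the progress is rolled back and the affected mapping entries are evicted; the retry then re-resolves them through the name-based fallback. A tiny sketch of the eviction:

// evictStaleMappings drops the src->dest entries for the tables involved in
// the failed transaction so the retry resolves them by name again instead of
// reusing ids that no longer exist on the destination.
func evictStaleMappings(mapping map[int64]int64, srcTableIds []int64) {
    for _, id := range srcTableIds {
        delete(mapping, id)
    }
}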
+ j.progress.Rollback(j.SkipError) + for _, tableRecord := range inMemoryData.TableRecords { + delete(j.progress.TableMapping, tableRecord.Id) + } + } + return xerror.Errorf(xerror.Normal, "begin txn failed, status: %v", beginTxnResp.GetStatus()) + } + txnId := beginTxnResp.GetTxnId() + if isTxnInsert { + destStids := beginTxnResp.GetSubTxnIds() + inMemoryData.DestStids = destStids + log.Debugf("TxnId: %d, DbId: %d, destStids: %v", txnId, beginTxnResp.GetDbId(), destStids) + } else { + log.Debugf("TxnId: %d, DbId: %d", txnId, beginTxnResp.GetDbId()) + } + + inMemoryData.TxnId = txnId + j.progress.NextSubCheckpoint(IngestBinlog, inMemoryData) + + case IngestBinlog: + log.Debug("ingest binlog") + if err := updateInMemory(); err != nil { + return err + } + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + tableRecords := inMemoryData.TableRecords + txnId := inMemoryData.TxnId + isTxnInsert := inMemoryData.IsTxnInsert + + // make stidMap, source_stid to dest_stid + stidMap := make(map[int64]int64) + if isTxnInsert { + sourceStids := inMemoryData.SourceStids + destStids := inMemoryData.DestStids + if len(sourceStids) == len(destStids) { + for i := 0; i < len(sourceStids); i++ { + stidMap[sourceStids[i]] = destStids[i] + } + } + } + + // Step 3: ingest binlog + if isTxnInsert { + // When txn insert, only one table can be inserted, so use the first DestTableId + destTableId := inMemoryData.DestTableIds[0] + + // When txn insert, use subTxnInfos to commit rather than commitInfos. + subTxnInfos, err := j.ingestBinlogForTxnInsert(txnId, tableRecords, stidMap, destTableId) + if err != nil { + rollback(err, inMemoryData) + return err + } else { + inMemoryData.SubTxnInfos = subTxnInfos + j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData) + } + } else { + commitInfos, err := j.ingestBinlog(txnId, tableRecords) + if err != nil { + rollback(err, inMemoryData) + return err + } else { + inMemoryData.CommitInfos = commitInfos + j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData) + } + } + + case CommitTransaction: + // Step 4: commit txn + log.Debug("commit txn") + if err := updateInMemory(); err != nil { + return err + } + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + txnId := inMemoryData.TxnId + commitInfos := inMemoryData.CommitInfos + + destRpc, err := j.factory.NewFeRpc(dest) + if err != nil { + rollback(err, inMemoryData) + break + } + + isTxnInsert := inMemoryData.IsTxnInsert + subTxnInfos := inMemoryData.SubTxnInfos + var resp *festruct.TCommitTxnResult_ + if isTxnInsert { + resp, err = destRpc.CommitTransactionForTxnInsert(dest, txnId, true, subTxnInfos) + } else { + resp, err = destRpc.CommitTransaction(dest, txnId, commitInfos) + } + if err != nil { + rollback(err, inMemoryData) + break + } + + if statusCode := resp.Status.GetStatusCode(); statusCode == tstatus.TStatusCode_PUBLISH_TIMEOUT { + dest.WaitTransactionDone(txnId) + } else if statusCode != tstatus.TStatusCode_OK { + err := xerror.Errorf(xerror.Normal, "commit txn failed, status: %v", resp.Status) + rollback(err, inMemoryData) + break + } + + log.Infof("TxnId: %d committed, resp: %v", txnId, resp) + committed() + + return nil + + case RollbackTransaction: + log.Debugf("Rollback txn") + // Not Step 5: just rollback txn + if err := updateInMemory(); err != nil { + return err + } + + inMemoryData := j.progress.InMemoryData.(*inMemoryData) + txnId := inMemoryData.TxnId + destRpc, err := j.factory.NewFeRpc(dest) + if err != nil { + return err + } + + resp, err := destRpc.RollbackTransaction(dest, 
txnId) + if err != nil { + return err + } + if resp.Status.GetStatusCode() != tstatus.TStatusCode_OK { + if isTxnNotFound(resp.Status) { + log.Warnf("txn not found, txnId: %d", txnId) + } else if isTxnAborted(resp.Status) { + log.Infof("txn already aborted, txnId: %d", txnId) + } else if isTxnCommitted(resp.Status) { + log.Infof("txn already committed, txnId: %d", txnId) + committed() + return nil + } else { + return xerror.Errorf(xerror.Normal, "rollback txn failed, status: %v", resp.Status) + } + } + + log.Infof("rollback TxnId: %d resp: %v", txnId, resp) + j.progress.Rollback(j.SkipError) + return nil + + default: + return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState) + } + + return j.handleUpsert(binlog) +} + +// handleAddPartition +func (j *Job) handleAddPartition(binlog *festruct.TBinlog) error { + log.Infof("handle add partition binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + addPartition, err := record.NewAddPartitionFromJson(data) + if err != nil { + return err + } + + if j.isBinlogCommitted(addPartition.TableId, binlog.GetCommitSeq()) { + return nil + } + + if addPartition.IsTemp { + log.Infof("skip add temporary partition because backup/restore table with temporary partitions is not supported yet") + return nil + } + + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else if j.SyncType == DBSync { + destTableId, err := j.getDestTableIdBySrc(addPartition.TableId) + if err != nil { + return err + } + + if destTableName, err = j.destMeta.GetTableNameById(destTableId); err != nil { + return err + } else if destTableName == "" { + return xerror.Errorf(xerror.Normal, "tableId %d not found in destMeta", destTableId) + } + } + return j.IDest.AddPartition(destTableName, addPartition) +} + +// handleDropPartition +func (j *Job) handleDropPartition(binlog *festruct.TBinlog) error { + log.Infof("handle drop partition binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + dropPartition, err := record.NewDropPartitionFromJson(data) + if err != nil { + return err + } + + if dropPartition.IsTemp { + log.Infof("Since the temporary partition is not synchronized to the downstream, this binlog is skipped.") + return nil + } + + if j.isBinlogCommitted(dropPartition.TableId, binlog.GetCommitSeq()) { + return nil + } + + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else if j.SyncType == DBSync { + destTableId, err := j.getDestTableIdBySrc(dropPartition.TableId) + if err != nil { + return err + } + + if destTableName, err = j.destMeta.GetTableNameById(destTableId); err != nil { + return err + } else if destTableName == "" { + return xerror.Errorf(xerror.Normal, "tableId %d not found in destMeta", destTableId) + } + } + return j.IDest.DropPartition(destTableName, dropPartition) +} + +// handleCreateTable +func (j *Job) handleCreateTable(binlog *festruct.TBinlog) error { + log.Infof("handle create table binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + if j.SyncType != DBSync { + return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType) + } + + data := binlog.GetData() + createTable, err := record.NewCreateTableFromJson(data) + if err != nil { + return err + } + + if j.isBinlogCommitted(createTable.TableId, binlog.GetCommitSeq()) { + return nil + } + + if 
featureCreateViewDropExists {
+		viewRegex := regexp.MustCompile(`(?i)^CREATE(\s+)VIEW`)
+		isCreateView := viewRegex.MatchString(createTable.Sql)
+		tableName := strings.TrimSpace(createTable.TableName)
+		if isCreateView && len(tableName) > 0 {
+			// drop view if exists
+			log.Infof("feature_create_view_drop_exists is enabled, try to drop view %s before creating", tableName)
+			if err = j.IDest.DropView(tableName); err != nil {
+				return xerror.Wrapf(err, xerror.Normal, "drop view before create view %s, table id=%d",
+					tableName, createTable.TableId)
+			}
+		}
+	}
+
+	// Some operations, such as DROP TABLE, will be skipped in the partial/full snapshot;
+	// in that case, the dest table might already exist, so we need to check it before creating.
+	// If the dest table already exists, we need to do a partial snapshot.
+	//
+	// See test_cds_fullsync_tbl_drop_create.groovy for details
+	if j.SyncType == DBSync && !createTable.IsCreateView() {
+		if exists, err := j.IDest.CheckTableExistsByName(createTable.TableName); err != nil {
+			return err
+		} else if exists {
+			log.Warnf("the dest table %s already exists, force partial snapshot, commit seq: %d",
+				createTable.TableName, binlog.GetCommitSeq())
+			replace := true
+			return j.newPartialSnapshot(createTable.TableId, createTable.TableName, nil, replace)
+		}
+	}
+
+	if err = j.IDest.CreateTableOrView(createTable, j.Src.Database); err != nil {
+		return xerror.Wrapf(err, xerror.Normal, "create table %d", createTable.TableId)
+	}
+
+	j.srcMeta.ClearTablesCache()
+	j.destMeta.ClearTablesCache()
+
+	srcTableName := createTable.TableName
+	if len(srcTableName) == 0 {
+		// the field `TableName` is added after doris 2.0.3; to keep compatible, try to read the src table
+		// name from upstream, but the result might be wrong if upstream has executed rename/replace.
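+		// For example, if the upstream renamed the table after this binlog was written,
+		// GetTableNameById would return the new name, and the table mapping recorded below
+		// would then point at the wrong destination table.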
+ log.Infof("the table id %d is not found in the binlog record, get the name from the upstream", createTable.TableId) + srcTableName, err = j.srcMeta.GetTableNameById(createTable.TableId) + if err != nil { + return xerror.Errorf(xerror.Normal, "the table with id %d is not found in the upstream cluster, create table: %s", + createTable.TableId, createTable.String()) + } + } + + var destTableId int64 + destTableId, err = j.destMeta.GetTableId(srcTableName) + if err != nil { + return err + } + + if j.progress.TableMapping == nil { + j.progress.TableMapping = make(map[int64]int64) + } + j.progress.TableMapping[createTable.TableId] = destTableId + if j.progress.TableNameMapping == nil { + j.progress.TableNameMapping = make(map[int64]string) + } + j.progress.TableNameMapping[createTable.TableId] = srcTableName + j.progress.Done() + return nil +} + +// handleDropTable +func (j *Job) handleDropTable(binlog *festruct.TBinlog) error { + log.Infof("handle drop table binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + if j.SyncType != DBSync { + return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType) + } + + data := binlog.GetData() + dropTable, err := record.NewDropTableFromJson(data) + if err != nil { + return err + } + + if !dropTable.IsView { + if _, ok := j.progress.TableMapping[dropTable.TableId]; !ok { + log.Warnf("the dest table is not found, skip drop table binlog, src table id: %d, commit seq: %d", + dropTable.TableId, binlog.GetCommitSeq()) + return nil + } + } + + if j.isBinlogCommitted(dropTable.TableId, binlog.GetCommitSeq()) { + return nil + } + + tableName := dropTable.TableName + // deprecated, `TableName` has been added after doris 2.0.0 + if tableName == "" { + dirtySrcTables := j.srcMeta.DirtyGetTables() + srcTable, ok := dirtySrcTables[dropTable.TableId] + if !ok { + return xerror.Errorf(xerror.Normal, "table not found, tableId: %d", dropTable.TableId) + } + + tableName = srcTable.Name + } + + if dropTable.IsView { + if err = j.IDest.DropView(tableName); err != nil { + return xerror.Wrapf(err, xerror.Normal, "drop view %s", tableName) + } + } else { + if err = j.IDest.DropTable(tableName, true); err != nil { + // In apache/doris/common/ErrorCode.java + // + // ERR_WRONG_OBJECT(1347, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s.%s' is not %s. %s.") + if !strings.Contains(err.Error(), "is not TABLE") { + return xerror.Wrapf(err, xerror.Normal, "drop table %s", tableName) + } else if err = j.IDest.DropView(tableName); err != nil { // retry with drop view. + return xerror.Wrapf(err, xerror.Normal, "drop view %s", tableName) + } + } + } + + j.srcMeta.ClearTablesCache() + j.destMeta.ClearTablesCache() + delete(j.progress.TableNameMapping, dropTable.TableId) + delete(j.progress.TableMapping, dropTable.TableId) + return nil +} + +func (j *Job) handleDummy(binlog *festruct.TBinlog) error { + dummyCommitSeq := binlog.GetCommitSeq() + + log.Infof("handle dummy binlog, need full sync. 
 SyncType: %v, seq: %v", j.SyncType, dummyCommitSeq)
+
+	return j.newSnapshot(dummyCommitSeq)
+}
+
+// handleAlterJob
+func (j *Job) handleAlterJob(binlog *festruct.TBinlog) error {
+	log.Infof("handle alter job binlog, prevCommitSeq: %d, commitSeq: %d",
+		j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+	data := binlog.GetData()
+	alterJob, err := record.NewAlterJobV2FromJson(data)
+	if err != nil {
+		return err
+	}
+
+	if featureSkipRollupBinlogs && alterJob.Type == record.ALTER_JOB_ROLLUP {
+		log.Warnf("skip rollup alter job: %s", alterJob)
+		return nil
+	}
+
+	if alterJob.Type == record.ALTER_JOB_SCHEMA_CHANGE {
+		return j.handleSchemaChange(alterJob)
+	} else if alterJob.Type == record.ALTER_JOB_ROLLUP {
+		return j.handleAlterRollup(alterJob)
+	} else {
+		return xerror.Errorf(xerror.Normal, "unsupported alter job type: %s", alterJob.Type)
+	}
+}
+
+func (j *Job) handleAlterRollup(alterJob *record.AlterJobV2) error {
+	if !alterJob.IsFinished() {
+		switch alterJob.JobState {
+		case record.ALTER_JOB_STATE_PENDING:
+			// Once the rollup job steps to WAITING_TXN, upserts to the rollup index are allowed,
+			// but the dest index of the downstream cluster hasn't been created yet.
+			//
+			// To filter upserts to the rollup index, save the shadow index ids here.
+			if j.progress.ShadowIndexes == nil {
+				j.progress.ShadowIndexes = make(map[int64]int64)
+			}
+			j.progress.ShadowIndexes[alterJob.RollupIndexId] = alterJob.BaseIndexId
+		case record.ALTER_JOB_STATE_CANCELLED:
+			// clear the shadow indexes
+			delete(j.progress.ShadowIndexes, alterJob.RollupIndexId)
+		}
+		return nil
+	}
+
+	// Once the partial snapshot finishes, the rollup index will be converted to a normal index.
+	delete(j.progress.ShadowIndexes, alterJob.RollupIndexId)
+
+	replace := true
+	return j.newPartialSnapshot(alterJob.TableId, alterJob.TableName, nil, replace)
+}
+
+func (j *Job) handleSchemaChange(alterJob *record.AlterJobV2) error {
+	if !alterJob.IsFinished() {
+		switch alterJob.JobState {
+		case record.ALTER_JOB_STATE_PENDING:
+			// Once the schema change steps to WAITING_TXN, upserts to the shadow indexes are allowed,
+			// but the dest indexes of the downstream cluster haven't been created yet.
+			//
+			// To filter upserts to the shadow indexes, save the shadow index ids here.
+			if j.progress.ShadowIndexes == nil {
+				j.progress.ShadowIndexes = make(map[int64]int64)
+			}
+			for shadowIndexId, originIndexId := range alterJob.ShadowIndexes {
+				j.progress.ShadowIndexes[shadowIndexId] = originIndexId
+			}
+		case record.ALTER_JOB_STATE_CANCELLED:
+			// clear the shadow indexes
+			for shadowIndexId := range alterJob.ShadowIndexes {
+				delete(j.progress.ShadowIndexes, shadowIndexId)
+			}
+		}
+		return nil
+	}
+
+	// resolve the dest table name
+	var destTableName string
+	if j.SyncType == TableSync {
+		destTableName = j.Dest.Table
+	} else {
+		destTableName = alterJob.TableName
+	}
+
+	if featureSchemaChangePartialSync && alterJob.Type == record.ALTER_JOB_SCHEMA_CHANGE {
+		// Once the partial snapshot finishes, the shadow indexes will be converted to normal indexes.
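+		// Drop the bookkeeping entries first: the partial snapshot below re-syncs the whole
+		// table (with replace), so upserts no longer need to be filtered by shadow index id
+		// once the schema change has finished.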
+ for shadowIndexId := range alterJob.ShadowIndexes { + delete(j.progress.ShadowIndexes, shadowIndexId) + } + + replaceTable := true + return j.newPartialSnapshot(alterJob.TableId, alterJob.TableName, nil, replaceTable) + } + + var allViewDeleted bool = false + for { + // before drop table, drop related view firstly + if !allViewDeleted { + views, err := j.IDest.GetAllViewsFromTable(destTableName) + if err != nil { + log.Errorf("when alter job, get view from table failed, err : %v", err) + continue + } + + var dropViewFailed bool = false + for _, view := range views { + if err := j.IDest.DropView(view); err != nil { + log.Errorf("when alter job, drop view %s failed, err : %v", view, err) + dropViewFailed = true + } + } + if dropViewFailed { + continue + } + + allViewDeleted = true + } + + if err := j.IDest.DropTable(destTableName, true); err == nil { + break + } + } + + return j.newSnapshot(j.progress.CommitSeq) +} + +// handleLightningSchemaChange +func (j *Job) handleLightningSchemaChange(binlog *festruct.TBinlog) error { + log.Infof("handle lightning schema change binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + lightningSchemaChange, err := record.NewModifyTableAddOrDropColumnsFromJson(data) + if err != nil { + return err + } + + if j.isBinlogCommitted(lightningSchemaChange.TableId, binlog.GetCommitSeq()) { + return nil + } + + tableAlias := "" + if j.isTableSyncWithAlias() { + tableAlias = j.Dest.Table + } + return j.IDest.LightningSchemaChange(j.Src.Database, tableAlias, lightningSchemaChange) +} + +// handle rename column +func (j *Job) handleRenameColumn(binlog *festruct.TBinlog) error { + log.Infof("handle rename column binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + renameColumn, err := record.NewRenameColumnFromJson(data) + if err != nil { + return err + } + + return j.handleRenameColumnRecord(binlog.GetCommitSeq(), renameColumn) +} + +func (j *Job) handleRenameColumnRecord(commitSeq int64, renameColumn *record.RenameColumn) error { + if j.isBinlogCommitted(renameColumn.TableId, commitSeq) { + return nil + } + + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + var err error + destTableName, err = j.getDestNameBySrcId(renameColumn.TableId) + if err != nil { + return err + } + } + + return j.IDest.RenameColumn(destTableName, renameColumn) +} + +// handle modify comment +func (j *Job) handleModifyComment(binlog *festruct.TBinlog) error { + log.Infof("handle modify comment binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + modifyComment, err := record.NewModifyCommentFromJson(data) + if err != nil { + return err + } + + return j.handleModifyCommentRecord(binlog.GetCommitSeq(), modifyComment) +} + +func (j *Job) handleModifyCommentRecord(commitSeq int64, modifyComment *record.ModifyComment) error { + if j.isBinlogCommitted(modifyComment.TblId, commitSeq) { + return nil + } + + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + var err error + destTableName, err = j.getDestNameBySrcId(modifyComment.TblId) + if err != nil { + return err + } + } + + return j.IDest.ModifyComment(destTableName, modifyComment) +} + +func (j *Job) handleTruncateTable(binlog *festruct.TBinlog) error { + log.Infof("handle truncate table binlog, prevCommitSeq: %d, commitSeq: %d", + 
j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + truncateTable, err := record.NewTruncateTableFromJson(data) + if err != nil { + return err + } + + if j.isBinlogCommitted(truncateTable.TableId, binlog.GetCommitSeq()) { + return nil + } + + var destTableName string + switch j.SyncType { + case DBSync: + destTableName = truncateTable.TableName + case TableSync: + destTableName = j.Dest.Table + default: + return xerror.Panicf(xerror.Normal, "invalid sync type: %v", j.SyncType) + } + + err = j.IDest.TruncateTable(destTableName, truncateTable) + if err == nil { + j.srcMeta.ClearTable(j.Src.Database, truncateTable.TableName) + j.destMeta.ClearTable(j.Dest.Database, destTableName) + } + + return err +} + +func (j *Job) handleReplacePartitions(binlog *festruct.TBinlog) error { + log.Infof("handle replace partitions binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + replacePartition, err := record.NewReplacePartitionFromJson(data) + if err != nil { + return err + } + + if j.isBinlogCommitted(replacePartition.TableId, binlog.GetCommitSeq()) { + return nil + } + + if !replacePartition.StrictRange { + log.Warnf("replacing partitions with non strict range is not supported yet, replace partition record: %s", string(data)) + return j.newSnapshot(j.progress.CommitSeq) + } + + if replacePartition.UseTempName { + log.Warnf("replacing partitions with use tmp name is not supported yet, replace partition record: %s", string(data)) + return j.newSnapshot(j.progress.CommitSeq) + } + + oldPartitions := strings.Join(replacePartition.Partitions, ",") + newPartitions := strings.Join(replacePartition.TempPartitions, ",") + log.Infof("table %s replace partitions %s with temp partitions %s", + replacePartition.TableName, oldPartitions, newPartitions) + + partitions := replacePartition.Partitions + if replacePartition.UseTempName { + partitions = replacePartition.TempPartitions + } + + return j.newPartialSnapshot(replacePartition.TableId, replacePartition.TableName, partitions, false) +} + +func (j *Job) handleModifyPartitions(binlog *festruct.TBinlog) error { + log.Infof("handle modify partitions binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + log.Warnf("modify partitions is not supported now, binlog data: %s", binlog.GetData()) + return nil +} + +// handle rename table +func (j *Job) handleRenameTable(binlog *festruct.TBinlog) error { + log.Infof("handle rename table binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) + + data := binlog.GetData() + renameTable, err := record.NewRenameTableFromJson(data) + if err != nil { + return err + } + + return j.handleRenameTableRecord(binlog.GetCommitSeq(), renameTable) +} + +func (j *Job) handleRenameTableRecord(commitSeq int64, renameTable *record.RenameTable) error { + // don't support rename table when table sync + if j.SyncType == TableSync { + log.Warnf("rename table is not supported when table sync, consider rebuilding this job instead") + return xerror.Errorf(xerror.Normal, "rename table is not supported when table sync, consider rebuilding this job instead") } - return tableRecords, nil -} + if j.isBinlogCommitted(renameTable.TableId, commitSeq) { + return nil + } -// Table ingestBinlog -// TODO: add check success, check ingestBinlog commitInfo -// TODO: rewrite by use tableId -func (j *Job) ingestBinlog(txnId int64, tableRecords []*record.TableRecord) 
([]*ttypes.TTabletCommitInfo, error) { - log.Infof("ingestBinlog, txnId: %d", txnId) + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + var err error + destTableName, err = j.getDestNameBySrcId(renameTable.TableId) + if err != nil { + return err + } + } - job, err := j.jobFactory.CreateJob(NewIngestContext(txnId, tableRecords), j, "IngestBinlog") - if err != nil { - return nil, err + if renameTable.NewTableName != "" && renameTable.OldTableName == "" { + // for compatible with old doris version + // + // If we synchronize all operations accurately, then the old table name should be equal to + // the destination table name. + renameTable.OldTableName = destTableName } - ingestBinlogJob, ok := job.(*IngestBinlogJob) - if !ok { - return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job) + err := j.IDest.RenameTable(destTableName, renameTable) + if err != nil { + return err } - job.Run() - if err := job.Error(); err != nil { - return nil, err + j.destMeta.GetTables() + if j.progress.TableNameMapping == nil { + j.progress.TableNameMapping = make(map[int64]string) } - return ingestBinlogJob.CommitInfos(), nil + j.progress.TableNameMapping[renameTable.TableId] = renameTable.NewTableName + + return nil } -// TODO: handle error by abort txn -func (j *Job) handleUpsert(binlog *festruct.TBinlog) error { - log.Infof("handle upsert binlog, sub sync state: %s", j.progress.SubSyncState) +func (j *Job) handleReplaceTable(binlog *festruct.TBinlog) error { + log.Infof("handle replace table binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) - // inMemory will be update in state machine, but progress keep any, so progress.inMemory is also latest, well call NextSubCheckpoint don't need to upate inMemory in progress - // TODO(IMPROVE): some steps not need all data, so we can reset some data in progress, such as RollbackTransaction only need txnId - type inMemoryData struct { - CommitSeq int64 `json:"commit_seq"` - TxnId int64 `json:"txn_id"` - DestTableIds []int64 `json:"dest_table_ids"` - TableRecords []*record.TableRecord `json:"table_records"` - CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"` + record, err := record.NewReplaceTableRecordFromJson(binlog.GetData()) + if err != nil { + return err } - upateInMemory := func() error { - if j.progress.InMemoryData == nil { - persistData := j.progress.PersistData - inMemoryData := &inMemoryData{} - if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil { - return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData) - } - j.progress.InMemoryData = inMemoryData - } - return nil - } + return j.handleReplaceTableRecord(binlog.GetCommitSeq(), record) +} - rollback := func(err error, inMemoryData *inMemoryData) { - log.Errorf("need rollback, err: %+v", err) - j.progress.NextSubCheckpoint(RollbackTransaction, inMemoryData) +func (j *Job) handleReplaceTableRecord(commitSeq int64, record *record.ReplaceTableRecord) error { + if j.SyncType == TableSync { + log.Infof("replace table %s with fullsync in table sync, reset src table id from %d to %d, swap: %t", + record.OriginTableName, record.OriginTableId, record.NewTableId, record.SwapTable) + j.Src.TableId = record.NewTableId + return j.newSnapshot(commitSeq) } - dest := &j.Dest - switch j.progress.SubSyncState { - case Done: - if binlog == nil { - log.Errorf("binlog is nil, %+v", xerror.Errorf(xerror.Normal, "handle nil upsert binlog")) - return nil 
- } + if j.isBinlogCommitted(record.OriginTableId, commitSeq) { + return nil + } - data := binlog.GetData() - upsert, err := record.NewUpsertFromJson(data) - if err != nil { - return err - } - log.Debugf("upsert: %v", upsert) + toName := record.OriginTableName + fromName := record.NewTableName + if err := j.IDest.ReplaceTable(fromName, toName, record.SwapTable); err != nil { + return err + } - // TODO(Fix) - // commitSeq := upsert.CommitSeq + j.destMeta.GetTables() // update id <=> name cache + if j.progress.TableNameMapping == nil { + j.progress.TableNameMapping = make(map[int64]string) + } + if record.SwapTable { + // keep table mapping + j.progress.TableNameMapping[record.OriginTableId] = record.NewTableName + j.progress.TableNameMapping[record.NewTableId] = record.OriginTableName + } else { // delete table1 + j.progress.TableNameMapping[record.NewTableId] = record.OriginTableName + delete(j.progress.TableNameMapping, record.OriginTableId) + delete(j.progress.TableMapping, record.OriginTableId) + } - // Step 1: get related tableRecords - tableRecords, err := j.getReleatedTableRecords(upsert) - if err != nil { - log.Errorf("get releated table records failed, err: %+v", err) - } - if len(tableRecords) == 0 { - log.Debug("no releated table records") - return nil - } + return nil +} - log.Debugf("tableRecords: %v", tableRecords) - destTableIds := make([]int64, 0, len(tableRecords)) - if j.SyncType == DBSync { - for _, tableRecord := range tableRecords { - if destTableId, err := j.getDestTableIdBySrc(tableRecord.Id); err != nil { - return err - } else { - destTableIds = append(destTableIds, destTableId) - } - } - } else { - destTableIds = append(destTableIds, j.Dest.TableId) - } - inMemoryData := &inMemoryData{ - CommitSeq: upsert.CommitSeq, - DestTableIds: destTableIds, - TableRecords: tableRecords, - } - j.progress.NextSubVolatile(BeginTransaction, inMemoryData) +func (j *Job) handleModifyTableAddOrDropInvertedIndices(binlog *festruct.TBinlog) error { + log.Infof("handle modify table add or drop inverted indices binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) - case BeginTransaction: - // Step 2: begin txn - inMemoryData := j.progress.InMemoryData.(*inMemoryData) - commitSeq := inMemoryData.CommitSeq - log.Debugf("begin txn, dest: %v, commitSeq: %d", dest, commitSeq) + data := binlog.GetData() + modifyTableAddOrDropInvertedIndices, err := record.NewModifyTableAddOrDropInvertedIndicesFromJson(data) + if err != nil { + return err + } - destRpc, err := j.rpcFactory.NewFeRpc(dest) - if err != nil { - return err - } + return j.handleModifyTableAddOrDropInvertedIndicesRecord(binlog.GetCommitSeq(), modifyTableAddOrDropInvertedIndices) +} - label := j.newLabel(commitSeq) +func (j *Job) handleModifyTableAddOrDropInvertedIndicesRecord(commitSeq int64, record *record.ModifyTableAddOrDropInvertedIndices) error { + if j.isBinlogCommitted(record.TableId, commitSeq) { + return nil + } - beginTxnResp, err := destRpc.BeginTransaction(dest, label, inMemoryData.DestTableIds) + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + var err error + destTableName, err = j.getDestNameBySrcId(record.TableId) if err != nil { return err } - log.Debugf("resp: %v", beginTxnResp) - if beginTxnResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK { - return xerror.Errorf(xerror.Normal, "begin txn failed, status: %v", beginTxnResp.GetStatus()) - } - txnId := beginTxnResp.GetTxnId() - log.Debugf("TxnId: %d, DbId: %d", txnId, 
beginTxnResp.GetDbId()) - - inMemoryData.TxnId = txnId - j.progress.NextSubCheckpoint(IngestBinlog, inMemoryData) - - case IngestBinlog: - log.Debug("ingest binlog") - if err := upateInMemory(); err != nil { - return err - } - inMemoryData := j.progress.InMemoryData.(*inMemoryData) - tableRecords := inMemoryData.TableRecords - txnId := inMemoryData.TxnId + } - // TODO: 反查现在的状况 - // Step 3: ingest binlog - var commitInfos []*ttypes.TTabletCommitInfo - commitInfos, err := j.ingestBinlog(txnId, tableRecords) - if err != nil { - rollback(err, inMemoryData) - } else { - log.Debugf("commitInfos: %v", commitInfos) - inMemoryData.CommitInfos = commitInfos - j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData) - } + return j.IDest.LightningIndexChange(destTableName, record) +} - case CommitTransaction: - // Step 4: commit txn - log.Debug("commit txn") - if err := upateInMemory(); err != nil { - return err - } - inMemoryData := j.progress.InMemoryData.(*inMemoryData) - txnId := inMemoryData.TxnId - commitInfos := inMemoryData.CommitInfos +func (j *Job) handleIndexChangeJob(binlog *festruct.TBinlog) error { + log.Infof("handle index change job binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) - destRpc, err := j.rpcFactory.NewFeRpc(dest) - if err != nil { - rollback(err, inMemoryData) - break - } + data := binlog.GetData() + indexChangeJob, err := record.NewIndexChangeJobFromJson(data) + if err != nil { + return err + } - resp, err := destRpc.CommitTransaction(dest, txnId, commitInfos) - if err != nil { - rollback(err, inMemoryData) - break - } + return j.handleIndexChangeJobRecord(binlog.GetCommitSeq(), indexChangeJob) +} - if statusCode := resp.Status.GetStatusCode(); statusCode == tstatus.TStatusCode_PUBLISH_TIMEOUT { - dest.WaitTransactionDone(txnId) - } else if statusCode != tstatus.TStatusCode_OK { - err := xerror.Errorf(xerror.Normal, "commit txn failed, status: %v", resp.Status) - rollback(err, inMemoryData) - break - } +func (j *Job) handleIndexChangeJobRecord(commitSeq int64, indexChangeJob *record.IndexChangeJob) error { + if j.isBinlogCommitted(indexChangeJob.TableId, commitSeq) { + return nil + } - log.Infof("commit TxnId: %d resp: %v", txnId, resp) - commitSeq := j.progress.CommitSeq - destTableIds := inMemoryData.DestTableIds - if j.SyncType == DBSync && len(j.progress.TableCommitSeqMap) > 0 { - for _, tableId := range destTableIds { - tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId] - if !ok { - continue - } + if indexChangeJob.JobState != record.INDEX_CHANGE_JOB_STATE_FINISHED || + indexChangeJob.IsDropOp { + log.Debugf("skip index change job binlog, job state: %s, is drop op: %t", + indexChangeJob.JobState, indexChangeJob.IsDropOp) + return nil + } - if tableCommitSeq < commitSeq { - j.progress.TableCommitSeqMap[tableId] = commitSeq - } - // TODO: [PERFORMANCE] remove old commit seq - } + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + destTableName = indexChangeJob.TableName + } - j.progress.Persist() - } - j.progress.Done() - return nil + return j.IDest.BuildIndex(destTableName, indexChangeJob) +} - case RollbackTransaction: - log.Debugf("Rollback txn") - // Not Step 5: just rollback txn - if err := upateInMemory(); err != nil { - return err - } +// handle alter view def +func (j *Job) handleAlterViewDef(binlog *festruct.TBinlog) error { + log.Infof("handle alter view def binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) - 
inMemoryData := j.progress.InMemoryData.(*inMemoryData) - txnId := inMemoryData.TxnId - destRpc, err := j.rpcFactory.NewFeRpc(dest) - if err != nil { - return err - } + data := binlog.GetData() + alterView, err := record.NewAlterViewFromJson(data) + if err != nil { + return err + } + return j.handleAlterViewDefRecord(binlog.GetCommitSeq(), alterView) +} - resp, err := destRpc.RollbackTransaction(dest, txnId) - if err != nil { - return err - } - if resp.Status.GetStatusCode() != tstatus.TStatusCode_OK { - return xerror.Errorf(xerror.Normal, "rollback txn failed, status: %v", resp.Status) - } - log.Infof("rollback TxnId: %d resp: %v", txnId, resp) - j.progress.Done() +func (j *Job) handleAlterViewDefRecord(commitSeq int64, alterView *record.AlterView) error { + if j.isBinlogCommitted(alterView.TableId, commitSeq) { return nil + } - default: - return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState) + viewName, err := j.getDestNameBySrcId(alterView.TableId) + if err != nil { + return err } - return j.handleUpsert(binlog) + return j.IDest.AlterViewDef(j.Src.Database, viewName, alterView) } -// handleAddPartition -func (j *Job) handleAddPartition(binlog *festruct.TBinlog) error { - log.Infof("handle add partition binlog") +func (j *Job) handleRenamePartition(binlog *festruct.TBinlog) error { + log.Infof("handle rename partition binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) data := binlog.GetData() - addPartition, err := record.NewAddPartitionFromJson(data) + renamePartition, err := record.NewRenamePartitionFromJson(data) if err != nil { return err } + return j.handleRenamePartitionRecord(binlog.GetCommitSeq(), renamePartition) +} + +func (j *Job) handleRenamePartitionRecord(commitSeq int64, renamePartition *record.RenamePartition) error { + if j.isBinlogCommitted(renamePartition.TableId, commitSeq) { + return nil + } - destDbName := j.Dest.Database var destTableName string if j.SyncType == TableSync { destTableName = j.Dest.Table - } else if j.SyncType == DBSync { - destTableName, err = j.destMeta.GetTableNameById(addPartition.TableId) + } else { + var err error + destTableName, err = j.getDestNameBySrcId(renamePartition.TableId) if err != nil { return err } } - // addPartitionSql = "ALTER TABLE " + sql - addPartitionSql := fmt.Sprintf("ALTER TABLE %s.%s %s", destDbName, destTableName, addPartition.Sql) - log.Infof("addPartitionSql: %s", addPartitionSql) - return j.IDest.Exec(addPartitionSql) + newPartition := renamePartition.NewPartitionName + oldPartition := renamePartition.OldPartitionName + if oldPartition == "" { + log.Warnf("old partition name is empty, sync partition via partial snapshot, "+ + "new partition: %s, partition id: %d, table id: %d, commit seq: %d", + newPartition, renamePartition.PartitionId, renamePartition.TableId, commitSeq) + replace := true + tableName := destTableName + if j.isTableSyncWithAlias() { + tableName = j.Src.Table + } + return j.newPartialSnapshot(renamePartition.TableId, tableName, nil, replace) + } + return j.IDest.RenamePartition(destTableName, oldPartition, newPartition) } -// handleDropPartition -func (j *Job) handleDropPartition(binlog *festruct.TBinlog) error { - log.Infof("handle drop partition binlog") +func (j *Job) handleRenameRollup(binlog *festruct.TBinlog) error { + log.Infof("handle rename rollup binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) data := binlog.GetData() - dropPartition, err := 
record.NewDropPartitionFromJson(data) + renameRollup, err := record.NewRenameRollupFromJson(data) if err != nil { return err } - destDbName := j.Dest.Database + return j.handleRenameRollupRecord(binlog.GetCommitSeq(), renameRollup) +} + +func (j *Job) handleRenameRollupRecord(commitSeq int64, renameRollup *record.RenameRollup) error { + if j.isBinlogCommitted(renameRollup.TableId, commitSeq) { + return nil + } + var destTableName string if j.SyncType == TableSync { destTableName = j.Dest.Table - } else if j.SyncType == DBSync { - destTableName, err = j.destMeta.GetTableNameById(dropPartition.TableId) + } else { + var err error + destTableName, err = j.getDestNameBySrcId(renameRollup.TableId) if err != nil { return err } } - // dropPartitionSql = "ALTER TABLE " + sql - dropPartitionSql := fmt.Sprintf("ALTER TABLE %s.%s %s", destDbName, destTableName, dropPartition.Sql) - log.Infof("dropPartitionSql: %s", dropPartitionSql) - return j.IDest.Exec(dropPartitionSql) -} - -// handleCreateTable -func (j *Job) handleCreateTable(binlog *festruct.TBinlog) error { - log.Infof("handle create table binlog") - - if j.SyncType != DBSync { - return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType) - } - - data := binlog.GetData() - createTable, err := record.NewCreateTableFromJson(data) - if err != nil { - return err + newRollup := renameRollup.NewRollupName + oldRollup := renameRollup.OldRollupName + if oldRollup == "" { + log.Warnf("old rollup name is empty, sync rollup via partial snapshot, "+ + "new rollup: %s, index id: %d, table id: %d, commit seq: %d", + newRollup, renameRollup.IndexId, renameRollup.TableId, commitSeq) + replace := true + tableName := destTableName + if j.isTableSyncWithAlias() { + tableName = j.Src.Table + } + return j.newPartialSnapshot(renameRollup.TableId, tableName, nil, replace) } - sql := createTable.Sql - log.Infof("createTableSql: %s", sql) - // HACK: for drop table - err = j.IDest.DbExec(sql) - j.srcMeta.GetTables() - j.destMeta.GetTables() - return err + return j.IDest.RenameRollup(destTableName, oldRollup, newRollup) } -// handleDropTable -func (j *Job) handleDropTable(binlog *festruct.TBinlog) error { - log.Infof("handle drop table binlog") - - if j.SyncType != DBSync { - return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType) - } +func (j *Job) handleDropRollup(binlog *festruct.TBinlog) error { + log.Infof("handle drop rollup binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) data := binlog.GetData() - dropTable, err := record.NewDropTableFromJson(data) + dropRollup, err := record.NewDropRollupFromJson(data) if err != nil { return err } - tableName := dropTable.TableName - // depreated - if tableName == "" { - dirtySrcTables := j.srcMeta.DirtyGetTables() - srcTable, ok := dirtySrcTables[dropTable.TableId] - if !ok { - return xerror.Errorf(xerror.Normal, "table not found, tableId: %d", dropTable.TableId) - } - - tableName = srcTable.Name - } - - sql := fmt.Sprintf("DROP TABLE %s FORCE", tableName) - log.Infof("dropTableSql: %s", sql) - err = j.IDest.DbExec(sql) - j.srcMeta.GetTables() - j.destMeta.GetTables() - return err + return j.handleDropRollupRecord(binlog.GetCommitSeq(), dropRollup) } -func (j *Job) handleDummy(binlog *festruct.TBinlog) error { - dummyCommitSeq := binlog.GetCommitSeq() +func (j *Job) handleDropRollupRecord(commitSeq int64, dropRollup *record.DropRollup) error { + if j.isBinlogCommitted(dropRollup.TableId, commitSeq) { + return nil + } - log.Infof("handle dummy binlog, need 
full sync. SyncType: %v, seq: %v", j.SyncType, dummyCommitSeq) + var destTableName string + if j.SyncType == TableSync { + destTableName = j.Dest.Table + } else { + destTableName = dropRollup.TableName + } - return j.newSnapshot(dummyCommitSeq) + return j.IDest.DropRollup(destTableName, dropRollup.IndexName) } -// handleAlterJob -func (j *Job) handleAlterJob(binlog *festruct.TBinlog) error { - log.Infof("handle alter job binlog") +func (j *Job) handleRecoverInfo(binlog *festruct.TBinlog) error { + log.Infof("handle recoverInfo binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) data := binlog.GetData() - alterJob, err := record.NewAlterJobV2FromJson(data) + recoverInfo, err := record.NewRecoverInfoFromJson(data) if err != nil { return err } - if alterJob.TableName == "" { - return xerror.Errorf(xerror.Normal, "invalid alter job, tableName: %s", alterJob.TableName) - } - if !alterJob.IsFinished() { + + return j.handleRecoverInfoRecord(binlog.GetCommitSeq(), recoverInfo) +} + +func (j *Job) handleRecoverInfoRecord(commitSeq int64, recoverInfo *record.RecoverInfo) error { + if j.isBinlogCommitted(recoverInfo.TableId, commitSeq) { return nil } - // HACK: busy loop for success - // TODO: Add to state machine - for { - // drop table dropTableSql - // TODO: [IMPROVEMENT] use rename table instead of drop table - var dropTableSql string - if j.SyncType == TableSync { - dropTableSql = fmt.Sprintf("DROP TABLE %s FORCE", j.Dest.Table) + if recoverInfo.IsRecoverTable() { + var tableName string + if recoverInfo.NewTableName != "" { + tableName = recoverInfo.NewTableName } else { - dropTableSql = fmt.Sprintf("DROP TABLE %s FORCE", alterJob.TableName) - } - log.Infof("dropTableSql: %s", dropTableSql) - - if err := j.destMeta.DbExec(dropTableSql); err == nil { - break + tableName = recoverInfo.TableName } + log.Infof("recover info with for table %s, will trigger partial sync", tableName) + return j.newPartialSnapshot(recoverInfo.TableId, tableName, nil, true) } - return j.newSnapshot(j.progress.CommitSeq) + var partitions []string + if recoverInfo.NewPartitionName != "" { + partitions = append(partitions, recoverInfo.NewPartitionName) + } else { + partitions = append(partitions, recoverInfo.PartitionName) + } + log.Infof("recover info with for partition(%s) for table %s, will trigger partial sync", + partitions, recoverInfo.TableName) + // if source does multiple recover of partition, then there is a race + // condition and some recover might miss due to commitseq change after snapshot. 
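+	// Passing nil instead of the partitions slice built above makes the partial snapshot
+	// cover the whole table, which appears to be how that race is tolerated, at the cost
+	// of re-syncing more data.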
+ return j.newPartialSnapshot(recoverInfo.TableId, recoverInfo.TableName, nil, true) } -// handleLightningSchemaChange -func (j *Job) handleLightningSchemaChange(binlog *festruct.TBinlog) error { - log.Infof("handle lightning schema change binlog") +func (j *Job) handleRestoreInfo(binlog *festruct.TBinlog) error { + log.Infof("handle restore info binlog, prevCommitSeq: %d, commitSeq: %d", + j.progress.PrevCommitSeq, j.progress.CommitSeq) data := binlog.GetData() - lightningSchemaChange, err := record.NewModifyTableAddOrDropColumnsFromJson(data) + restoreInfo, err := record.NewRestoreInfoFromJson(data) if err != nil { return err } - - log.Debugf("lightningSchemaChange %v", lightningSchemaChange) - - rawSql := lightningSchemaChange.RawSql - // "rawSql": "ALTER TABLE `default_cluster:ccr`.`test_ddl` ADD COLUMN `nid1` int(11) NULL COMMENT \"\"" - // replace `default_cluster:${Src.Database}`.`test_ddl` to `test_ddl` - sql := strings.Replace(rawSql, fmt.Sprintf("`default_cluster:%s`.", j.Src.Database), "", 1) - log.Infof("lightningSchemaChangeSql, rawSql: %s, sql: %s", rawSql, sql) - return j.IDest.DbExec(sql) + return j.handleRestoreInfoRecord(binlog.GetCommitSeq(), restoreInfo) } -func (j *Job) handleTruncateTable(binlog *festruct.TBinlog) error { - log.Infof("handle truncate table binlog") +func (j *Job) handleRestoreInfoRecord(commitSeq int64, restoreInfo *record.RestoreInfo) error { + if len(restoreInfo.TableInfo) != 1 { + // for both table and db sync take a full snapshot. + log.Warnf("Lets do new snapshot") + return j.newSnapshot(commitSeq) + } + + if len(restoreInfo.TableInfo) == 1 { + for tableId, tableName := range restoreInfo.TableInfo { + switch j.SyncType { + case TableSync: + log.Warnf("full snapshot, table:%d and name:%s", + tableId, tableName) + return j.newSnapshot(commitSeq) + case DBSync: + log.Warnf("new partial snapshot, table:%d and name:%s", + tableId, tableName) + replace := true // replace the old data to avoid blocking reading + return j.newPartialSnapshot(tableId, tableName, nil, replace) + default: + break + } + } + } + //This is unreachable. 
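+	// (Reached only if the sync type is neither TableSync nor DBSync.)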
+ return nil +} +func (j *Job) handleBarrier(binlog *festruct.TBinlog) error { data := binlog.GetData() - truncateTable, err := record.NewTruncateTableFromJson(data) + barrierLog, err := record.NewBarrierLogFromJson(data) if err != nil { return err } - var destTableName string - switch j.SyncType { - case DBSync: - destTableName = truncateTable.TableName - case TableSync: - destTableName = j.Dest.Table - default: - return xerror.Panicf(xerror.Normal, "invalid sync type: %v", j.SyncType) - } - - var sql string - if truncateTable.RawSql == "" { - sql = fmt.Sprintf("TRUNCATE TABLE %s", destTableName) - } else { - sql = fmt.Sprintf("TRUNCATE TABLE %s %s", destTableName, truncateTable.RawSql) + if barrierLog.Binlog == "" { + log.Info("handle barrier binlog, ignore it") + return nil } - log.Infof("truncateTableSql: %s", sql) + binlogType := festruct.TBinlogType(barrierLog.BinlogType) + log.Infof("handle barrier binlog with type %s, prevCommitSeq: %d, commitSeq: %d", + binlogType, j.progress.PrevCommitSeq, j.progress.CommitSeq) - err = j.IDest.DbExec(sql) - if err == nil { - if srcTableName, err := j.srcMeta.GetTableNameById(truncateTable.TableId); err == nil { - // if err != nil, maybe truncate table had been dropped - j.srcMeta.ClearTable(j.Src.Database, srcTableName) + commitSeq := binlog.GetCommitSeq() + switch binlogType { + case festruct.TBinlogType_RENAME_TABLE: + renameTable, err := record.NewRenameTableFromJson(barrierLog.Binlog) + if err != nil { + return err } - j.destMeta.ClearTable(j.Dest.Database, destTableName) + return j.handleRenameTableRecord(commitSeq, renameTable) + case festruct.TBinlogType_RENAME_COLUMN: + renameColumn, err := record.NewRenameColumnFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleRenameColumnRecord(commitSeq, renameColumn) + case festruct.TBinlogType_RENAME_PARTITION: + renamePartition, err := record.NewRenamePartitionFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleRenamePartitionRecord(commitSeq, renamePartition) + case festruct.TBinlogType_RENAME_ROLLUP: + renameRollup, err := record.NewRenameRollupFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleRenameRollupRecord(binlog.GetCommitSeq(), renameRollup) + case festruct.TBinlogType_DROP_ROLLUP: + dropRollup, err := record.NewDropRollupFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleDropRollupRecord(commitSeq, dropRollup) + case festruct.TBinlogType_REPLACE_TABLE: + replaceTable, err := record.NewReplaceTableRecordFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleReplaceTableRecord(commitSeq, replaceTable) + case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES: + m, err := record.NewModifyTableAddOrDropInvertedIndicesFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleModifyTableAddOrDropInvertedIndicesRecord(commitSeq, m) + case festruct.TBinlogType_INDEX_CHANGE_JOB: + job, err := record.NewIndexChangeJobFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleIndexChangeJobRecord(commitSeq, job) + case festruct.TBinlogType_MODIFY_VIEW_DEF: + alterView, err := record.NewAlterViewFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleAlterViewDefRecord(commitSeq, alterView) + case festruct.TBinlogType_MODIFY_COMMENT: + modifyComment, err := record.NewModifyCommentFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return 
j.handleModifyCommentRecord(commitSeq, modifyComment) + case festruct.TBinlogType_RECOVER_INFO: + recoverInfo, err := record.NewRecoverInfoFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleRecoverInfoRecord(commitSeq, recoverInfo) + case festruct.TBinlogType_RESTORE_INFO: + restoreInfo, err := record.NewRestoreInfoFromJson(barrierLog.Binlog) + if err != nil { + return err + } + return j.handleRestoreInfoRecord(commitSeq, restoreInfo) + case festruct.TBinlogType_BARRIER: + log.Info("handle barrier binlog, ignore it") + default: + return xerror.Errorf(xerror.Normal, "unknown binlog type wrapped by barrier: %d", barrierLog.BinlogType) } - - return err + return nil } // return: error && bool backToRunLoop func (j *Job) handleBinlogs(binlogs []*festruct.TBinlog) (error, bool) { + log.Infof("handle binlogs, binlogs size: %d", len(binlogs)) + for _, binlog := range binlogs { // Step 1: dispatch handle binlog if err := j.handleBinlog(binlog); err != nil { + log.Errorf("handle binlog failed, prevCommitSeq: %d, commitSeq: %d, binlog type: %s, binlog data: %s", + j.progress.PrevCommitSeq, j.progress.CommitSeq, binlog.GetType(), binlog.GetData()) return err, false } + // Step 2: check job state, if not incrementalSync, such as DBPartialSync, break + if !j.isIncrementalSync() { + log.Debugf("job state is not incremental sync, back to run loop, job state: %s", j.progress.SyncState) + return nil, true + } + + // Step 3: update progress commitSeq := binlog.GetCommitSeq() if j.SyncType == DBSync && j.progress.TableCommitSeqMap != nil { - // TODO: [PERFORMANCE] use largest tableCommitSeq in memorydata to acc it // when all table commit seq > commitSeq, it's true reachSwitchToDBIncrementalSync := true for _, tableCommitSeq := range j.progress.TableCommitSeqMap { @@ -1049,15 +2783,10 @@ func (j *Job) handleBinlogs(binlogs []*festruct.TBinlog) (error, bool) { } } - // Step 2: update progress to db + // Step 4: update progress to db if !j.progress.IsDone() { j.progress.Done() } - - // Step 3: check job state, if not incrementalSync, break - if !j.isIncrementalSync() { - return nil, true - } } return nil, false } @@ -1067,15 +2796,15 @@ func (j *Job) handleBinlog(binlog *festruct.TBinlog) error { return xerror.Errorf(xerror.Normal, "invalid binlog: %v", binlog) } - log.Debugf("binlog data: %s", binlog.GetData()) + log.Debugf("binlog type: %s, binlog data: %s", binlog.GetType(), binlog.GetData()) // Step 2: update job progress j.progress.StartHandle(binlog.GetCommitSeq()) + xmetrics.HandlingBinlog(j.Name, binlog.GetCommitSeq()) - // TODO: use table driven, keep this and driven, conert BinlogType to TBinlogType switch binlog.GetType() { case festruct.TBinlogType_UPSERT: - return j.handleUpsert(binlog) + return j.handleUpsertWithRetry(binlog) case festruct.TBinlogType_ADD_PARTITION: return j.handleAddPartition(binlog) case festruct.TBinlogType_CREATE_TABLE: @@ -1088,16 +2817,44 @@ func (j *Job) handleBinlog(binlog *festruct.TBinlog) error { return j.handleAlterJob(binlog) case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS: return j.handleLightningSchemaChange(binlog) + case festruct.TBinlogType_RENAME_COLUMN: + return j.handleRenameColumn(binlog) + case festruct.TBinlogType_MODIFY_COMMENT: + return j.handleModifyComment(binlog) case festruct.TBinlogType_DUMMY: return j.handleDummy(binlog) case festruct.TBinlogType_ALTER_DATABASE_PROPERTY: - // TODO(Drogon) + log.Info("handle alter database property binlog, ignore it") case festruct.TBinlogType_MODIFY_TABLE_PROPERTY: - // 
TODO(Drogon) + log.Info("handle alter table property binlog, ignore it") case festruct.TBinlogType_BARRIER: - log.Info("handle barrier binlog") + return j.handleBarrier(binlog) case festruct.TBinlogType_TRUNCATE_TABLE: return j.handleTruncateTable(binlog) + case festruct.TBinlogType_RENAME_TABLE: + return j.handleRenameTable(binlog) + case festruct.TBinlogType_REPLACE_PARTITIONS: + return j.handleReplacePartitions(binlog) + case festruct.TBinlogType_MODIFY_PARTITIONS: + return j.handleModifyPartitions(binlog) + case festruct.TBinlogType_REPLACE_TABLE: + return j.handleReplaceTable(binlog) + case festruct.TBinlogType_MODIFY_VIEW_DEF: + return j.handleAlterViewDef(binlog) + case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES: + return j.handleModifyTableAddOrDropInvertedIndices(binlog) + case festruct.TBinlogType_INDEX_CHANGE_JOB: + return j.handleIndexChangeJob(binlog) + case festruct.TBinlogType_RENAME_PARTITION: + return j.handleRenamePartition(binlog) + case festruct.TBinlogType_RENAME_ROLLUP: + return j.handleRenameRollup(binlog) + case festruct.TBinlogType_DROP_ROLLUP: + return j.handleDropRollup(binlog) + case festruct.TBinlogType_RECOVER_INFO: + return j.handleRecoverInfo(binlog) + case festruct.TBinlogType_RESTORE_INFO: + return j.handleRestoreInfo(binlog) default: return xerror.Errorf(xerror.Normal, "unknown binlog type: %v", binlog.GetType()) } @@ -1110,7 +2867,7 @@ func (j *Job) recoverIncrementalSync() error { case BinlogUpsert: return j.handleUpsert(nil) default: - j.progress.Rollback() + j.progress.Rollback(j.SkipError) } return nil @@ -1118,7 +2875,8 @@ func (j *Job) recoverIncrementalSync() error { func (j *Job) incrementalSync() error { if !j.progress.IsDone() { - log.Infof("job progress is not done, state is (%s), need recover", j.progress.SubSyncState) + log.Infof("job progress is not done, need recover. state: %s, prevCommitSeq: %d, commitSeq: %d", + j.progress.SubSyncState, j.progress.PrevCommitSeq, j.progress.CommitSeq) return j.recoverIncrementalSync() } @@ -1126,7 +2884,7 @@ func (j *Job) incrementalSync() error { // Step 1: get binlog log.Debug("start incremental sync") src := &j.Src - srcRpc, err := j.rpcFactory.NewFeRpc(src) + srcRpc, err := j.factory.NewFeRpc(src) if err != nil { log.Errorf("new fe rpc failed, src: %v, err: %+v", src, err) return err @@ -1134,12 +2892,20 @@ func (j *Job) incrementalSync() error { // Step 2: handle all binlog for { + if j.forceFullsync { + log.Warnf("job is forced to step fullsync by user") + j.forceFullsync = false + _ = j.newSnapshot(j.progress.CommitSeq) + return nil + } + + // The CommitSeq is equals to PrevCommitSeq in here. 
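+		// In other words, the previous batch of binlogs has been fully handled, so the
+		// GetBinlog call below resumes from the last persisted commit seq.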
commitSeq := j.progress.CommitSeq log.Debugf("src: %s, commitSeq: %v", src, commitSeq) getBinlogResp, err := srcRpc.GetBinlog(src, commitSeq) if err != nil { - return nil + return err } log.Debugf("resp: %v", getBinlogResp) @@ -1157,7 +2923,8 @@ func (j *Job) incrementalSync() error { case tstatus.TStatusCode_BINLOG_NOT_FOUND_TABLE: return xerror.Errorf(xerror.Normal, "can't found table") default: - return xerror.Errorf(xerror.Normal, "invalid binlog status type: %v", status.StatusCode) + return xerror.Errorf(xerror.Normal, "invalid binlog status type: %v, msg: %s", + status.StatusCode, utils.FirstOr(status.GetErrorMsgs(), "")) } // Step 2.2: handle binlogs records if has job @@ -1197,6 +2964,9 @@ func (j *Job) tableSync() error { case TableIncrementalSync: log.Debug("table incremental sync") return j.incrementalSync() + case TablePartialSync: + log.Debug("table partial sync") + return j.partialSync() default: return xerror.Errorf(xerror.Normal, "unknown sync state: %v", j.progress.SyncState) } @@ -1208,7 +2978,6 @@ func (j *Job) dbTablesIncrementalSync() error { return j.incrementalSync() } -// TODO(Drogon): impl DBSpecificTableFullSync func (j *Job) dbSpecificTableFullSync() error { log.Debug("db specific table full sync") @@ -1227,6 +2996,9 @@ func (j *Job) dbSync() error { case DBIncrementalSync: log.Debug("db incremental sync") return j.incrementalSync() + case DBPartialSync: + log.Debug("db partial sync") + return j.partialSync() default: return xerror.Errorf(xerror.Normal, "unknown db sync state: %v", j.progress.SyncState) } @@ -1250,17 +3022,19 @@ func (j *Job) sync() error { func (j *Job) handleError(err error) error { var xerr *xerror.XError if !errors.As(err, &xerr) { + log.Errorf("convert error to xerror failed, err: %+v", err) return nil } + xmetrics.AddError(xerr) if xerr.IsPanic() { + log.Errorf("job panic, job: %s, err: %+v", j.Name, err) return err } - // TODO(Drogon): do more things, not only snapshot if xerr.Category() == xerror.Meta { - // TODO(Drogon): handle error - j.newSnapshot(j.progress.CommitSeq) + log.Warnf("receive meta category error, make new snapshot, job: %s, err: %v", j.Name, err) + _ = j.newSnapshot(j.progress.CommitSeq) } return nil } @@ -1272,18 +3046,25 @@ func (j *Job) run() { var panicError error for { + j.updateJobStatus() + + // do maybeDeleted first to avoid mark job deleted after job stopped & before job run & close stop chan gap in Delete, so job will not run + if j.maybeDeleted() { + return + } + select { case <-j.stop: gls.DeleteGls(gls.GoID()) log.Infof("job stopped, job: %s", j.Name) return + case <-ticker.C: + // loop to print error, not panic, waiting for user to pause/stop/remove Job if j.getJobState() != JobRunning { break } - // loop to print error, not panic, waiting for user to pause/stop/remove Job - // TODO(Drogon): Add user resume the job, so reset panicError for retry if panicError != nil { log.Errorf("job panic, job: %s, err: %+v", j.Name, panicError) break @@ -1294,7 +3075,7 @@ func (j *Job) run() { break } - log.Errorf("job sync failed, job: %s, err: %+v", j.Name, err) + log.Warnf("job sync failed, job: %s, err: %+v", j.Name, err) panicError = j.handleError(err) } } @@ -1303,6 +3084,9 @@ func (j *Job) run() { func (j *Job) newSnapshot(commitSeq int64) error { log.Infof("new snapshot, commitSeq: %d", commitSeq) + j.progress.PartialSyncData = nil + j.progress.TableAliases = nil + j.progress.SyncId += 1 switch j.SyncType { case TableSync: j.progress.NextWithPersist(commitSeq, TableFullSync, BeginCreateSnapshot, "") @@ -1317,6 
+3101,58 @@ func (j *Job) newSnapshot(commitSeq int64) error { } } +// New partial snapshot, with the source cluster table name and the partitions to sync. +// A empty partitions means to sync the whole table. +// +// If the replace is true, the restore task will load data into a new table and replaces the old +// one when restore finished. So replace requires whole table partial sync. +func (j *Job) newPartialSnapshot(tableId int64, table string, partitions []string, replace bool) error { + if j.SyncType == TableSync && table != j.Src.Table { + return xerror.Errorf(xerror.Normal, + "partial sync table name is not equals to the source name %s, table: %s, sync type: table", j.Src.Table, table) + } + + if replace && len(partitions) != 0 { + return xerror.Errorf(xerror.Normal, + "partial sync with replace but partitions is not empty, table: %s, len: %d", table, len(partitions)) + } + + // The binlog of commitSeq will be skipped once the partial snapshot finished. + commitSeq := j.progress.CommitSeq + + syncData := &JobPartialSyncData{ + TableId: tableId, + Table: table, + Partitions: partitions, + } + j.progress.PartialSyncData = syncData + j.progress.TableAliases = nil + j.progress.SyncId += 1 + if replace { + alias := TableAlias(table) + j.progress.TableAliases = make(map[string]string) + j.progress.TableAliases[table] = alias + log.Infof("new partial snapshot, commitSeq: %d, table id: %d, table: %s, alias: %s", + commitSeq, tableId, table, alias) + } else { + log.Infof("new partial snapshot, commitSeq: %d, table id: %d, table: %s, partitions: %v", + commitSeq, tableId, table, partitions) + } + + switch j.SyncType { + case TableSync: + j.progress.NextWithPersist(commitSeq, TablePartialSync, BeginCreateSnapshot, "") + return nil + case DBSync: + j.progress.NextWithPersist(commitSeq, DBPartialSync, BeginCreateSnapshot, "") + return nil + default: + err := xerror.Panicf(xerror.Normal, "unknown table sync type: %v", j.SyncType) + log.Fatalf("run %+v", err) + return err + } +} + // run job func (j *Job) Run() error { gls.ResetGls(gls.GoID(), map[interface{}]interface{}{}) @@ -1351,8 +3187,8 @@ func (j *Job) Run() error { // Hack: for drop table if j.SyncType == DBSync { - j.srcMeta.GetTables() - j.destMeta.GetTables() + j.srcMeta.ClearTablesCache() + j.destMeta.ClearTablesCache() } j.run() @@ -1366,37 +3202,23 @@ func (j *Job) desyncTable() error { if err != nil { return err } - - desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", tableName) - log.Debugf("db exec: %s", desyncSql) - if err := j.IDest.DbExec(desyncSql); err != nil { - return xerror.Wrapf(err, xerror.FE, "failed tables: %s", tableName) - } - return nil + return j.IDest.DesyncTables(tableName) } func (j *Job) desyncDB() error { log.Debugf("desync db") - var failedTable string = "" tables, err := j.destMeta.GetTables() if err != nil { return err } + tableNames := []string{} for _, tableMeta := range tables { - desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", tableMeta.Name) - log.Debugf("db exec: %s", desyncSql) - if err := j.IDest.DbExec(desyncSql); err != nil { - failedTable += tableMeta.Name + " " - } - } - - if failedTable != "" { - return xerror.Errorf(xerror.FE, "failed tables: %s", failedTable) + tableNames = append(tableNames, tableMeta.Name) } - return nil + return j.IDest.DesyncTables(tableNames...) 
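+	// DesyncTables is assumed to batch what the removed per-table loop above did,
+	// roughly:
+	//
+	//	for _, name := range tables {
+	//		sql := fmt.Sprintf(`ALTER TABLE %s SET ("is_being_synced"="false")`, name)
+	//		// execute via the destination spec, collecting failed tables into one error
+	//	}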
} func (j *Job) Desync() error { @@ -1407,13 +3229,79 @@ func (j *Job) Desync() error { } } +func (j *Job) UpdateSkipError(skipError bool) error { + j.lock.Lock() + defer j.lock.Unlock() + + originSkipError := j.SkipError + if originSkipError == skipError { + return nil + } + + j.SkipError = skipError + if err := j.persistJob(); err != nil { + j.SkipError = originSkipError + return err + } else { + return nil + } +} + // stop job func (j *Job) Stop() { close(j.stop) } +// delete job +func (j *Job) Delete() { + j.isDeleted.Store(true) + close(j.stop) +} + +func (j *Job) maybeDeleted() bool { + if !j.isDeleted.Load() { + return false + } + + // job had been deleted + log.Infof("job deleted, job: %s, remove in db", j.Name) + if err := j.db.RemoveJob(j.Name); err != nil { + log.Errorf("remove job failed, job: %s, err: %+v", j.Name, err) + } + return true +} + +func (j *Job) updateFrontends() error { + if frontends, err := j.srcMeta.GetFrontends(); err != nil { + log.Warnf("get src frontends failed, fe: %+v", j.Src) + return err + } else { + for _, frontend := range frontends { + j.Src.Frontends = append(j.Src.Frontends, *frontend) + } + } + log.Debugf("src frontends %+v", j.Src.Frontends) + + if frontends, err := j.destMeta.GetFrontends(); err != nil { + log.Warnf("get dest frontends failed, fe: %+v", j.Dest) + return err + } else { + for _, frontend := range frontends { + j.Dest.Frontends = append(j.Dest.Frontends, *frontend) + } + } + log.Debugf("dest frontends %+v", j.Dest.Frontends) + + return nil +} + func (j *Job) FirstRun() error { - log.Info("first run check job", zap.String("src", j.Src.String()), zap.String("dest", j.Dest.String())) + log.Infof("first run check job, src: %s, dest: %s", &j.Src, &j.Dest) + + // Step 0: get all frontends + if err := j.updateFrontends(); err != nil { + return err + } // Step 1: check fe and be binlog feature is enabled if err := j.srcMeta.CheckBinlogFeature(); err != nil { @@ -1479,7 +3367,7 @@ func (j *Job) FirstRun() error { } else { j.Dest.DbId = destDbId } - if j.SyncType == TableSync { + if j.SyncType == TableSync && !j.allowTableExists { dest_table_exists, err := j.IDest.CheckTableExists() if err != nil { return err @@ -1492,13 +3380,12 @@ func (j *Job) FirstRun() error { return nil } -// HACK: temp impl func (j *Job) GetLag() (int64, error) { j.lock.Lock() defer j.lock.Unlock() srcSpec := &j.Src - rpc, err := j.rpcFactory.NewFeRpc(srcSpec) + rpc, err := j.factory.NewFeRpc(srcSpec) if err != nil { return 0, err } @@ -1551,6 +3438,26 @@ func (j *Job) Resume() error { return j.changeJobState(JobRunning) } +func (j *Job) ForceFullsync() { + log.Infof("force job %s step full sync", j.Name) + + j.lock.Lock() + defer j.lock.Unlock() + j.forceFullsync = true +} + +type RawJobStatus struct { + state int32 + progressState int32 +} + +func (j *Job) updateJobStatus() { + atomic.StoreInt32(&j.rawStatus.state, int32(j.State)) + if j.progress != nil { + atomic.StoreInt32(&j.rawStatus.progressState, int32(j.progress.SyncState)) + } +} + type JobStatus struct { Name string `json:"name"` State string `json:"state"` @@ -1558,15 +3465,89 @@ type JobStatus struct { } func (j *Job) Status() *JobStatus { - j.lock.Lock() - defer j.lock.Unlock() - - state := j.State.String() - progress_state := j.progress.SyncState.String() + state := JobState(atomic.LoadInt32(&j.rawStatus.state)).String() + progressState := SyncState(atomic.LoadInt32(&j.rawStatus.progressState)).String() return &JobStatus{ Name: j.Name, State: state, - ProgressState: progress_state, + ProgressState: 
progressState, + } +} + +func (j *Job) UpdateHostMapping(srcHostMaps, destHostMaps map[string]string) error { + j.lock.Lock() + defer j.lock.Unlock() + + oldSrcHostMapping := j.Src.HostMapping + if j.Src.HostMapping == nil { + j.Src.HostMapping = make(map[string]string) + } + for private, public := range srcHostMaps { + if public == "" { + delete(j.Src.HostMapping, private) + } else { + j.Src.HostMapping[private] = public + } + } + + oldDestHostMapping := j.Dest.HostMapping + if j.Dest.HostMapping == nil { + j.Dest.HostMapping = make(map[string]string) + } + for private, public := range destHostMaps { + if public == "" { + delete(j.Dest.HostMapping, private) + } else { + j.Dest.HostMapping[private] = public + } + } + + if err := j.persistJob(); err != nil { + j.Src.HostMapping = oldSrcHostMapping + j.Dest.HostMapping = oldDestHostMapping + return err + } + + log.Debugf("update job %s src host mapping %+v, dest host mapping: %+v", j.Name, srcHostMaps, destHostMaps) + return nil +} + +func isTxnCommitted(status *tstatus.TStatus) bool { + return isStatusContainsAny(status, "is already COMMITTED") +} + +func isTxnNotFound(status *tstatus.TStatus) bool { + errMessages := status.GetErrorMsgs() + for _, errMessage := range errMessages { + // detailMessage = transaction not found + // or detailMessage = transaction [12356] not found + if strings.Contains(errMessage, "transaction not found") || regexp.MustCompile(`transaction \[\d+\] not found`).MatchString(errMessage) { + return true + } + } + return false +} + +func isTxnAborted(status *tstatus.TStatus) bool { + return isStatusContainsAny(status, "is already aborted") +} + +func isTableNotFound(status *tstatus.TStatus) bool { + // 1. FE FrontendServiceImpl.beginTxnImpl + // 2. FE FrontendServiceImpl.commitTxnImpl + // 3. 
FE Table.tryWriteLockOrMetaException + return isStatusContainsAny(status, "can't find table id:", "table not found", "unknown table") +} + +func isStatusContainsAny(status *tstatus.TStatus, patterns ...string) bool { + errMessages := status.GetErrorMsgs() + for _, errMessage := range errMessages { + for _, substr := range patterns { + if strings.Contains(errMessage, substr) { + return true + } + } } + return false } diff --git a/pkg/ccr/job_factory.go b/pkg/ccr/job_factory.go index 326bf1ed..914657f4 100644 --- a/pkg/ccr/job_factory.go +++ b/pkg/ccr/job_factory.go @@ -2,8 +2,7 @@ package ccr import "context" -type JobFactory struct { -} +type JobFactory struct{} // create job factory func NewJobFactory() *JobFactory { diff --git a/pkg/ccr/job_manager.go b/pkg/ccr/job_manager.go index 6e1f4a4d..fa6e92a6 100644 --- a/pkg/ccr/job_manager.go +++ b/pkg/ccr/job_manager.go @@ -2,16 +2,16 @@ package ccr import ( "encoding/json" + "fmt" "sync" "github.com/selectdb/ccr_syncer/pkg/storage" "github.com/selectdb/ccr_syncer/pkg/xerror" + "github.com/selectdb/ccr_syncer/pkg/xmetrics" log "github.com/sirupsen/logrus" ) -const ( - ErrJobExist = "job exist" -) +var errJobExist = xerror.NewWithoutStack(xerror.Normal, "job exist") // job manager is thread safety type JobManager struct { @@ -47,7 +47,7 @@ func (jm *JobManager) AddJob(job *Job) error { // Step 1: check job exist if _, ok := jm.jobs[job.Name]; ok { - return xerror.Errorf(xerror.Normal, "%s: %s", ErrJobExist, job.Name) + return xerror.XWrapf(errJobExist, "job: %s", job.Name) } // Step 2: check job first run, mostly for dest/src fe db/table info @@ -68,6 +68,9 @@ func (jm *JobManager) AddJob(job *Job) error { jm.jobs[job.Name] = job jm.runJob(job) + // Step 5: add metrics + xmetrics.AddNewJob(job.Name) + return nil } @@ -82,6 +85,7 @@ func (jm *JobManager) Recover(jobNames []string) error { if _, ok := jm.jobs[jobName]; ok { continue } + log.Infof("recover job: %s", jobName) if jobInfo, err := jm.db.GetJobInfo(jobName); err != nil { @@ -107,14 +111,21 @@ func (jm *JobManager) RemoveJob(name string) error { jm.lock.Lock() defer jm.lock.Unlock() + job := jm.jobs[name] // check job exist - if job, ok := jm.jobs[name]; ok { - // stop job - job.Stop() + if job == nil { + return xerror.Errorf(xerror.Normal, "job not exist: %s", name) + } + + // stop job + job.Delete() + if err := jm.db.RemoveJob(name); err == nil { delete(jm.jobs, name) - return jm.db.RemoveJob(name) + log.Infof("job [%s] has been successfully deleted, but it needs to wait until an isochronous point before it will completely STOP", name) + return nil } else { - return xerror.Errorf(xerror.Normal, "job not exist: %s", name) + log.Errorf("remove job [%s] in db failed: %+v, but job is stopped", name, err) + return fmt.Errorf("remove job [%s] in db failed, but job is stopped, if can resume/delete, please do it manually", name) } } @@ -194,6 +205,13 @@ func (jm *JobManager) Resume(jobName string) error { }) } +func (jm *JobManager) ForceFullsync(jobName string) error { + return jm.dealJob(jobName, func(job *Job) error { + job.ForceFullsync() + return nil + }) +} + func (jm *JobManager) GetJobStatus(jobName string) (*JobStatus, error) { jm.lock.RLock() defer jm.lock.RUnlock() @@ -226,3 +244,25 @@ func (jm *JobManager) ListJobs() []*JobStatus { } return jobs } + +func (jm *JobManager) UpdateJobSkipError(jobName string, skipError bool) error { + jm.lock.Lock() + defer jm.lock.Unlock() + + if job, ok := jm.jobs[jobName]; ok { + return job.UpdateSkipError(skipError) + } else { + return 
xerror.Errorf(xerror.Normal, "job not exist: %s", jobName) + } +} + +func (jm *JobManager) UpdateHostMapping(jobName string, srcHostMapping, destHostMapping map[string]string) error { + jm.lock.Lock() + defer jm.lock.Unlock() + + if job, ok := jm.jobs[jobName]; ok { + return job.UpdateHostMapping(srcHostMapping, destHostMapping) + } else { + return xerror.Errorf(xerror.Normal, "job not exist: %s", jobName) + } +} diff --git a/pkg/ccr/job_progress.go b/pkg/ccr/job_progress.go index 3c9d9803..133f554e 100644 --- a/pkg/ccr/job_progress.go +++ b/pkg/ccr/job_progress.go @@ -7,8 +7,8 @@ import ( "github.com/selectdb/ccr_syncer/pkg/storage" "github.com/selectdb/ccr_syncer/pkg/xerror" + "github.com/selectdb/ccr_syncer/pkg/xmetrics" log "github.com/sirupsen/logrus" - "go.uber.org/zap" ) // TODO: rewrite all progress by two level state machine @@ -26,10 +26,12 @@ const ( DBTablesIncrementalSync SyncState = 1 DBSpecificTableFullSync SyncState = 2 DBIncrementalSync SyncState = 3 + DBPartialSync SyncState = 4 // sync partitions // Table sync state machine states TableFullSync SyncState = 500 TableIncrementalSync SyncState = 501 + TablePartialSync SyncState = 502 // TODO: add timeout state for restart full sync ) @@ -45,10 +47,14 @@ func (s SyncState) String() string { return "DBSpecificTableFullSync" case DBIncrementalSync: return "DBIncrementalSync" + case DBPartialSync: + return "DBPartialSync" case TableFullSync: return "TableFullSync" case TableIncrementalSync: return "TableIncrementalSync" + case TablePartialSync: + return "TablePartialSync" default: return fmt.Sprintf("Unknown SyncState: %d", s) } @@ -89,6 +95,8 @@ var ( AddExtraInfo SubSyncState = SubSyncState{State: 2, BinlogType: BinlogNone} RestoreSnapshot SubSyncState = SubSyncState{State: 3, BinlogType: BinlogNone} PersistRestoreInfo SubSyncState = SubSyncState{State: 4, BinlogType: BinlogNone} + WaitBackupDone SubSyncState = SubSyncState{State: 5, BinlogType: BinlogNone} + WaitRestoreDone SubSyncState = SubSyncState{State: 6, BinlogType: BinlogNone} BeginTransaction SubSyncState = SubSyncState{State: 11, BinlogType: BinlogUpsert} IngestBinlog SubSyncState = SubSyncState{State: 12, BinlogType: BinlogUpsert} @@ -127,6 +135,13 @@ func (s SubSyncState) String() string { } } +type JobPartialSyncData struct { + TableId int64 `json:"table_id"` + Table string `json:"table"` + PartitionIds []int64 `json:"partition_ids"` + Partitions []string `json:"partitions"` +} + type JobProgress struct { JobName string `json:"job_name"` db storage.DB `json:"-"` @@ -136,15 +151,36 @@ type JobProgress struct { // Sub sync state machine states SubSyncState SubSyncState `json:"sub_sync_state"` - PrevCommitSeq int64 `json:"prev_commit_seq"` - CommitSeq int64 `json:"commit_seq"` - TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` // only for DBTablesIncrementalSync - InMemoryData any `json:"-"` - PersistData string `json:"data"` // this often for binlog or snapshot info + // The sync id of full/partial snapshot + SyncId int64 `json:"job_sync_id"` + // The commit seq where the target cluster has synced. + PrevCommitSeq int64 `json:"prev_commit_seq"` + CommitSeq int64 `json:"commit_seq"` + TableMapping map[int64]int64 `json:"table_mapping"` + // the upstream table id to name mapping, build during the fullsync, + // keep snapshot to avoid rename. it might be staled. 
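+ // Name-to-id lookups on this mapping go through JobProgress.GetTableId below, which scans the map linearly.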
+ TableNameMapping map[int64]string `json:"table_name_mapping,omitempty"` + TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` // only for DBTablesIncrementalSync + InMemoryData any `json:"-"` + PersistData string `json:"data"` // this often for binlog or snapshot info + PartialSyncData *JobPartialSyncData `json:"partial_sync_data,omitempty"` + + // The tables need to be replaced rather than dropped during sync. + TableAliases map[string]string `json:"table_aliases,omitempty"` + + // The shadow indexes of the pending schema changes + ShadowIndexes map[int64]int64 `json:"shadow_index_map,omitempty"` + + // Some fields to save the unix epoch time of the key timepoint. + CreatedAt int64 `json:"created_at,omitempty"` + FullSyncStartAt int64 `json:"full_sync_start_at,omitempty"` + IncrementalSyncStartAt int64 `json:"incremental_sync_start_at,omitempty"` + IngestBinlogAt int64 `json:"ingest_binlog_at,omitempty"` } func (j *JobProgress) String() string { - return fmt.Sprintf("JobProgress{JobName: %s, SyncState: %s, SubSyncState: %s, CommitSeq: %d, TableCommitSeqMap: %v, InMemoryData: %v, PersistData: %s}", j.JobName, j.SyncState, j.SubSyncState, j.CommitSeq, j.TableCommitSeqMap, j.InMemoryData, j.PersistData) + // const maxStringLength = 64 + return fmt.Sprintf("JobProgress{JobName: %s, SyncState: %s, SubSyncState: %s, CommitSeq: %d, TableCommitSeqMap: %v, InMemoryData: %.64v, PersistData: %.64s}", j.JobName, j.SyncState, j.SubSyncState, j.CommitSeq, j.TableCommitSeqMap, j.InMemoryData, j.PersistData) } func NewJobProgress(jobName string, syncType SyncType, db storage.DB) *JobProgress { @@ -158,13 +194,23 @@ func NewJobProgress(jobName string, syncType SyncType, db storage.DB) *JobProgre JobName: jobName, db: db, + SyncId: time.Now().Unix(), SyncState: syncState, SubSyncState: BeginCreateSnapshot, CommitSeq: 0, + TableMapping: nil, TableCommitSeqMap: nil, InMemoryData: nil, PersistData: "", + PartialSyncData: nil, + TableAliases: nil, + ShadowIndexes: nil, + + CreatedAt: time.Now().Unix(), + FullSyncStartAt: 0, + IncrementalSyncStartAt: 0, + IngestBinlogAt: 0, } } @@ -176,7 +222,7 @@ func NewJobProgressFromJson(jobName string, db storage.DB) (*JobProgress, error) for i := 0; i < 3; i++ { jsonData, err = db.GetProgress(jobName) if err != nil { - log.Error("get job progress failed", zap.String("job", jobName), zap.Error(err)) + log.Errorf("get job progress failed, error: %+v", err) continue } break @@ -195,8 +241,17 @@ func NewJobProgressFromJson(jobName string, db storage.DB) (*JobProgress, error) } } +// GetTableId get table id by table name from TableNameMapping +func (j *JobProgress) GetTableId(tableName string) (int64, bool) { + for tableId, table := range j.TableNameMapping { + if table == tableName { + return tableId, true + } + } + return 0, false +} + func (j *JobProgress) StartHandle(commitSeq int64) { - j.PrevCommitSeq = j.CommitSeq j.CommitSeq = commitSeq j.Persist() @@ -229,6 +284,10 @@ func _convertToPersistData(persistData any) string { // Persist is checkpint, next state only get it from persistData func (j *JobProgress) NextSubCheckpoint(subSyncState SubSyncState, persistData any) { + if subSyncState == IngestBinlog { + j.IngestBinlogAt = time.Now().Unix() + } + j.SubSyncState = subSyncState j.PersistData = _convertToPersistData(persistData) @@ -247,8 +306,24 @@ func (j *JobProgress) CommitNextSubWithPersist(commitSeq int64, subSyncState Sub j.Persist() } +// Switch to new sync state. +// +// The PrevCommitSeq is set to commitSeq, if the sub sync state is done. 
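+// +// It also maintains the progress timestamps: entering a full sync (BeginCreateSnapshot with TableFullSync/DBFullSync) +// records FullSyncStartAt and clears the incremental and ingest timestamps, while reaching Done for an incremental +// sync state records IncrementalSyncStartAt.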
func (j *JobProgress) NextWithPersist(commitSeq int64, syncState SyncState, subSyncState SubSyncState, persistData string) { + if subSyncState == BeginCreateSnapshot && (syncState == TableFullSync || syncState == DBFullSync) { + j.FullSyncStartAt = time.Now().Unix() + j.IncrementalSyncStartAt = 0 + j.IngestBinlogAt = 0 + } else if subSyncState == Done && (syncState == TableIncrementalSync || syncState == DBIncrementalSync) { + j.IncrementalSyncStartAt = time.Now().Unix() + j.IngestBinlogAt = 0 + } + j.CommitSeq = commitSeq + if subSyncState == Done { + j.PrevCommitSeq = commitSeq + } + j.SyncState = syncState j.SubSyncState = subSyncState j.PersistData = persistData @@ -257,7 +332,7 @@ func (j *JobProgress) NextWithPersist(commitSeq int64, syncState SyncState, subS j.Persist() } -func (j *JobProgress) IsDone() bool { return j.SubSyncState == Done } +func (j *JobProgress) IsDone() bool { return j.SubSyncState == Done && j.PrevCommitSeq == j.CommitSeq } // TODO(Drogon): check reset some fields func (j *JobProgress) Done() { @@ -266,29 +341,36 @@ func (j *JobProgress) Done() { j.SubSyncState = Done j.PrevCommitSeq = j.CommitSeq + xmetrics.ConsumeBinlog(j.JobName, j.PrevCommitSeq) + j.Persist() } -func (j *JobProgress) Rollback() { +func (j *JobProgress) Rollback(skipError bool) { log.Debugf("job %s step rollback", j.JobName) j.SubSyncState = Done - j.CommitSeq = j.PrevCommitSeq + // if rollback, then prev commit seq is the last commit seq + // but if skip error, we can consume the binlog then prev commit seq is the last commit seq + if !skipError { + j.CommitSeq = j.PrevCommitSeq + } + xmetrics.Rollback(j.JobName, j.PrevCommitSeq) j.Persist() } // write progress to db, busy loop until success // TODO: add timeout check func (j *JobProgress) Persist() { - log.Debugf("update job progress: %s", j) + log.Trace("update job progress") for { // Step 1: to json // TODO: fix to json error jsonBytes, err := json.Marshal(j) if err != nil { - log.Error("parse job progress failed", zap.String("job", j.JobName), zap.Error(err)) + log.Errorf("parse job progress failed, error: %+v", err) time.Sleep(UPDATE_JOB_PROGRESS_DURATION) continue } @@ -296,7 +378,7 @@ func (j *JobProgress) Persist() { // Step 2: write to db err = j.db.UpdateProgress(j.JobName, string(jsonBytes)) if err != nil { - log.Error("update job progress failed", zap.String("job", j.JobName), zap.Error(err)) + log.Errorf("update job progress failed, error: %+v", err) time.Sleep(UPDATE_JOB_PROGRESS_DURATION) continue } @@ -304,5 +386,6 @@ func (j *JobProgress) Persist() { break } - log.Debugf("update job progress done: %s", j) + log.Tracef("update job progress done, state: %s, subState: %s, commitSeq: %d, prevCommitSeq: %d", + j.SyncState, j.SubSyncState, j.CommitSeq, j.PrevCommitSeq) } diff --git a/pkg/ccr/job_progress_test.go b/pkg/ccr/job_progress_test.go index db1c511e..2db519d2 100644 --- a/pkg/ccr/job_progress_test.go +++ b/pkg/ccr/job_progress_test.go @@ -14,6 +14,20 @@ func init() { log.SetOutput(io.Discard) } +func deepEqual(got, expect string) bool { + var v1, v2 interface{} + err := json.Unmarshal([]byte(got), &v1) + if err != nil { + return false + } + + err = json.Unmarshal([]byte(expect), &v2) + if err != nil { + return false + } + return reflect.DeepEqual(v1, v2) +} + func TestJobProgress_MarshalJSON(t *testing.T) { type fields struct { JobName string @@ -22,15 +36,17 @@ func TestJobProgress_MarshalJSON(t *testing.T) { SubSyncState SubSyncState PrevCommitSeq int64 CommitSeq int64 + TableMapping map[int64]int64 TransactionId int64 
TableCommitSeqMap map[int64]int64 InMemoryData any PersistData string + TableAliases map[string]string } tests := []struct { name string fields fields - want []byte + want string wantErr bool }{ { @@ -45,8 +61,27 @@ func TestJobProgress_MarshalJSON(t *testing.T) { TableCommitSeqMap: map[int64]int64{1: 2}, InMemoryData: nil, PersistData: "test-data", + TableAliases: map[string]string{"table": "alias"}, }, - want: []byte(`{"job_name":"test-job","sync_state":500,"sub_sync_state":{"state":0,"binlog_type":-1},"prev_commit_seq":0,"commit_seq":1,"table_commit_seq_map":{"1":2},"data":"test-data"}`), + want: `{ + "job_name": "test-job", + "sync_state": 500, + "sub_sync_state": { + "state": 0, + "binlog_type": -1 + }, + "job_sync_id":0, + "prev_commit_seq": 0, + "commit_seq": 1, + "table_mapping": null, + "table_commit_seq_map": { + "1": 2 + }, + "data": "test-data", + "table_aliases": { + "table": "alias" + } +}`, wantErr: false, }, } @@ -62,13 +97,14 @@ func TestJobProgress_MarshalJSON(t *testing.T) { TableCommitSeqMap: tt.fields.TableCommitSeqMap, InMemoryData: tt.fields.InMemoryData, PersistData: tt.fields.PersistData, + TableAliases: tt.fields.TableAliases, } got, err := json.Marshal(jp) if (err != nil) != tt.wantErr { t.Errorf("JobProgress.MarshalJSON() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { + if !deepEqual(string(got), tt.want) { t.Errorf("JobProgress.MarshalJSON() = %v, want %v", string(got), string(tt.want)) } }) diff --git a/pkg/ccr/job_test.go b/pkg/ccr/job_test.go deleted file mode 100644 index 3fddaa8e..00000000 --- a/pkg/ccr/job_test.go +++ /dev/null @@ -1,1357 +0,0 @@ -package ccr - -import ( - "context" - "encoding/json" - "fmt" - "testing" - - "github.com/selectdb/ccr_syncer/pkg/ccr/base" - "github.com/selectdb/ccr_syncer/pkg/ccr/record" - rpc "github.com/selectdb/ccr_syncer/pkg/rpc" - bestruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice" - festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" - "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" - ttypes "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" - "github.com/selectdb/ccr_syncer/pkg/test_util" - "github.com/selectdb/ccr_syncer/pkg/xerror" - "github.com/tidwall/btree" - "go.uber.org/mock/gomock" -) - -const ( - backendBaseId = int64(0xdeadbeef * 10) - tableBaseId = int64(23330) - dbBaseId = int64(114514) -) - -var ( - dbSrcSpec base.Spec - dbDestSpec base.Spec - tblSrcSpec base.Spec - tblDestSpec base.Spec -) - -func init() { - dbSrcSpec = base.Spec{ - Frontend: base.Frontend{ - Host: "localhost", - Port: "9030", - ThriftPort: "9020", - }, - User: "root", - Password: "", - Database: "src_db_case", - DbId: dbBaseId, - Table: "", - } - dbDestSpec = base.Spec{ - Frontend: base.Frontend{ - Host: "localhost", - Port: "9030", - ThriftPort: "9020", - }, - User: "root", - Password: "", - Database: "dest_db_case", - DbId: dbBaseId, - Table: "", - } - tblSrcSpec = base.Spec{ - Frontend: base.Frontend{ - Host: "localhost", - Port: "9030", - ThriftPort: "9020", - }, - User: "root", - Password: "", - Database: "src_tbl_case", - DbId: dbBaseId, - Table: fmt.Sprint(tableBaseId), - TableId: tableBaseId, - } - tblDestSpec = base.Spec{ - Frontend: base.Frontend{ - Host: "localhost", - Port: "9030", - ThriftPort: "9020", - }, - User: "root", - Password: "", - Database: "dest_tbl_case", - DbId: dbBaseId, - Table: fmt.Sprint(tableBaseId), - TableId: tableBaseId, - } -} - -func getPartitionBaseId(tableId int64) int64 { - return tableId * 10 
-} - -func getIndexBaseId(partitionId int64) int64 { - return partitionId * 10 -} - -func getTabletBaseId(indexId int64) int64 { - return indexId * 10 -} - -func getReplicaBaseId(indexId int64) int64 { - return indexId * 100 -} - -type BinlogImpl struct { - CommitSeq int64 - Timestamp int64 - Type festruct.TBinlogType - DbId int64 -} - -func newTestBinlog(binlogType festruct.TBinlogType, tableIds []int64) *festruct.TBinlog { - binlogImpl := BinlogImpl{ - CommitSeq: 114, - Timestamp: 514, - Type: binlogType, - DbId: 114514, - } - binlog := &festruct.TBinlog{ - CommitSeq: &binlogImpl.CommitSeq, - Timestamp: &binlogImpl.Timestamp, - Type: &binlogImpl.Type, - DbId: &binlogImpl.DbId, - TableIds: tableIds, - } - - return binlog -} - -func newMeta(spec *base.Spec, backends *map[int64]*base.Backend) *DatabaseMeta { - var tableIds []int64 - if spec.Table == "" { - tableIds = make([]int64, 0, 3) - for i := 0; i < 3; i++ { - tableIds = append(tableIds, tableBaseId+int64(i)) - } - } else { - tableIds = make([]int64, 0, 1) - tableIds = append(tableIds, spec.TableId) - } - - dbMeta := newDatabaseMeta(spec.DbId) - for _, tableId := range tableIds { - tblMeta := newTableMeta(tableId) - tblMeta.DatabaseMeta = dbMeta - - partitionId := getPartitionBaseId(tableId) - partitionMeta := newPartitionMeta(partitionId) - partitionMeta.TableMeta = tblMeta - tblMeta.PartitionIdMap[partitionId] = partitionMeta - tblMeta.PartitionRangeMap[fmt.Sprint(partitionId)] = partitionMeta - - indexId := getIndexBaseId(partitionId) - indexMeta := newIndexMeta(indexId) - indexMeta.PartitionMeta = partitionMeta - partitionMeta.IndexIdMap[indexId] = indexMeta - partitionMeta.IndexNameMap[indexMeta.Name] = indexMeta - - tabletId := getTabletBaseId(indexId) - tabletMeta := newTabletMeta(tabletId) - tabletMeta.IndexMeta = indexMeta - tabletMeta.ReplicaMetas = indexMeta.ReplicaMetas - indexMeta.TabletMetas.Set(tabletId, tabletMeta) - - replicaBaseId := getReplicaBaseId(indexId) - backendNum := len(*backends) - backendIds := make([]int64, 0, backendNum) - for backendId := range *backends { - backendIds = append(backendIds, backendId) - } - for i := 0; i < backendNum; i++ { - replicaId := replicaBaseId + int64(i) - replicaMeta := newReplicaMeta(replicaId) - replicaMeta.TabletMeta = tabletMeta - replicaMeta.TabletId = tabletId - replicaMeta.BackendId = backendIds[replicaId%int64(backendNum)] - indexMeta.ReplicaMetas.Set(replicaId, replicaMeta) - } - dbMeta.Tables[tableId] = tblMeta - } - - return dbMeta -} - -func newDatabaseMeta(dbId int64) *DatabaseMeta { - return &DatabaseMeta{ - Id: dbId, - Tables: make(map[int64]*TableMeta), - } -} - -func newTableMeta(tableId int64) *TableMeta { - return &TableMeta{ - Id: tableId, - Name: fmt.Sprint(tableId), - PartitionIdMap: make(map[int64]*PartitionMeta), - PartitionRangeMap: make(map[string]*PartitionMeta), - } -} - -func newPartitionMeta(partitionId int64) *PartitionMeta { - return &PartitionMeta{ - Id: partitionId, - Name: fmt.Sprint(partitionId), - Key: fmt.Sprint(partitionId), - Range: fmt.Sprint(partitionId), - IndexIdMap: make(map[int64]*IndexMeta), - IndexNameMap: make(map[string]*IndexMeta), - } -} - -func newIndexMeta(indexId int64) *IndexMeta { - return &IndexMeta{ - Id: indexId, - Name: fmt.Sprint(indexId), - TabletMetas: btree.NewMap[int64, *TabletMeta](degree), - ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree), - } -} - -func newTabletMeta(tabletId int64) *TabletMeta { - return &TabletMeta{ - Id: tabletId, - } -} - -func newReplicaMeta(replicaId int64) *ReplicaMeta { - return 
&ReplicaMeta{ - Id: replicaId, - } -} - -func newBackendMap(backendNum int) map[int64]*base.Backend { - backendMap := make(map[int64]*base.Backend) - for i := 0; i < backendNum; i++ { - backendId := backendBaseId + int64(i) - backendMap[backendId] = &base.Backend{ - Id: backendId, - Host: "localhost", - HeartbeatPort: 0xbeef, - BePort: 0xbeef, - HttpPort: 0xbeef, - BrpcPort: 0xbeef, - } - } - - return backendMap -} - -type UpsertContext struct { - context.Context - CommitSeq int64 - DbId int64 - TableId int64 - TxnId int64 - Version int64 - PartitionId int64 - IndexId int64 - TabletId int64 -} - -func newUpsertData(ctx context.Context) (string, error) { - upsertContext, ok := ctx.(*UpsertContext) - if !ok { - return "", xerror.Errorf(xerror.Normal, "invalid context type: %T", ctx) - } - - dataMap := make(map[string]interface{}) - dataMap["commitSeq"] = upsertContext.CommitSeq - dataMap["txnId"] = upsertContext.TxnId - dataMap["timeStamp"] = 514 - dataMap["label"] = "insert_cca56f22e3624ab2_90b6b4ac06b44360" - dataMap["dbId"] = upsertContext.DbId - tableMap := make(map[string]interface{}) - dataMap["tableRecords"] = tableMap - - recordMap := make(map[string]interface{}) - - partitionRecords := make([]map[string]interface{}, 0, 1) - partitionRecord := make(map[string]interface{}) - partitionRecord["partitionId"] = upsertContext.PartitionId - partitionRecord["range"] = fmt.Sprint(upsertContext.PartitionId) - partitionRecord["version"] = upsertContext.Version - partitionRecords = append(partitionRecords, partitionRecord) - recordMap["partitionRecords"] = partitionRecords - - indexRecords := make([]int64, 0, 1) - indexRecords = append(indexRecords, upsertContext.IndexId) - recordMap["indexIds"] = indexRecords - - tableMap[fmt.Sprint(upsertContext.TableId)] = recordMap - - if data, err := json.Marshal(dataMap); err != nil { - return "", err - } else { - return string(data), nil - } -} - -type inMemoryData struct { - CommitSeq int64 `json:"commit_seq"` - TxnId int64 `json:"txn_id"` - DestTableIds []int64 `json:"dest_table_ids"` - TableRecords []*record.TableRecord `json:"table_records"` - CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"` -} - -func upateInMemory(jobProgress *JobProgress) error { - persistData := jobProgress.PersistData - inMemoryData := &inMemoryData{} - if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil { - return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData) - } - jobProgress.InMemoryData = inMemoryData - return nil -} - -func TestHandleUpsertInTableSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init test data - txnId := int64(114514233) - commitSeq := int64(114233514) - version := int64(233114514) - srcPartitionId := getPartitionBaseId(tblSrcSpec.TableId) - srcIndexId := getIndexBaseId(srcPartitionId) - srcTabletId := getTabletBaseId(srcIndexId) - destPartitionId := getPartitionBaseId(tblSrcSpec.TableId) - destIndexId := getIndexBaseId(destPartitionId) - destTabletId := getTabletBaseId(destIndexId) - - backendMap := newBackendMap(3) - srcMeta := newMeta(&tblSrcSpec, &backendMap) - destMeta := newMeta(&tblDestSpec, &backendMap) - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } - 
if err := upateInMemory(&jobProgress); err != nil { - t.Error(err) - } - - inMemoryData := jobProgress.InMemoryData.(*inMemoryData) - if inMemoryData.TxnId != txnId { - t.Errorf("txnId missmatch: expect %d, but get %d", txnId, inMemoryData.TxnId) - } - return nil - }) - - db.EXPECT().UpdateProgress("Test", gomock.Any()).Return(nil).Times(2) - - // init factory - rpcFactory := NewMockIRpcFactory(ctrl) - metaFactory := NewMockMetaerFactory(ctrl) - factory := NewFactory(rpcFactory, metaFactory, base.NewSpecerFactory()) - - // init rpcFactory - rpcFactory.EXPECT().NewFeRpc(&tblDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) { - mockFeRpc := NewMockIFeRpc(ctrl) - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tblDestSpec.TableId) - mockFeRpc.EXPECT().BeginTransaction(&tblDestSpec, gomock.Any(), tableIds).Return( - &festruct.TBeginTxnResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - TxnId: &txnId, - JobStatus: nil, - DbId: &tblDestSpec.DbId, - }, nil) - return mockFeRpc, nil - }) - rpcFactory.EXPECT().NewFeRpc(&tblDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) { - mockFeRpc := NewMockIFeRpc(ctrl) - mockFeRpc.EXPECT().CommitTransaction(&tblDestSpec, txnId, gomock.Any()).Return( - &festruct.TCommitTxnResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - }, nil) - return mockFeRpc, nil - }) - rpcFactory.EXPECT().NewBeRpc(gomock.Any()).DoAndReturn(func(_ *base.Backend) (rpc.IBeRpc, error) { - mockBeRpc := NewMockIBeRpc(ctrl) - mockBeRpc.EXPECT().IngestBinlog(gomock.Any()).DoAndReturn( - func(req *bestruct.TIngestBinlogRequest) (*bestruct.TIngestBinlogResult_, error) { - if req.GetTxnId() != txnId { - t.Errorf("txnId is mismatch: %d, need %d", req.GetTxnId(), txnId) - } else if req.GetRemoteTabletId() != srcTabletId { - t.Errorf("remote tabletId mismatch: %d, need %d", req.GetRemoteTabletId(), srcTabletId) - } else if req.GetBinlogVersion() != version { - t.Errorf("version mismatch: %d, need %d", req.GetBinlogVersion(), version) - } else if req.GetRemoteHost() != "localhost" { - t.Errorf("remote host mismatch: %s, need localhost", req.GetRemoteHost()) - } else if req.GetPartitionId() != destPartitionId { - t.Errorf("partitionId mismatch: %d, need %d", req.GetPartitionId(), destPartitionId) - } else if req.GetLocalTabletId() != destTabletId { - t.Errorf("local tabletId mismatch: %d, need %d", req.GetLocalTabletId(), destTabletId) - } - - return &bestruct.TIngestBinlogResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - }, nil - }) - - return mockBeRpc, nil - }).Times(3) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - - mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil) - mockMeta.EXPECT().GetPartitionRangeMap(tblSrcSpec.TableId).DoAndReturn( - func(tableId int64) (map[string]*PartitionMeta, error) { - return srcMeta.Tables[tableId].PartitionRangeMap, nil - }) - mockMeta.EXPECT().GetIndexIdMap(tblSrcSpec.TableId, srcPartitionId).DoAndReturn( - func(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) { - return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap, nil - }) - mockMeta.EXPECT().GetTablets(tblSrcSpec.TableId, srcPartitionId, srcIndexId).DoAndReturn( - func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) { - return 
srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil - }) - - return mockMeta - }) - metaFactory.EXPECT().NewMeta(&tblDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil) - mockMeta.EXPECT().GetPartitionRangeMap(tblDestSpec.TableId).DoAndReturn( - func(tableId int64) (map[string]*PartitionMeta, error) { - return destMeta.Tables[tableId].PartitionRangeMap, nil - }) - mockMeta.EXPECT().GetPartitionIdByRange(tblDestSpec.TableId, fmt.Sprint(destPartitionId)).DoAndReturn( - func(tableId int64, partitionRange string) (int64, error) { - return destMeta.Tables[tableId].PartitionRangeMap[partitionRange].Id, nil - }) - mockMeta.EXPECT().GetIndexNameMap(tblDestSpec.TableId, destPartitionId).DoAndReturn( - func(tableId int64, partitionId int64) (map[string]*IndexMeta, error) { - return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexNameMap, nil - }) - mockMeta.EXPECT().GetTablets(tblDestSpec.TableId, destPartitionId, destIndexId).DoAndReturn( - func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) { - return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil - }) - return mockMeta - }) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - job.progress.SyncState = TableIncrementalSync - job.progress.SubSyncState = Done - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tblSrcSpec.TableId) - binlog := newTestBinlog(festruct.TBinlogType_UPSERT, tableIds) - upsertContext := &UpsertContext{ - Context: context.Background(), - CommitSeq: commitSeq, - DbId: tblSrcSpec.DbId, - TableId: tblSrcSpec.TableId, - TxnId: txnId, - Version: version, - PartitionId: srcPartitionId, - IndexId: srcIndexId, - TabletId: srcTabletId, - } - if data, err := newUpsertData(upsertContext); err != nil { - t.Error(err) - } else { - binlog.SetData(&data) - } - - if err := job.handleUpsert(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleUpsertInDbSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init test data - txnId := int64(114514233) - commitSeq := int64(114233514) - version := int64(233114514) - srcPartitionId := getPartitionBaseId(tableBaseId) - srcIndexId := getIndexBaseId(srcPartitionId) - srcTabletId := getTabletBaseId(srcIndexId) - destPartitionId := getPartitionBaseId(tableBaseId) - destIndexId := getIndexBaseId(destPartitionId) - destTabletId := getTabletBaseId(destIndexId) - - backendMap := newBackendMap(3) - srcMeta := newMeta(&dbSrcSpec, &backendMap) - destMeta := newMeta(&dbDestSpec, &backendMap) - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } - if err := upateInMemory(&jobProgress); err != nil { - t.Error(err) - } - - inMemoryData := jobProgress.InMemoryData.(*inMemoryData) - if inMemoryData.TxnId != txnId { - t.Errorf("txnId missmatch: expect %d, but get %d", txnId, inMemoryData.TxnId) - } - return nil - }) - - db.EXPECT().UpdateProgress("Test", 
gomock.Any()).Return(nil).Times(2) - - // init factory - rpcFactory := NewMockIRpcFactory(ctrl) - metaFactory := NewMockMetaerFactory(ctrl) - factory := NewFactory(rpcFactory, metaFactory, base.NewSpecerFactory()) - - // init rpcFactory - rpcFactory.EXPECT().NewFeRpc(&dbDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) { - mockFeRpc := NewMockIFeRpc(ctrl) - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - mockFeRpc.EXPECT().BeginTransaction(&dbDestSpec, gomock.Any(), tableIds).Return( - &festruct.TBeginTxnResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - TxnId: &txnId, - JobStatus: nil, - DbId: &dbDestSpec.DbId, - }, nil) - return mockFeRpc, nil - }) - rpcFactory.EXPECT().NewFeRpc(&dbDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) { - mockFeRpc := NewMockIFeRpc(ctrl) - mockFeRpc.EXPECT().CommitTransaction(&dbDestSpec, txnId, gomock.Any()).Return( - &festruct.TCommitTxnResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - }, nil) - return mockFeRpc, nil - }) - rpcFactory.EXPECT().NewBeRpc(gomock.Any()).DoAndReturn(func(_ *base.Backend) (rpc.IBeRpc, error) { - mockBeRpc := NewMockIBeRpc(ctrl) - mockBeRpc.EXPECT().IngestBinlog(gomock.Any()).DoAndReturn( - func(req *bestruct.TIngestBinlogRequest) (*bestruct.TIngestBinlogResult_, error) { - if req.GetTxnId() != txnId { - t.Errorf("txnId is mismatch: %d, need %d", req.GetTxnId(), txnId) - } else if req.GetRemoteTabletId() != srcTabletId { - t.Errorf("remote tabletId mismatch: %d, need %d", req.GetRemoteTabletId(), srcTabletId) - } else if req.GetBinlogVersion() != version { - t.Errorf("version mismatch: %d, need %d", req.GetBinlogVersion(), version) - } else if req.GetRemoteHost() != "localhost" { - t.Errorf("remote host mismatch: %s, need localhost", req.GetRemoteHost()) - } else if req.GetPartitionId() != destPartitionId { - t.Errorf("partitionId mismatch: %d, need %d", req.GetPartitionId(), destPartitionId) - } else if req.GetLocalTabletId() != destTabletId { - t.Errorf("local tabletId mismatch: %d, need %d", req.GetLocalTabletId(), destTabletId) - } - - return &bestruct.TIngestBinlogResult_{ - Status: &status.TStatus{ - StatusCode: status.TStatusCode_OK, - ErrorMsgs: nil, - }, - }, nil - }) - - return mockBeRpc, nil - }).Times(3) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - - mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil) - mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil) - mockMeta.EXPECT().GetPartitionRangeMap(tableBaseId).DoAndReturn( - func(tableId int64) (map[string]*PartitionMeta, error) { - return srcMeta.Tables[tableId].PartitionRangeMap, nil - }) - mockMeta.EXPECT().GetIndexIdMap(tableBaseId, srcPartitionId).DoAndReturn( - func(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) { - return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap, nil - }) - mockMeta.EXPECT().GetTablets(tableBaseId, srcPartitionId, srcIndexId).DoAndReturn( - func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) { - return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil - }) - - return mockMeta - }) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetBackendMap().Return(backendMap, 
nil) - mockMeta.EXPECT().GetTableId(fmt.Sprint(tableBaseId)).Return(tableBaseId, nil) - mockMeta.EXPECT().GetPartitionRangeMap(tblDestSpec.TableId).DoAndReturn( - func(tableId int64) (map[string]*PartitionMeta, error) { - return destMeta.Tables[tableId].PartitionRangeMap, nil - }) - mockMeta.EXPECT().GetPartitionIdByRange(tblDestSpec.TableId, fmt.Sprint(destPartitionId)).DoAndReturn( - func(tableId int64, partitionRange string) (int64, error) { - return destMeta.Tables[tableId].PartitionRangeMap[partitionRange].Id, nil - }) - mockMeta.EXPECT().GetIndexNameMap(tblDestSpec.TableId, destPartitionId).DoAndReturn( - func(tableId int64, partitionId int64) (map[string]*IndexMeta, error) { - return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexNameMap, nil - }) - mockMeta.EXPECT().GetTablets(tblDestSpec.TableId, destPartitionId, destIndexId).DoAndReturn( - func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) { - return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil - }) - return mockMeta - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - job.progress.SyncState = DBIncrementalSync - job.progress.SubSyncState = Done - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - binlog := newTestBinlog(festruct.TBinlogType_UPSERT, tableIds) - upsertContext := &UpsertContext{ - Context: context.Background(), - CommitSeq: commitSeq, - DbId: dbSrcSpec.DbId, - TableId: tableBaseId, - TxnId: txnId, - Version: version, - PartitionId: srcPartitionId, - IndexId: srcIndexId, - TabletId: srcTabletId, - } - if data, err := newUpsertData(upsertContext); err != nil { - t.Error(err) - } else { - binlog.SetData(&data) - } - - if err := job.handleUpsert(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleAddPartitionInTableSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "ADD PARTITION `zero_to_five` VALUES [(\"0\"), (\"5\"))(\"version_info\" \u003d \"1\");" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", tblDestSpec.Database, tblDestSpec.Table, testSql) - mockISpec.EXPECT().Exec(fullSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tblSrcSpec.TableId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := make(map[string]interface{}) - dataMap["tableId"] = tblSrcSpec.TableId - dataMap["sql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - 
} else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleAddPartition(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleAddPartitionInDbSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "ADD PARTITION `zero_to_five` VALUES [(\"0\"), (\"5\"))(\"version_info\" \u003d \"1\");" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl)) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil) - return mockMeta - }) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", dbDestSpec.Database, fmt.Sprint(tableBaseId), testSql) - mockISpec.EXPECT().Exec(fullSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := make(map[string]interface{}) - dataMap["tableId"] = tableBaseId - dataMap["sql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleAddPartition(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleDropPartitionInTableSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "DROP PARTITION `zero_to_five`" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", tblDestSpec.Database, tblDestSpec.Table, testSql) - mockISpec.EXPECT().Exec(fullSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tblSrcSpec.TableId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := 
make(map[string]interface{}) - dataMap["tableId"] = tblSrcSpec.TableId - dataMap["sql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleDropPartition(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleDropPartitionInDbSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "DROP PARTITION `zero_to_five`" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl)) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil) - return mockMeta - }) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", dbDestSpec.Database, fmt.Sprint(tableBaseId), testSql) - mockISpec.EXPECT().Exec(fullSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := make(map[string]interface{}) - dataMap["tableId"] = tableBaseId - dataMap["sql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleAddPartition(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleCreateTable(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "A CREATE TABLE SQL" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil) - return mockMeta - }) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil) - return mockMeta - }) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := 
NewMockSpecer(ctrl) - mockISpec.EXPECT().DbExec(testSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := make(map[string]interface{}) - dataMap["dbId"] = dbSrcSpec.DbId - dataMap["tableId"] = tableBaseId - dataMap["sql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleCreateTable(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleDropTable(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := "A DROP TABLE SQL" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil) - return mockMeta - }) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil) - return mockMeta - }) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - dropSql := fmt.Sprintf("DROP TABLE %v FORCE", tableBaseId) - mockISpec.EXPECT().DbExec(dropSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - - // init binlog - tableIds := make([]int64, 0, 1) - tableIds = append(tableIds, tableBaseId) - binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds) - dataMap := make(map[string]interface{}) - dataMap["dbId"] = dbSrcSpec.DbId - dataMap["tableId"] = tableBaseId - dataMap["tableName"] = fmt.Sprint(tableBaseId) - dataMap["rawSql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleDropTable(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleDummyInTableSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - dummyCommitSeq := int64(114514) - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } else if jobProgress.CommitSeq != dummyCommitSeq { - t.Errorf("UnExpect 
CommitSeq %v, need %v", jobProgress.CommitSeq, dummyCommitSeq) - } else if jobProgress.SyncState != TableFullSync { - t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, TableFullSync) - } else if jobProgress.SubSyncState != BeginCreateSnapshot { - t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot) - } - return nil - }) - - // init factory - factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), base.NewSpecerFactory()) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - - // init binlog - binlog := newTestBinlog(festruct.TBinlogType_DUMMY, nil) - binlog.SetCommitSeq(&dummyCommitSeq) - - // test begin - if err := job.handleDummy(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleDummyInDbSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - dummyCommitSeq := int64(114514) - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } else if jobProgress.CommitSeq != dummyCommitSeq { - t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, dummyCommitSeq) - } else if jobProgress.SyncState != DBFullSync { - t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, DBFullSync) - } else if jobProgress.SubSyncState != BeginCreateSnapshot { - t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot) - } - return nil - }) - - // init factory - factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), base.NewSpecerFactory()) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - - // init binlog - binlog := newTestBinlog(festruct.TBinlogType_DUMMY, nil) - binlog.SetCommitSeq(&dummyCommitSeq) - - // test begin - if err := job.handleDummy(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleAlterJobInTableSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - commitSeq := int64(233114514) - alterType := "UNUSED_TYPE" - jobId := int64(114514233) - jobState := "FINISHED" - rawSql := "A blank SQL" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } else if jobProgress.CommitSeq != commitSeq { - t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, commitSeq) - } else if jobProgress.SyncState != TableFullSync { - t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, TableFullSync) - } else if jobProgress.SubSyncState != BeginCreateSnapshot { - t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot) - } - return nil - }) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, 
base.NewSpecerFactory()) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&tblSrcSpec).Return(NewMockMetaer(ctrl)) - metaFactory.EXPECT().NewMeta(&tblDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - dropSql := fmt.Sprintf("DROP TABLE %s FORCE", tblDestSpec.Table) - mockMeta.EXPECT().DbExec(dropSql).Return(nil) - return mockMeta - }) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - job.progress.CommitSeq = commitSeq - - // init binlog - binlog := newTestBinlog(festruct.TBinlogType_ALTER_JOB, nil) - dataMap := make(map[string]interface{}) - dataMap["type"] = alterType - dataMap["dbId"] = tblSrcSpec.DbId - dataMap["tableId"] = tblSrcSpec.TableId - dataMap["tableName"] = fmt.Sprint(tblSrcSpec.Table) - dataMap["jobId"] = jobId - dataMap["jobState"] = jobState - dataMap["rawSql"] = rawSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleAlterJob(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleAlterJobInDbSync(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - commitSeq := int64(233114514) - alterType := "UNUSED_TYPE" - jobId := int64(114514233) - jobState := "FINISHED" - rawSql := "A blank SQL" - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn( - func(_ string, progressJson string) error { - var jobProgress JobProgress - if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil { - t.Error(err) - } else if jobProgress.CommitSeq != commitSeq { - t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, commitSeq) - } else if jobProgress.SyncState != DBFullSync { - t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, DBFullSync) - } else if jobProgress.SubSyncState != BeginCreateSnapshot { - t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot) - } - return nil - }) - - // init factory - metaFactory := NewMockMetaerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), metaFactory, base.NewSpecerFactory()) - - // init metaFactory - metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl)) - metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer { - mockMeta := NewMockMetaer(ctrl) - dropSql := fmt.Sprintf("DROP TABLE %s FORCE", fmt.Sprint(tableBaseId)) - mockMeta.EXPECT().DbExec(dropSql).Return(nil) - return mockMeta - }) - - // init job - ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - job.progress.CommitSeq = commitSeq - - // init binlog - binlog := newTestBinlog(festruct.TBinlogType_ALTER_JOB, nil) - dataMap := make(map[string]interface{}) - dataMap["type"] = alterType - dataMap["dbId"] = dbSrcSpec.DbId - dataMap["tableId"] = tableBaseId - dataMap["tableName"] = fmt.Sprint(tableBaseId) - dataMap["jobId"] = jobId - dataMap["jobState"] = jobState - dataMap["rawSql"] = rawSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if 
err := job.handleAlterJob(binlog); err != nil { - t.Error(err) - } -} - -func TestHandleLightningSchemaChange(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // init data - testSql := fmt.Sprintf("`default_cluster:%s`.`%s` a test sql", tblSrcSpec.Database, tblSrcSpec.Table) - - // init db_mock - db := test_util.NewMockDB(ctrl) - db.EXPECT().IsJobExist("Test").Return(false, nil) - - // init factory - iSpecFactory := NewMockSpecerFactory(ctrl) - factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory) - - // init iSpecFactory - iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer { - mockISpec := NewMockSpecer(ctrl) - execSql := fmt.Sprintf("`%s` a test sql", tblSrcSpec.Table) - mockISpec.EXPECT().DbExec(execSql).Return(nil) - mockISpec.EXPECT().Valid().Return(nil) - return mockISpec - }) - - // init job - ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory) - job, err := NewJobFromService("Test", ctx) - if err != nil { - t.Error(err) - } - job.progress = NewJobProgress("Test", job.SyncType, db) - - // init binlog - binlog := newTestBinlog(festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS, nil) - dataMap := make(map[string]interface{}) - dataMap["dbId"] = tblSrcSpec.DbId - dataMap["tableId"] = tblSrcSpec.TableId - dataMap["rawSql"] = testSql - if data, err := json.Marshal(dataMap); err != nil { - t.Error(err) - } else { - dataStr := string(data) - binlog.SetData(&dataStr) - } - - // test begin - if err := job.handleLightningSchemaChange(binlog); err != nil { - t.Error(err) - } -} diff --git a/pkg/ccr/label.go b/pkg/ccr/label.go new file mode 100644 index 00000000..43a1c4a7 --- /dev/null +++ b/pkg/ccr/label.go @@ -0,0 +1,33 @@ +package ccr + +import ( + "fmt" + "time" +) + +// snapshot name format "ccrs_${ccr_name}_${sync_id}" +func NewSnapshotLabelPrefix(ccrName string, syncId int64) string { + return fmt.Sprintf("ccrs_%s_%d", ccrName, syncId) +} + +// snapshot name format "ccrp_${ccr_name}_${sync_id}" +func NewPartialSnapshotLabelPrefix(ccrName string, syncId int64) string { + return fmt.Sprintf("ccrp_%s_%d", ccrName, syncId) +} + +func NewLabelWithTs(prefix string) string { + return fmt.Sprintf("%s_%d", prefix, time.Now().Unix()) +} + +func NewRestoreLabel(snapshotName string) string { + if snapshotName == "" { + return "" + } + + // use current seconds + return fmt.Sprintf("%s_r_%d", snapshotName, time.Now().Unix()) +} + +func TableAlias(tableName string) string { + return fmt.Sprintf("__ccr_%s_%d", tableName, time.Now().Unix()) +} diff --git a/pkg/ccr/meta.go b/pkg/ccr/meta.go index f45cf72b..2f60883e 100644 --- a/pkg/ccr/meta.go +++ b/pkg/ccr/meta.go @@ -9,6 +9,7 @@ import ( "github.com/selectdb/ccr_syncer/pkg/ccr/base" "github.com/selectdb/ccr_syncer/pkg/rpc" + tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" utils "github.com/selectdb/ccr_syncer/pkg/utils" "github.com/selectdb/ccr_syncer/pkg/xerror" @@ -20,6 +21,9 @@ const ( degree = 128 showErrMsg = "show proc '/dbs/' failed" + + TABLE_TYPE_OLAP = "OLAP" + TABLE_TYPE_VIEW = "VIEW" ) // All Update* functions force to update meta from fe @@ -41,6 +45,19 @@ type Meta struct { BackendHostPort2IdMap map[string]int64 } +func NewMeta(spec *base.Spec) *Meta { + return &Meta{ + Spec: spec, + DatabaseMeta: DatabaseMeta{ + Tables: 
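The new pkg/ccr/label.go above builds deterministic label strings from the CCR name, the sync id, and the current Unix time. A minimal standalone sketch mirroring those formats; the job name, table name, and sync id are made-up values, not output from a real job:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors NewSnapshotLabelPrefix / NewPartialSnapshotLabelPrefix.
	ccrName, syncId := "demo_job", int64(42)
	snapshotPrefix := fmt.Sprintf("ccrs_%s_%d", ccrName, syncId)
	partialPrefix := fmt.Sprintf("ccrp_%s_%d", ccrName, syncId)

	now := time.Now().Unix()
	snapshotLabel := fmt.Sprintf("%s_%d", snapshotPrefix, now) // NewLabelWithTs
	restoreLabel := fmt.Sprintf("%s_r_%d", snapshotLabel, now) // NewRestoreLabel
	tableAlias := fmt.Sprintf("__ccr_%s_%d", "orders", now)    // TableAlias

	fmt.Println(partialPrefix, snapshotLabel, restoreLabel, tableAlias)
}
```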
make(map[int64]*TableMeta), + }, + Backends: make(map[int64]*base.Backend), + DatabaseName2IdMap: make(map[string]int64), + TableName2IdMap: make(map[string]int64), + BackendHostPort2IdMap: make(map[string]int64), + } +} + func (m *Meta) GetDbId() (int64, error) { dbName := m.Database @@ -53,9 +70,9 @@ func (m *Meta) GetDbId() (int64, error) { // +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+ // | DbId | DbName | TableNum | Size | Quota | LastConsistencyCheckTime | ReplicaCount | ReplicaQuota | TransactionQuota | // +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+ - // | 0 | default_cluster:information_schema | 24 | 0.000 | 1024.000 TB | NULL | 0 | 1073741824 | 100 | - // | 10002 | default_cluster:__internal_schema | 4 | 0.000 | 1024.000 TB | NULL | 28 | 1073741824 | 100 | - // | 10116 | default_cluster:ccr | 2 | 2.738 KB | 1024.000 TB | NULL | 27 | 1073741824 | 100 | + // | 0 | information_schema | 24 | 0.000 | 1024.000 TB | NULL | 0 | 1073741824 | 100 | + // | 10002 | __internal_schema | 4 | 0.000 | 1024.000 TB | NULL | 28 | 1073741824 | 100 | + // | 10116 | ccr | 2 | 2.738 KB | 1024.000 TB | NULL | 27 | 1073741824 | 100 | // +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+ db, err := m.Connect() if err != nil { @@ -84,7 +101,9 @@ func (m *Meta) GetDbId() (int64, error) { } // match parsedDbname == dbname, return dbId - if parsedDbName == dbFullName { + // the default_cluster prefix of db name will be removed in Doris v2.1. + // here we compare both db name and db full name to make it compatible. + if parsedDbName == dbName || parsedDbName == dbFullName { m.DatabaseName2IdMap[dbFullName] = dbId m.DatabaseMeta.Id = dbId return dbId, nil @@ -96,6 +115,7 @@ func (m *Meta) GetDbId() (int64, error) { } // not found + // ATTN: we don't treat db not found as xerror.Meta category. return 0, xerror.Errorf(xerror.Normal, "%s not found dbId", dbFullName) } @@ -105,6 +125,7 @@ func (m *Meta) GetFullTableName(tableName string) string { return fullTableName } +// Update table meta, return xerror.Meta category if no such table exists. 
func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error) { log.Infof("UpdateTable tableName: %s, tableId: %d", tableName, tableId) @@ -127,6 +148,7 @@ func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error) } query := fmt.Sprintf("show proc '/dbs/%d/'", dbId) + log.Infof("UpdateTable Sql: %s", query) rows, err := db.Query(query) if err != nil { return nil, xerror.Wrap(err, xerror.Normal, query) @@ -169,7 +191,7 @@ func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error) } // not found - return nil, xerror.Errorf(xerror.Normal, "tableId %v not found table", tableId) + return nil, xerror.Errorf(xerror.Meta, "tableName %s tableId %v not found table", tableName, tableId) } func (m *Meta) GetTable(tableId int64) (*TableMeta, error) { @@ -245,10 +267,6 @@ func (m *Meta) UpdatePartitions(tableId int64) error { if err != nil { return xerror.Wrapf(err, xerror.Normal, query) } - partitionKey, err := rowParser.GetString("PartitionKey") - if err != nil { - return xerror.Wrapf(err, xerror.Normal, query) - } partitionRange, err := rowParser.GetString("Range") if err != nil { return xerror.Wrapf(err, xerror.Normal, query) @@ -258,7 +276,6 @@ func (m *Meta) UpdatePartitions(tableId int64) error { TableMeta: table, Id: partitionId, Name: partitionName, - Key: partitionKey, Range: partitionRange, } partitions = append(partitions, partition) @@ -293,7 +310,7 @@ func (m *Meta) getPartitionsWithUpdate(tableId int64, depth int64) (map[int64]*P func (m *Meta) getPartitions(tableId int64, depth int64) (map[int64]*PartitionMeta, error) { if depth >= 3 { - return nil, fmt.Errorf("getPartitions depth >= 3") + return nil, xerror.Errorf(xerror.Normal, "getPartitions depth >= 3") } tableMeta, err := m.GetTable(tableId) @@ -307,10 +324,12 @@ func (m *Meta) getPartitions(tableId int64, depth int64) (map[int64]*PartitionMe return tableMeta.PartitionIdMap, nil } +// Get partition id map, return xerror.Meta category if no such table exists. func (m *Meta) GetPartitionIdMap(tableId int64) (map[int64]*PartitionMeta, error) { return m.getPartitions(tableId, 0) } +// Get partition range map, return xerror.Meta category if no such table exists. func (m *Meta) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) { if _, err := m.GetPartitionIdMap(tableId); err != nil { return nil, err @@ -346,6 +365,7 @@ func (m *Meta) GetPartitionIds(tableName string) ([]int64, error) { return partitionIds, nil } +// Get partition range by name, return xerror.Meta category if no such table or partition exists. func (m *Meta) GetPartitionName(tableId int64, partitionId int64) (string, error) { partitions, err := m.GetPartitionIdMap(tableId) if err != nil { @@ -358,13 +378,14 @@ func (m *Meta) GetPartitionName(tableId int64, partitionId int64) (string, error return "", err } if partition, ok = partitions[partitionId]; !ok { - return "", xerror.Errorf(xerror.Normal, "partitionId %d not found", partitionId) + return "", xerror.Errorf(xerror.Meta, "partitionId %d not found", partitionId) } } return partition.Name, nil } +// Get partition range by id, return xerror.Meta category if no such table or partition exists. 
func (m *Meta) GetPartitionRange(tableId int64, partitionId int64) (string, error) { partitions, err := m.GetPartitionIdMap(tableId) if err != nil { @@ -377,13 +398,14 @@ func (m *Meta) GetPartitionRange(tableId int64, partitionId int64) (string, erro return "", err } if partition, ok = partitions[partitionId]; !ok { - return "", xerror.Errorf(xerror.Normal, "partitionId %d not found", partitionId) + return "", xerror.Errorf(xerror.Meta, "partitionId %d not found", partitionId) } } return partition.Range, nil } +// Get partition id by name, return xerror.Meta category if no such partition exists. func (m *Meta) GetPartitionIdByName(tableId int64, partitionName string) (int64, error) { // TODO: optimize performance partitions, err := m.GetPartitionIdMap(tableId) @@ -406,9 +428,10 @@ func (m *Meta) GetPartitionIdByName(tableId int64, partitionName string) (int64, } } - return 0, xerror.Errorf(xerror.Normal, "partition name %s not found", partitionName) + return 0, xerror.Errorf(xerror.Meta, "partition name %s not found", partitionName) } +// Get partition id by range, return xerror.Meta category if no such partition exists. func (m *Meta) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) { // TODO: optimize performance partitions, err := m.GetPartitionIdMap(tableId) @@ -431,7 +454,7 @@ func (m *Meta) GetPartitionIdByRange(tableId int64, partitionRange string) (int6 } } - return 0, xerror.Errorf(xerror.Normal, "partition range %s not found", partitionRange) + return 0, xerror.Errorf(xerror.Meta, "partition range %s not found", partitionRange) } func (m *Meta) UpdateBackends() error { @@ -472,11 +495,6 @@ func (m *Meta) UpdateBackends() error { } var port int64 - port, err = rowParser.GetInt64("HeartbeatPort") - if err != nil { - return xerror.Wrapf(err, xerror.Normal, query) - } - backend.HeartbeatPort = uint16(port) port, err = rowParser.GetInt64("BePort") if err != nil { return xerror.Wrapf(err, xerror.Normal, query) @@ -497,6 +515,10 @@ func (m *Meta) UpdateBackends() error { backends = append(backends, &backend) } + if err := rows.Err(); err != nil { + return xerror.Wrap(err, xerror.Normal, query) + } + for _, backend := range backends { m.Backends[backend.Id] = backend @@ -507,11 +529,85 @@ func (m *Meta) UpdateBackends() error { return nil } +func (m *Meta) GetFrontends() ([]*base.Frontend, error) { + db, err := m.Connect() + if err != nil { + return nil, err + } + + query := "select Host, QueryPort, RpcPort, IsMaster from frontends();" + log.Debug(query) + rows, err := db.Query(query) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, query) + } + + frontends := make([]*base.Frontend, 0) + defer rows.Close() + for rows.Next() { + rowParser := utils.NewRowParser() + if err := rowParser.Parse(rows); err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, query) + } + + var fe base.Frontend + fe.Host, err = rowParser.GetString("Host") + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, query) + } + + fe.Port, err = rowParser.GetString("QueryPort") + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, query) + } + + fe.ThriftPort, err = rowParser.GetString("RpcPort") + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, query) + } + + fe.IsMaster, err = rowParser.GetBool("IsMaster") + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, query) + } + + frontends = append(frontends, &fe) + } + + if err := rows.Err(); err != nil { + return nil, xerror.Wrap(err, xerror.Normal, query) + } + + if 
len(m.HostMapping) != 0 { + for _, frontend := range frontends { + if host, ok := m.HostMapping[frontend.Host]; ok { + frontend.Host = host + } else { + return nil, xerror.Errorf(xerror.Normal, + "the public ip of host %s is not found, consider adding it via HTTP API /add_host_mapping", frontend.Host) + } + } + } + + return frontends, nil +} + func (m *Meta) GetBackends() ([]*base.Backend, error) { if len(m.Backends) > 0 { backends := make([]*base.Backend, 0, len(m.Backends)) for _, backend := range m.Backends { - backends = append(backends, backend) + backend := *backend // copy + backends = append(backends, &backend) + } + if len(m.HostMapping) != 0 { + for _, backend := range backends { + if host, ok := m.HostMapping[backend.Host]; ok { + backend.Host = host + } else { + return nil, xerror.Errorf(xerror.Normal, + "the public ip of host %s is not found, consider adding it via HTTP API /add_host_mapping", backend.Host) + } + } } return backends, nil } @@ -555,6 +651,7 @@ func (m *Meta) GetBackendId(host string, portStr string) (int64, error) { return 0, xerror.Errorf(xerror.Normal, "hostPort: %s not found", hostPort) } +// Update indexes by table and partition, return xerror.Meta category if no such table or partition exists. func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error { // TODO: Optimize performance // Step 1: get dbId @@ -577,7 +674,7 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error { partition, ok := partitions[partitionId] if !ok { - return xerror.Errorf(xerror.Normal, "partitionId: %d not found", partitionId) + return xerror.Errorf(xerror.Meta, "partitionId: %d not found", partitionId) } // mysql> show proc '/dbs/10116/10118/partitions/10117'; @@ -617,10 +714,12 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error { } log.Debugf("indexId: %d, indexName: %s", indexId, indexName) + isBaseIndex := table.Name == indexName // it might be staled, caused by rename table index := &IndexMeta{ PartitionMeta: partition, Id: indexId, Name: indexName, + IsBaseIndex: isBaseIndex, } indexes = append(indexes, index) } @@ -639,6 +738,7 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error { return nil } +// Get indexes by table and partition, return xerror.Meta if no such table or partition exists. func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map[int64]*IndexMeta, error) { partitions, err := m.GetPartitionIdMap(tableId) if err != nil { @@ -648,7 +748,7 @@ func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map partition, ok := partitions[partitionId] if !ok || len(partition.IndexIdMap) == 0 { if hasUpdate { - return nil, xerror.Errorf(xerror.Normal, "partitionId: %d not found", partitionId) + return nil, xerror.Errorf(xerror.Meta, "partitionId: %d not found", partitionId) } err = m.UpdateIndexes(tableId, partitionId) @@ -661,22 +761,27 @@ func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map return partition.IndexIdMap, nil } +// Get indexes id map by table and partition, return xerror.Meta if no such table or partition exists. func (m *Meta) GetIndexIdMap(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) { return m.getIndexes(tableId, partitionId, false) } -func (m *Meta) GetIndexNameMap(tableId int64, partitionId int64) (map[string]*IndexMeta, error) { +// Get indexes name map by table and partition, return xerror.Meta if no such table or partition exists. 
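The HostMapping lookups added to GetFrontends and GetBackends above swap each node's private host for its public address and fail fast when no mapping exists. A self-contained sketch of that substitution step, using a plain string slice and fmt.Errorf in place of the project's Frontend/Backend and xerror types:

```go
package main

import "fmt"

// applyHostMapping rewrites node hosts to their public addresses and returns an
// error when a host has no mapping, so the caller can ask for one to be added.
func applyHostMapping(hosts []string, mapping map[string]string) ([]string, error) {
	if len(mapping) == 0 {
		return hosts, nil // no mapping configured, keep the original hosts
	}
	mapped := make([]string, 0, len(hosts))
	for _, host := range hosts {
		public, ok := mapping[host]
		if !ok {
			return nil, fmt.Errorf("the public ip of host %s is not found, consider adding it via HTTP API /add_host_mapping", host)
		}
		mapped = append(mapped, public)
	}
	return mapped, nil
}

func main() {
	mapping := map[string]string{"10.0.0.1": "203.0.113.10"}
	fmt.Println(applyHostMapping([]string{"10.0.0.1"}, mapping))
}
```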
+func (m *Meta) GetIndexNameMap(tableId int64, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) { if _, err := m.getIndexes(tableId, partitionId, false); err != nil { - return nil, err + return nil, nil, err } partitions, err := m.GetPartitionIdMap(tableId) if err != nil { - return nil, err + return nil, nil, err } - partition := partitions[partitionId] - return partition.IndexNameMap, nil + if partition, ok := partitions[partitionId]; !ok { + return nil, nil, xerror.Errorf(xerror.Meta, "partition %d is not found", partitionId) + } else { + return partition.IndexNameMap, nil, nil + } } func (m *Meta) updateReplica(index *IndexMeta) error { @@ -756,6 +861,7 @@ func (m *Meta) updateReplica(index *IndexMeta) error { return nil } +// Update replicas by table and partition, return xerror.Meta category if no such table or partition exists. func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error { indexes, err := m.GetIndexIdMap(tableId, partitionId) if err != nil { @@ -763,7 +869,7 @@ func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error { } if len(indexes) == 0 { - return xerror.Errorf(xerror.Normal, "indexes is empty") + return xerror.Errorf(xerror.Meta, "indexes is empty") } // TODO: Update index as much as possible, record error @@ -776,6 +882,7 @@ func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error { return nil } +// Get replicas by table and partition, return xerror.Meta category if no such table or partition exists. func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64, *ReplicaMeta], error) { indexes, err := m.GetIndexIdMap(tableId, partitionId) if err != nil { @@ -783,7 +890,7 @@ func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64, } if len(indexes) == 0 { - return nil, xerror.Errorf(xerror.Normal, "indexes is empty") + return nil, xerror.Errorf(xerror.Meta, "indexes is empty") } // fast path, no rollup @@ -820,6 +927,7 @@ func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64, return replicas, nil } +// Get tablets by table, partition and index, return xerror.Meta category if no such table, partition or index exists. 
func (m *Meta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) { _, err := m.GetReplicas(tableId, partitionId) if err != nil { @@ -834,7 +942,7 @@ func (m *Meta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64 if tablets, ok := indexes[indexId]; ok { return tablets.TabletMetas, nil } else { - return nil, xerror.Errorf(xerror.Normal, "index %d not found", indexId) + return nil, xerror.Errorf(xerror.Meta, "index %d not found", indexId) } } @@ -846,10 +954,12 @@ func (m *Meta) UpdateToken(rpcFactory rpc.IRpcFactory) error { return err } - if token, err := rpc.GetMasterToken(spec); err != nil { + if resp, err := rpc.GetMasterToken(spec); err != nil { return err + } else if resp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK { + return xerror.Errorf(xerror.Meta, "get master token failed, status: %s", resp.GetStatus().String()) } else { - m.token = token + m.token = resp.GetToken() return nil } } @@ -879,7 +989,6 @@ func (m *Meta) GetTableNameById(tableId int64) (string, error) { return "", err } - var tableName string sql := fmt.Sprintf("show table %d", tableId) rows, err := db.Query(sql) if err != nil { @@ -887,15 +996,18 @@ func (m *Meta) GetTableNameById(tableId int64) (string, error) { } defer rows.Close() + var tableName string for rows.Next() { rowParser := utils.NewRowParser() if err := rowParser.Parse(rows); err != nil { return "", xerror.Wrapf(err, xerror.Normal, sql) } + tableName, err = rowParser.GetString("TableName") if err != nil { return "", xerror.Wrap(err, xerror.Normal, sql) } + log.Debugf("found table %d name %s", tableId, tableName) } if err := rows.Err(); err != nil { @@ -948,10 +1060,20 @@ func (m *Meta) GetTables() (map[int64]*TableMeta, error) { if err != nil { return nil, xerror.Wrapf(err, xerror.Normal, query) } + tableType, err := rowParser.GetString("Type") + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "get tables Type failed, query: %s", query) + } - // match parsedDbname == dbname, return dbId fullTableName := m.GetFullTableName(tableName) - log.Debugf("found table:%s, tableId:%d", fullTableName, tableId) + log.Debugf("found table: %s, id: %d, type: %s", fullTableName, tableId, tableType) + + if tableType != TABLE_TYPE_OLAP && tableType != TABLE_TYPE_VIEW { + // See fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java:backup() for details + continue + } + + // match parsedDbname == dbname, return dbId tableName2IdMap[fullTableName] = tableId tables[tableId] = &TableMeta{ DatabaseMeta: &m.DatabaseMeta, @@ -975,7 +1097,7 @@ func (m *Meta) CheckBinlogFeature() error { if binlogIsEnabled, err := m.isFEBinlogFeature(); err != nil { return err } else if !binlogIsEnabled { - return xerror.Errorf(xerror.Normal, "Fe %v:%v enable_binlog_feature=false, please set it true in fe.conf", + return xerror.Errorf(xerror.Normal, "Fe %v:%v enable_feature_binlog=false, please set it true in fe.conf", m.Spec.Host, m.Spec.Port) } @@ -1058,6 +1180,11 @@ func (m *Meta) DirtyGetTables() map[int64]*TableMeta { return m.Tables } +func (m *Meta) ClearTablesCache() { + m.Tables = make(map[int64]*TableMeta) + m.TableName2IdMap = make(map[string]int64) +} + func (m *Meta) ClearDB(dbName string) { if m.Database != dbName { log.Info("dbName not match, skip clear") @@ -1089,3 +1216,15 @@ func (m *Meta) ClearTable(dbName string, tableName string) { delete(m.TableName2IdMap, tableName) } + +func (m *Meta) IsPartitionDropped(partitionId int64) bool { + panic("IsPartitionDropped is not supported, please 
use ThriftMeta instead") +} + +func (m *Meta) IsTableDropped(partitionId int64) bool { + panic("IsTableDropped is not supported, please use ThriftMeta instead") +} + +func (m *Meta) IsIndexDropped(indexId int64) bool { + panic("IsIndexDropped is not supported, please use ThriftMeta instead") +} diff --git a/pkg/ccr/metaer.go b/pkg/ccr/metaer.go index 1048ce24..f7286f12 100644 --- a/pkg/ccr/metaer.go +++ b/pkg/ccr/metaer.go @@ -16,6 +16,7 @@ type DatabaseMeta struct { type TableMeta struct { DatabaseMeta *DatabaseMeta Id int64 + BaseIndexId int64 Name string // maybe dirty, such after rename PartitionIdMap map[int64]*PartitionMeta // partitionId -> partitionMeta PartitionRangeMap map[string]*PartitionMeta // partitionRange -> partitionMeta @@ -27,24 +28,25 @@ func (t *TableMeta) String() string { } type PartitionMeta struct { - TableMeta *TableMeta - Id int64 - Name string - Key string - Range string - IndexIdMap map[int64]*IndexMeta // indexId -> indexMeta - IndexNameMap map[string]*IndexMeta // indexName -> indexMeta + TableMeta *TableMeta + Id int64 + Name string + Range string + VisibleVersion int64 + IndexIdMap map[int64]*IndexMeta // indexId -> indexMeta + IndexNameMap map[string]*IndexMeta // indexName -> indexMeta } // Stringer func (p *PartitionMeta) String() string { - return fmt.Sprintf("PartitionMeta{(id:%d), (name:%s), (key:%s), (range:%s)}", p.Id, p.Name, p.Key, p.Range) + return fmt.Sprintf("PartitionMeta{(id:%d), (name:%s), (range:%s)}", p.Id, p.Name, p.Range) } type IndexMeta struct { PartitionMeta *PartitionMeta Id int64 Name string + IsBaseIndex bool TabletMetas *btree.Map[int64, *TabletMeta] // tabletId -> tablet ReplicaMetas *btree.Map[int64, *ReplicaMeta] // replicaId -> replica } @@ -60,6 +62,7 @@ type ReplicaMeta struct { Id int64 TabletId int64 BackendId int64 + Version int64 } type MetaCleaner interface { @@ -67,6 +70,18 @@ type MetaCleaner interface { ClearTable(dbName string, tableName string) } +type IngestBinlogMetaer interface { + GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) + GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) + GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) + GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error) + GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) + GetBackendMap() (map[int64]*base.Backend, error) + IsPartitionDropped(partitionId int64) bool + IsTableDropped(tableId int64) bool + IsIndexDropped(indexId int64) bool +} + type Metaer interface { GetDbId() (int64, error) GetFullTableName(tableName string) string @@ -79,35 +94,29 @@ type Metaer interface { UpdatePartitions(tableId int64) error GetPartitionIdMap(tableId int64) (map[int64]*PartitionMeta, error) - GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) GetPartitionIds(tableName string) ([]int64, error) GetPartitionName(tableId int64, partitionId int64) (string, error) GetPartitionRange(tableId int64, partitionId int64) (string, error) GetPartitionIdByName(tableId int64, partitionName string) (int64, error) - GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) + GetFrontends() ([]*base.Frontend, error) UpdateBackends() error GetBackends() ([]*base.Backend, error) - GetBackendMap() (map[int64]*base.Backend, error) GetBackendId(host, portStr string) (int64, error) UpdateIndexes(tableId, partitionId int64) error - GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error) - 
GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, error) UpdateReplicas(tableId, partitionId int64) error GetReplicas(tableId, partitionId int64) (*btree.Map[int64, *ReplicaMeta], error) - GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) - UpdateToken(rpcFactory rpc.IRpcFactory) error GetMasterToken(rpcFactory rpc.IRpcFactory) (string, error) CheckBinlogFeature() error DirtyGetTables() map[int64]*TableMeta + ClearTablesCache() - // from Spec - DbExec(sql string) error + IngestBinlogMetaer MetaCleaner } diff --git a/pkg/ccr/metaer_factory.go b/pkg/ccr/metaer_factory.go index 64801492..ea498659 100644 --- a/pkg/ccr/metaer_factory.go +++ b/pkg/ccr/metaer_factory.go @@ -8,22 +8,12 @@ type MetaerFactory interface { NewMeta(tableSpec *base.Spec) Metaer } -type MetaFactory struct { -} +type MetaFactory struct{} func NewMetaFactory() MetaerFactory { return &MetaFactory{} } -func (mf *MetaFactory) NewMeta(tableSpec *base.Spec) Metaer { - return &Meta{ - Spec: tableSpec, - DatabaseMeta: DatabaseMeta{ - Tables: make(map[int64]*TableMeta), - }, - Backends: make(map[int64]*base.Backend), - DatabaseName2IdMap: make(map[string]int64), - TableName2IdMap: make(map[string]int64), - BackendHostPort2IdMap: make(map[string]int64), - } +func (mf *MetaFactory) NewMeta(spec *base.Spec) Metaer { + return NewMeta(spec) } diff --git a/pkg/ccr/metaer_factory_mock.go b/pkg/ccr/metaer_factory_mock.go index e1600f05..6b640756 100644 --- a/pkg/ccr/metaer_factory_mock.go +++ b/pkg/ccr/metaer_factory_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ccr/metaer_factory.go +// Source: pkg/ccr/metaer_factory.go // // Generated by this command: // -// mockgen -source=ccr/metaer_factory.go -destination=ccr/metaer_factory_mock.go -package=ccr +// mockgen -source=pkg/ccr/metaer_factory.go -destination=pkg/ccr/metaer_factory_mock.go -package=ccr // // Package ccr is a generated GoMock package. package ccr diff --git a/pkg/ccr/metaer_mock.go b/pkg/ccr/metaer_mock.go index a0f0cb9a..bfba5308 100644 --- a/pkg/ccr/metaer_mock.go +++ b/pkg/ccr/metaer_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ccr/metaer.go +// Source: pkg/ccr/metaer.go // // Generated by this command: // -// mockgen -source=ccr/metaer.go -destination=ccr/metaer_mock.go -package=ccr +// mockgen -source=pkg/ccr/metaer.go -destination=pkg/ccr/metaer_mock.go -package=ccr // // Package ccr is a generated GoMock package. package ccr @@ -64,6 +64,119 @@ func (mr *MockMetaCleanerMockRecorder) ClearTable(dbName, tableName any) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearTable", reflect.TypeOf((*MockMetaCleaner)(nil).ClearTable), dbName, tableName) } +// MockIngestBinlogMetaer is a mock of IngestBinlogMetaer interface. +type MockIngestBinlogMetaer struct { + ctrl *gomock.Controller + recorder *MockIngestBinlogMetaerMockRecorder +} + +// MockIngestBinlogMetaerMockRecorder is the mock recorder for MockIngestBinlogMetaer. +type MockIngestBinlogMetaerMockRecorder struct { + mock *MockIngestBinlogMetaer +} + +// NewMockIngestBinlogMetaer creates a new mock instance. +func NewMockIngestBinlogMetaer(ctrl *gomock.Controller) *MockIngestBinlogMetaer { + mock := &MockIngestBinlogMetaer{ctrl: ctrl} + mock.recorder = &MockIngestBinlogMetaerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
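The metaer.go change above moves the tablet, partition, and index lookups needed while ingesting binlogs into a separate IngestBinlogMetaer interface, which Metaer then embeds. A toy sketch of the same interface-embedding pattern with placeholder types, showing why ingest-side code (and its mocks) can now depend on the narrow interface only:

```go
package main

import "fmt"

// Narrow read-only view, analogous to IngestBinlogMetaer.
type TabletLookup interface {
	GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error)
}

// Full meta interface embeds the narrow one, analogous to Metaer.
type FullMeta interface {
	TabletLookup
	UpdateBackends() error
}

// Ingest-side code only needs the narrow view, which keeps its mocks small.
func resolvePartition(meta TabletLookup, tableId int64, r string) {
	id, err := meta.GetPartitionIdByRange(tableId, r)
	fmt.Println(id, err)
}

type fakeMeta struct{}

func (fakeMeta) GetPartitionIdByRange(int64, string) (int64, error) { return 1001, nil }
func (fakeMeta) UpdateBackends() error                              { return nil }

func main() {
	var m FullMeta = fakeMeta{}
	resolvePartition(m, 7, "[0, 10)")
}
```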
+func (m *MockIngestBinlogMetaer) EXPECT() *MockIngestBinlogMetaerMockRecorder { + return m.recorder +} + +// GetBackendMap mocks base method. +func (m *MockIngestBinlogMetaer) GetBackendMap() (map[int64]*base.Backend, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBackendMap") + ret0, _ := ret[0].(map[int64]*base.Backend) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBackendMap indicates an expected call of GetBackendMap. +func (mr *MockIngestBinlogMetaerMockRecorder) GetBackendMap() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackendMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetBackendMap)) +} + +// GetIndexIdMap mocks base method. +func (m *MockIngestBinlogMetaer) GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIndexIdMap", tableId, partitionId) + ret0, _ := ret[0].(map[int64]*IndexMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIndexIdMap indicates an expected call of GetIndexIdMap. +func (mr *MockIngestBinlogMetaerMockRecorder) GetIndexIdMap(tableId, partitionId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexIdMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetIndexIdMap), tableId, partitionId) +} + +// GetIndexNameMap mocks base method. +func (m *MockIngestBinlogMetaer) GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIndexNameMap", tableId, partitionId) + ret0, _ := ret[0].(map[string]*IndexMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIndexNameMap indicates an expected call of GetIndexNameMap. +func (mr *MockIngestBinlogMetaerMockRecorder) GetIndexNameMap(tableId, partitionId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexNameMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetIndexNameMap), tableId, partitionId) +} + +// GetPartitionIdByRange mocks base method. +func (m *MockIngestBinlogMetaer) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPartitionIdByRange", tableId, partitionRange) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPartitionIdByRange indicates an expected call of GetPartitionIdByRange. +func (mr *MockIngestBinlogMetaerMockRecorder) GetPartitionIdByRange(tableId, partitionRange any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPartitionIdByRange", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetPartitionIdByRange), tableId, partitionRange) +} + +// GetPartitionRangeMap mocks base method. +func (m *MockIngestBinlogMetaer) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPartitionRangeMap", tableId) + ret0, _ := ret[0].(map[string]*PartitionMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPartitionRangeMap indicates an expected call of GetPartitionRangeMap. +func (mr *MockIngestBinlogMetaerMockRecorder) GetPartitionRangeMap(tableId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPartitionRangeMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetPartitionRangeMap), tableId) +} + +// GetTablets mocks base method. 
+func (m *MockIngestBinlogMetaer) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTablets", tableId, partitionId, indexId) + ret0, _ := ret[0].(*btree.Map[int64, *TabletMeta]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTablets indicates an expected call of GetTablets. +func (mr *MockIngestBinlogMetaerMockRecorder) GetTablets(tableId, partitionId, indexId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTablets", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetTablets), tableId, partitionId, indexId) +} + // MockMetaer is a mock of Metaer interface. type MockMetaer struct { ctrl *gomock.Controller @@ -213,6 +326,21 @@ func (mr *MockMetaerMockRecorder) GetDbId() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDbId", reflect.TypeOf((*MockMetaer)(nil).GetDbId)) } +// GetFrontends mocks base method. +func (m *MockMetaer) GetFrontends() ([]*base.Frontend, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFrontends") + ret0, _ := ret[0].([]*base.Frontend) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFrontends indicates an expected call of GetFrontends. +func (mr *MockMetaerMockRecorder) GetFrontends() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFrontends", reflect.TypeOf((*MockMetaer)(nil).GetFrontends)) +} + // GetFullTableName mocks base method. func (m *MockMetaer) GetFullTableName(tableName string) string { m.ctrl.T.Helper() diff --git a/pkg/ccr/record/add_partition.go b/pkg/ccr/record/add_partition.go index 04f78f22..567913e4 100644 --- a/pkg/ccr/record/add_partition.go +++ b/pkg/ccr/record/add_partition.go @@ -2,13 +2,32 @@ package record import ( "encoding/json" + "fmt" + "strings" + "github.com/selectdb/ccr_syncer/pkg/utils" "github.com/selectdb/ccr_syncer/pkg/xerror" + + log "github.com/sirupsen/logrus" ) +type DistributionInfo struct { + BucketNum int `json:"bucketNum"` + Type string `json:"type"` + DistributionColumns []struct { + Name string `json:"name"` + } `json:"distributionColumns"` +} + type AddPartition struct { - TableId int64 `json:"tableId"` - Sql string `json:"sql"` + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + Sql string `json:"sql"` + IsTemp bool `json:"isTempPartition"` + Partition struct { + DistributionInfoOld *DistributionInfo `json:"distributionInfo"` + DistributionInfoNew *DistributionInfo `json:"di"` + } `json:"partition"` } func NewAddPartitionFromJson(data string) (*AddPartition, error) { @@ -19,7 +38,6 @@ func NewAddPartitionFromJson(data string) (*AddPartition, error) { } if addPartition.Sql == "" { - // TODO: fallback to create sql from other fields return nil, xerror.Errorf(xerror.Normal, "add partition sql is empty") } @@ -29,3 +47,61 @@ func NewAddPartitionFromJson(data string) (*AddPartition, error) { return &addPartition, nil } + +func (addPartition *AddPartition) getDistributionInfo() *DistributionInfo { + if addPartition.Partition.DistributionInfoOld != nil { + return addPartition.Partition.DistributionInfoOld + } + return addPartition.Partition.DistributionInfoNew +} + +func (addPartition *AddPartition) getDistributionColumns() []string { + var distributionColumns []string + for _, column := range addPartition.getDistributionInfo().DistributionColumns { + distributionColumns = append(distributionColumns, column.Name) + } + return distributionColumns +} + +func 
(addPartition *AddPartition) GetSql(destTableName string) string { + // addPartitionSql = "ALTER TABLE " + sql + addPartitionSql := fmt.Sprintf("ALTER TABLE %s %s", utils.FormatKeywordName(destTableName), addPartition.Sql) + // remove last ';' and add BUCKETS num + addPartitionSql = strings.TrimRight(addPartitionSql, ";") + // check contains BUCKETS num, ignore case + if strings.Contains(strings.ToUpper(addPartitionSql), "BUCKETS") { + // if not contains BUCKETS AUTO, return directly + if !strings.Contains(strings.ToUpper(addPartitionSql), "BUCKETS AUTO") { + log.Infof("addPartitionSql contains BUCKETS declaration, sql: %s", addPartitionSql) + return addPartitionSql + } + + log.Info("addPartitionSql contains BUCKETS AUTO, remove it") + // BUCKETS AUTO is in the end of sql, remove it, so we not care about the string after BUCKETS AUTO + // Remove BUCKETS AUTO case, but not change other sql case + // find BUCKETS AUTO index, remove it from origin sql + bucketsAutoIndex := strings.LastIndex(strings.ToUpper(addPartitionSql), "BUCKETS AUTO") + addPartitionSql = addPartitionSql[:bucketsAutoIndex] + } + + // check contain DISTRIBUTED BY + // if not contain + // create like below sql + // ALTER TABLE my_table + // ADD PARTITION p1 VALUES LESS THAN ("2015-01-01") + // DISTRIBUTED BY HASH(k1) BUCKETS 20; + // or DISTRIBUTED BY RANDOM BUCKETS 20; + distributionInfo := addPartition.getDistributionInfo() + if !strings.Contains(strings.ToUpper(addPartitionSql), "DISTRIBUTED BY") { + if distributionInfo.Type == "HASH" { + addPartitionSql = fmt.Sprintf("%s DISTRIBUTED BY HASH(%s)", addPartitionSql, + "`"+strings.Join(addPartition.getDistributionColumns(), "`,`")+"`") + } else { + addPartitionSql = fmt.Sprintf("%s DISTRIBUTED BY RANDOM", addPartitionSql) + } + } + bucketNum := distributionInfo.BucketNum + addPartitionSql = fmt.Sprintf("%s BUCKETS %d", addPartitionSql, bucketNum) + + return addPartitionSql +} diff --git a/pkg/ccr/record/alter_job_v2.go b/pkg/ccr/record/alter_job_v2.go index e52c337c..c74a7a6c 100644 --- a/pkg/ccr/record/alter_job_v2.go +++ b/pkg/ccr/record/alter_job_v2.go @@ -7,14 +7,32 @@ import ( "github.com/selectdb/ccr_syncer/pkg/xerror" ) +const ( + ALTER_JOB_SCHEMA_CHANGE = "SCHEMA_CHANGE" + ALTER_JOB_ROLLUP = "ROLLUP" + + ALTER_JOB_STATE_PENDING = "PENDING" + ALTER_JOB_STATE_WAITING_TXN = "WAITING_TXN" + ALTER_JOB_STATE_RUNNING = "RUNNING" + ALTER_JOB_STATE_FINISHED = "FINISHED" + ALTER_JOB_STATE_CANCELLED = "CANCELLED" +) + type AlterJobV2 struct { - Type string `json:"type"` - DbId int64 `json:"dbId"` - TableId int64 `json:"tableId"` - TableName string `json:"tableName"` - JobId int64 `json:"jobId"` - JobState string `json:"jobState"` - RawSql string `json:"rawSql"` + Type string `json:"type"` + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + TableName string `json:"tableName"` + JobId int64 `json:"jobId"` + JobState string `json:"jobState"` + RawSql string `json:"rawSql"` + ShadowIndexes map[int64]int64 `json:"iim"` + + // for rollup + RollupIndexId int64 `json:"rollupIndexId"` + RollupIndexName string `json:"rollupIndexName"` + BaseIndexId int64 `json:"baseIndexId"` + BaseIndexName string `json:"baseIndexName"` } func NewAlterJobV2FromJson(data string) (*AlterJobV2, error) { @@ -31,14 +49,18 @@ func NewAlterJobV2FromJson(data string) (*AlterJobV2, error) { // } if alterJob.TableId == 0 { - return nil, xerror.Errorf(xerror.Normal, "table id not found") + return nil, xerror.Errorf(xerror.Normal, "invalid alter job, table id not found") + } + + if alterJob.TableName == 
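AddPartition.GetSql above rewrites the upstream ADD PARTITION statement for the downstream table: it strips a trailing BUCKETS AUTO clause and, when the statement carries no DISTRIBUTED BY clause, appends one built from the recorded distribution info together with an explicit bucket count. A rough sketch of that transformation on an invented statement; FormatKeywordName, the real record type, and the bucket count used here are placeholders:

```go
package main

import (
	"fmt"
	"strings"
)

// trimBucketsAuto drops a trailing "BUCKETS AUTO" clause without changing the
// case of the rest of the statement: find the clause in an upper-cased copy,
// then slice the original string, the same trick GetSql uses.
func trimBucketsAuto(sql string) string {
	upper := strings.ToUpper(sql)
	if idx := strings.LastIndex(upper, "BUCKETS AUTO"); idx >= 0 {
		return sql[:idx]
	}
	return sql
}

func main() {
	// Invented binlog payload; real payloads come from the upstream FE.
	binlogSql := `ADD PARTITION p20240101 VALUES LESS THAN ("2024-01-02") BUCKETS AUTO;`

	sql := "ALTER TABLE `sales` " + strings.TrimRight(binlogSql, ";")
	sql = trimBucketsAuto(sql)
	// No DISTRIBUTED BY clause in the statement, so append one from the
	// recorded distribution columns plus an explicit bucket count.
	sql = fmt.Sprintf("%s DISTRIBUTED BY HASH(`k1`) BUCKETS %d", sql, 16)
	fmt.Println(sql)
}
```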
"" { + return nil, xerror.Errorf(xerror.Normal, "invalid alter job, tableName is empty") } return &alterJob, nil } func (a *AlterJobV2) IsFinished() bool { - return a.JobState == "FINISHED" + return a.JobState == ALTER_JOB_STATE_FINISHED } // Stringer diff --git a/pkg/ccr/record/alter_view.go b/pkg/ccr/record/alter_view.go new file mode 100644 index 00000000..eb5687f9 --- /dev/null +++ b/pkg/ccr/record/alter_view.go @@ -0,0 +1,31 @@ +package record + +import ( + "encoding/json" + "fmt" +) + +type AlterView struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + InlineViewDef string `json:"inlineViewDef"` + SqlMode int64 `json:"sqlMode"` +} + +func NewAlterViewFromJson(data string) (*AlterView, error) { + var alterView AlterView + err := json.Unmarshal([]byte(data), &alterView) + if err != nil { + return nil, fmt.Errorf("unmarshal alter view error: %v", err) + } + + if alterView.TableId == 0 { + return nil, fmt.Errorf("table id not found") + } + + return &alterView, nil +} + +func (a *AlterView) String() string { + return fmt.Sprintf("AlterView: DbId: %d, TableId: %d, InlineViewDef: %s, SqlMode: %d", a.DbId, a.TableId, a.InlineViewDef, a.SqlMode) +} diff --git a/pkg/ccr/record/barrier_log.go b/pkg/ccr/record/barrier_log.go new file mode 100644 index 00000000..d1b93ffe --- /dev/null +++ b/pkg/ccr/record/barrier_log.go @@ -0,0 +1,23 @@ +package record + +import ( + "encoding/json" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type BarrierLog struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + BinlogType int64 `json:"binlogType"` + Binlog string `json:"binlog"` +} + +func NewBarrierLogFromJson(data string) (*BarrierLog, error) { + var log BarrierLog + err := json.Unmarshal([]byte(data), &log) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal barrier log error") + } + return &log, nil +} diff --git a/pkg/ccr/record/create_table.go b/pkg/ccr/record/create_table.go index 634fc218..182107dd 100644 --- a/pkg/ccr/record/create_table.go +++ b/pkg/ccr/record/create_table.go @@ -3,6 +3,7 @@ package record import ( "encoding/json" "fmt" + "regexp" "github.com/selectdb/ccr_syncer/pkg/xerror" ) @@ -11,6 +12,10 @@ type CreateTable struct { DbId int64 `json:"dbId"` TableId int64 `json:"tableId"` Sql string `json:"sql"` + + // Below fields was added in doris 2.0.3: https://github.com/apache/doris/pull/26901 + DbName string `json:"dbName"` + TableName string `json:"tableName"` } func NewCreateTableFromJson(data string) (*CreateTable, error) { @@ -32,7 +37,13 @@ func NewCreateTableFromJson(data string) (*CreateTable, error) { return &createTable, nil } +func (c *CreateTable) IsCreateView() bool { + viewRegex := regexp.MustCompile(`(?i)^CREATE(\s+)VIEW`) + return viewRegex.MatchString(c.Sql) +} + // String func (c *CreateTable) String() string { - return fmt.Sprintf("CreateTable: DbId: %d, TableId: %d, Sql: %s", c.DbId, c.TableId, c.Sql) + return fmt.Sprintf("CreateTable: DbId: %d, DbName: %s, TableId: %d, TableName: %s, Sql: %s", + c.DbId, c.DbName, c.TableId, c.TableName, c.Sql) } diff --git a/pkg/ccr/record/drop_partition.go b/pkg/ccr/record/drop_partition.go index c1cdf02d..37b01245 100644 --- a/pkg/ccr/record/drop_partition.go +++ b/pkg/ccr/record/drop_partition.go @@ -9,6 +9,7 @@ import ( type DropPartition struct { TableId int64 `json:"tableId"` Sql string `json:"sql"` + IsTemp bool `json:"isTempPartition"` } func NewDropPartitionFromJson(data string) (*DropPartition, error) { diff --git a/pkg/ccr/record/drop_rollup.go 
b/pkg/ccr/record/drop_rollup.go new file mode 100644 index 00000000..d7e546e2 --- /dev/null +++ b/pkg/ccr/record/drop_rollup.go @@ -0,0 +1,43 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type DropRollup struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + TableName string `json:"tableName"` + IndexId int64 `json:"indexId"` + IndexName string `json:"indexName"` +} + +func NewDropRollupFromJson(data string) (*DropRollup, error) { + var dropRollup DropRollup + err := json.Unmarshal([]byte(data), &dropRollup) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal drop rollup error") + } + + if dropRollup.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, table id not found") + } + + if dropRollup.TableName == "" { + return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, tableName is empty") + } + + if dropRollup.IndexName == "" { + return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, indexName is empty") + } + + return &dropRollup, nil +} + +func (d *DropRollup) String() string { + return fmt.Sprintf("DropRollup{DbId: %d, TableId: %d, TableName: %s, IndexId: %d, IndexName: %s}", + d.DbId, d.TableId, d.TableName, d.IndexId, d.IndexName) +} diff --git a/pkg/ccr/record/drop_table.go b/pkg/ccr/record/drop_table.go index 05e4d96c..b61d0eff 100644 --- a/pkg/ccr/record/drop_table.go +++ b/pkg/ccr/record/drop_table.go @@ -11,6 +11,7 @@ type DropTable struct { DbId int64 `json:"dbId"` TableId int64 `json:"tableId"` TableName string `json:"tableName"` + IsView bool `json:"isView"` RawSql string `json:"rawSql"` } @@ -30,5 +31,5 @@ func NewDropTableFromJson(data string) (*DropTable, error) { // Stringer, all fields func (c *DropTable) String() string { - return fmt.Sprintf("DropTable: DbId: %d, TableId: %d, TableName: %s, RawSql: %s", c.DbId, c.TableId, c.TableName, c.RawSql) + return fmt.Sprintf("DropTable: DbId: %d, TableId: %d, TableName: %s, IsView: %t, RawSql: %s", c.DbId, c.TableId, c.TableName, c.IsView, c.RawSql) } diff --git a/pkg/ccr/record/index.go b/pkg/ccr/record/index.go new file mode 100644 index 00000000..94714957 --- /dev/null +++ b/pkg/ccr/record/index.go @@ -0,0 +1,58 @@ +package record + +const ( + INDEX_TYPE_BITMAP = "BITMAP" + INDEX_TYPE_INVERTED = "INVERTED" + INDEX_TYPE_BLOOMFILTER = "BLOOMFILTER" + INDEX_TYPE_NGRAM_BF = "NGRAM_BF" +) + +type Index struct { + IndexId int64 `json:"indexId"` + IndexName string `json:"indexName"` + Columns []string `json:"columns"` + IndexType string `json:"indexType"` + Properties map[string]string `json:"properties"` + Comment string `json:"comment"` + ColumnUniqueIds []int `json:"columnUniqueIds"` + + IndexIdAlternative int64 `json:"i"` + IndexNameAlternative string `json:"in"` + ColumnsAlternative []string `json:"c"` + IndexTypeAlternative string `json:"it"` + PropertiesAlternative map[string]string `json:"pt"` + CommentAlternative string `json:"ct"` + ColumnUniqueIdsAlternative []int `json:"cui"` +} + +func (index *Index) GetIndexName() string { + if index.IndexName != "" { + return index.IndexName + } + return index.IndexNameAlternative +} + +func (index *Index) GetColumns() []string { + if len(index.Columns) > 0 { + return index.Columns + } + return index.ColumnsAlternative +} + +func (index *Index) GetComment() string { + if index.Comment != "" { + return index.Comment + } + return index.CommentAlternative +} + +func (index *Index) GetIndexType() string { + if index.IndexType != "" { + return 
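Back in pkg/ccr/record/create_table.go above, IsCreateView classifies the binlog by matching the recorded SQL against a case-insensitive ^CREATE\s+VIEW pattern. A quick standalone check of how that regex behaves on a few invented statements:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as CreateTable.IsCreateView.
	viewRegex := regexp.MustCompile(`(?i)^CREATE(\s+)VIEW`)

	samples := []string{
		"CREATE VIEW v1 AS SELECT 1",   // matches
		"create   view v2 as select 1", // matches: case-insensitive, any whitespace
		"CREATE TABLE t1 (k INT)",      // does not match
	}
	for _, sql := range samples {
		fmt.Printf("%-35q -> %v\n", sql, viewRegex.MatchString(sql))
	}
}
```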
index.IndexType + } + return index.IndexTypeAlternative +} + +func (index *Index) IsInvertedIndex() bool { + return index.GetIndexType() == INDEX_TYPE_INVERTED +} diff --git a/pkg/ccr/record/index_change_job.go b/pkg/ccr/record/index_change_job.go new file mode 100644 index 00000000..979e1673 --- /dev/null +++ b/pkg/ccr/record/index_change_job.go @@ -0,0 +1,59 @@ +package record + +import ( + "encoding/json" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +const ( + INDEX_CHANGE_JOB_STATE_RUNNING = "RUNNING" + INDEX_CHANGE_JOB_STATE_FINISHED = "FINISHED" + INDEX_CHANGE_JOB_STATE_CANCELLED = "CANCELLED" + INDEX_CHANGE_JOB_STATE_WAITING_TXN = "WATING_TXN" +) + +type IndexChangeJob struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + TableName string `json:"tableName"` + PartitionId int64 `json:"partitionId"` + PartitionName string `json:"partitionName"` + JobState string `json:"jobState"` + ErrMsg string `json:"errMsg"` + CreateTimeMs int64 `json:"createTimeMs"` + FinishedTimeMs int64 `json:"finishedTimeMs"` + IsDropOp bool `json:"isDropOp"` + OriginIndexId int64 `json:"originIndexId"` + TimeoutMs int64 `json:"timeoutMs"` + Indexes []Index `json:"alterInvertedIndexes"` +} + +func NewIndexChangeJobFromJson(data string) (*IndexChangeJob, error) { + m := &IndexChangeJob{} + if err := json.Unmarshal([]byte(data), m); err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal index change job error") + } + + if m.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "index change job table id not found") + } + + if m.PartitionId == 0 { + return nil, xerror.Errorf(xerror.Normal, "index change job partition id not found") + } + + if m.JobState == "" { + return nil, xerror.Errorf(xerror.Normal, "index change job state not found") + } + + if len(m.Indexes) == 0 { + return nil, xerror.Errorf(xerror.Normal, "index change job alter inverted indexes is empty") + } + + if !m.IsDropOp && len(m.Indexes) != 1 { + return nil, xerror.Errorf(xerror.Normal, "index change job alter inverted indexes length is not 1") + } + + return m, nil +} diff --git a/pkg/ccr/record/modify_comment.go b/pkg/ccr/record/modify_comment.go new file mode 100644 index 00000000..4fc60f2e --- /dev/null +++ b/pkg/ccr/record/modify_comment.go @@ -0,0 +1,35 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type ModifyComment struct { + Type string `json:"type"` + DbId int64 `json:"dbId"` + TblId int64 `json:"tblId"` + ColToComment map[string]string `json:"colToComment"` + TblComment string `json:"tblComment"` +} + +func NewModifyCommentFromJson(data string) (*ModifyComment, error) { + var modifyComment ModifyComment + err := json.Unmarshal([]byte(data), &modifyComment) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify comment error") + } + + if modifyComment.TblId == 0 { + return nil, xerror.Errorf(xerror.Normal, "table id not found") + } + + return &modifyComment, nil +} + +// Stringer +func (r *ModifyComment) String() string { + return fmt.Sprintf("ModifyComment: Type: %s, DbId: %d, TblId: %d, ColToComment: %v, TblComment: %s", r.Type, r.DbId, r.TblId, r.ColToComment, r.TblComment) +} diff --git a/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go b/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go new file mode 100644 index 00000000..6e99f68d --- /dev/null +++ b/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go @@ -0,0 +1,45 @@ +package record + +import ( + "encoding/json" + 
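The Index record above keeps every field twice, under a long JSON key and a short alternative key ("i", "in", "c", and so on), and the Get* accessors fall back to whichever form was populated. A compact sketch of that decode-with-fallback pattern, using a hypothetical payload that only carries the short key:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A trimmed-down version of the Index record: the same field can arrive under
// a long key or a short alternative key, and the accessor falls back.
type index struct {
	IndexName            string `json:"indexName"`
	IndexNameAlternative string `json:"in"`
}

func (i *index) GetIndexName() string {
	if i.IndexName != "" {
		return i.IndexName
	}
	return i.IndexNameAlternative
}

func main() {
	// Hypothetical payload using only the short key.
	payload := `{"in": "idx_comment_inverted"}`
	var idx index
	if err := json.Unmarshal([]byte(payload), &idx); err != nil {
		panic(err)
	}
	fmt.Println(idx.GetIndexName()) // idx_comment_inverted
}
```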
"strings" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type ModifyTableAddOrDropInvertedIndices struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + IsDropInvertedIndex bool `json:"isDropInvertedIndex"` + RawSql string `json:"rawSql"` + Indexes []Index `json:"indexes"` + AlternativeIndexes []Index `json:"alterInvertedIndexes"` +} + +func NewModifyTableAddOrDropInvertedIndicesFromJson(data string) (*ModifyTableAddOrDropInvertedIndices, error) { + m := &ModifyTableAddOrDropInvertedIndices{} + if err := json.Unmarshal([]byte(data), m); err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify table add or drop inverted indices error") + } + + if m.RawSql == "" { + // TODO: fallback to create sql from other fields + return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices sql is empty") + } + + if m.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices table id not found") + } + + return m, nil +} + +func (m *ModifyTableAddOrDropInvertedIndices) GetRawSql() string { + if strings.Contains(m.RawSql, "ALTER TABLE") && strings.Contains(m.RawSql, "INDEX") && + !strings.Contains(m.RawSql, "DROP INDEX") && !strings.Contains(m.RawSql, "ADD INDEX") { + // fix the syntax error + // See apache/doris#44392 for details + return strings.ReplaceAll(m.RawSql, "INDEX", "ADD INDEX") + } + return m.RawSql +} diff --git a/pkg/ccr/record/recover_info.go b/pkg/ccr/record/recover_info.go new file mode 100644 index 00000000..6325dbd7 --- /dev/null +++ b/pkg/ccr/record/recover_info.go @@ -0,0 +1,50 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type RecoverInfo struct { + DbId int64 `json:"dbId"` + NewDbName string `json:"newDbName"` + TableId int64 `json:"tableId"` + TableName string `json:"tableName"` + NewTableName string `json:"newTableName"` + PartitionId int64 `json:"partitionId"` + PartitionName string `json:"partitionName"` + NewPartitionName string `json:"newPartitionName"` +} + +func NewRecoverInfoFromJson(data string) (*RecoverInfo, error) { + var recoverInfo RecoverInfo + err := json.Unmarshal([]byte(data), &recoverInfo) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal create table error") + } + + if recoverInfo.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "table id not found") + } + + // table name must exist. partition name not checked since optional. 
+ if recoverInfo.TableName == "" { + return nil, xerror.Errorf(xerror.Normal, "Table Name can not be null") + } + return &recoverInfo, nil +} + +func (c *RecoverInfo) IsRecoverTable() bool { + if c.PartitionName == "" || c.PartitionId == -1 { + return true + } + return false +} + +// String +func (c *RecoverInfo) String() string { + return fmt.Sprintf("RecoverInfo: DbId: %d, NewDbName: %s, TableId: %d, TableName: %s, NewTableName: %s, PartitionId: %d, PartitionName: %s, NewPartitionName: %s", + c.DbId, c.NewDbName, c.TableId, c.TableName, c.NewTableName, c.PartitionId, c.PartitionName, c.NewPartitionName) +} diff --git a/pkg/ccr/record/rename_column.go b/pkg/ccr/record/rename_column.go new file mode 100644 index 00000000..ab1c5388 --- /dev/null +++ b/pkg/ccr/record/rename_column.go @@ -0,0 +1,35 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type RenameColumn struct { + DbId int64 `json:"dbId"` + TableId int64 `json:"tableId"` + ColName string `json:"colName"` + NewColName string `json:"newColName"` + IndexIdToSchemaVersion map[int64]int32 `json:"indexIdToSchemaVersion"` +} + +func NewRenameColumnFromJson(data string) (*RenameColumn, error) { + var renameColumn RenameColumn + err := json.Unmarshal([]byte(data), &renameColumn) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename column error") + } + + if renameColumn.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "table id not found") + } + + return &renameColumn, nil +} + +// Stringer +func (r *RenameColumn) String() string { + return fmt.Sprintf("RenameColumn: DbId: %d, TableId: %d, ColName: %s, NewColName: %s, IndexIdToSchemaVersion: %v", r.DbId, r.TableId, r.ColName, r.NewColName, r.IndexIdToSchemaVersion) +} diff --git a/pkg/ccr/record/rename_partition.go b/pkg/ccr/record/rename_partition.go new file mode 100644 index 00000000..1ab9bb35 --- /dev/null +++ b/pkg/ccr/record/rename_partition.go @@ -0,0 +1,44 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type RenamePartition struct { + DbId int64 `json:"db"` + TableId int64 `json:"tb"` + PartitionId int64 `json:"p"` + NewPartitionName string `json:"nP"` + OldPartitionName string `json:"oP"` +} + +func NewRenamePartitionFromJson(data string) (*RenamePartition, error) { + var rename RenamePartition + err := json.Unmarshal([]byte(data), &rename) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename partition record error") + } + + if rename.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "rename partition record table id not found") + } + + if rename.PartitionId == 0 { + return nil, xerror.Errorf(xerror.Normal, "rename partition record partition id not found") + } + + if rename.NewPartitionName == "" { + return nil, xerror.Errorf(xerror.Normal, "rename partition record new partition name not found") + } + + return &rename, nil +} + +// Stringer +func (r *RenamePartition) String() string { + return fmt.Sprintf("RenamePartition: DbId: %d, TableId: %d, PartitionId: %d, NewPartitionName: %s, OldPartitionName: %s", + r.DbId, r.TableId, r.PartitionId, r.NewPartitionName, r.OldPartitionName) +} diff --git a/pkg/ccr/record/rename_rollup.go b/pkg/ccr/record/rename_rollup.go new file mode 100644 index 00000000..c5eb011d --- /dev/null +++ b/pkg/ccr/record/rename_rollup.go @@ -0,0 +1,40 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type 
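GetRawSql in pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go above works around an upstream statement of the form ALTER TABLE ... INDEX ... that is missing the ADD keyword (see apache/doris#44392) by rewriting INDEX to ADD INDEX. A tiny illustration of that rewrite on an invented statement:

```go
package main

import (
	"fmt"
	"strings"
)

// fixAddIndexSql mirrors the GetRawSql workaround: if the statement mentions
// INDEX but neither ADD INDEX nor DROP INDEX, treat it as an ADD INDEX. Note
// that ReplaceAll would rewrite every "INDEX" occurrence in the statement.
func fixAddIndexSql(rawSql string) string {
	if strings.Contains(rawSql, "ALTER TABLE") && strings.Contains(rawSql, "INDEX") &&
		!strings.Contains(rawSql, "DROP INDEX") && !strings.Contains(rawSql, "ADD INDEX") {
		return strings.ReplaceAll(rawSql, "INDEX", "ADD INDEX")
	}
	return rawSql
}

func main() {
	// Invented example; real statements come from the upstream binlog.
	fmt.Println(fixAddIndexSql("ALTER TABLE t1 INDEX idx_c1 (c1) USING INVERTED"))
	// -> ALTER TABLE t1 ADD INDEX idx_c1 (c1) USING INVERTED
}
```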
RenameRollup struct { + DbId int64 `json:"db"` + TableId int64 `json:"tb"` + IndexId int64 `json:"ind"` + NewRollupName string `json:"nR"` + OldRollupName string `json:"oR"` +} + +func NewRenameRollupFromJson(data string) (*RenameRollup, error) { + var record RenameRollup + err := json.Unmarshal([]byte(data), &record) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename rollup record error") + } + + if record.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "rename rollup record table id not found") + } + + if record.NewRollupName == "" { + return nil, xerror.Errorf(xerror.Normal, "rename rollup record old rollup name not found") + } + + return &record, nil +} + +// Stringer +func (r *RenameRollup) String() string { + return fmt.Sprintf("RenameRollup: DbId: %d, TableId: %d, IndexId: %d, NewRollupName: %s, OldRollupName: %s", + r.DbId, r.TableId, r.IndexId, r.NewRollupName, r.OldRollupName) +} diff --git a/pkg/ccr/record/rename_table.go b/pkg/ccr/record/rename_table.go new file mode 100644 index 00000000..1905133c --- /dev/null +++ b/pkg/ccr/record/rename_table.go @@ -0,0 +1,40 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type RenameTable struct { + DbId int64 `json:"db"` + TableId int64 `json:"tb"` + IndexId int64 `json:"ind"` + PartitionId int64 `json:"p"` + NewTableName string `json:"nT"` + OldTableName string `json:"oT"` + NewRollupName string `json:"nR"` + OldRollupName string `json:"oR"` + NewPartitionName string `json:"nP"` + OldPartitionName string `json:"oP"` +} + +func NewRenameTableFromJson(data string) (*RenameTable, error) { + var renameTable RenameTable + err := json.Unmarshal([]byte(data), &renameTable) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename table error") + } + + if renameTable.TableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "table id not found") + } + + return &renameTable, nil +} + +// Stringer +func (r *RenameTable) String() string { + return fmt.Sprintf("RenameTable: DbId: %d, TableId: %d, PartitionId: %d, IndexId: %d, NewTableName: %s, OldTableName: %s, NewRollupName: %s, OldRollupName: %s, NewPartitionName: %s, OldPartitionName: %s", r.DbId, r.TableId, r.PartitionId, r.IndexId, r.NewTableName, r.OldTableName, r.NewRollupName, r.OldRollupName, r.NewPartitionName, r.OldPartitionName) +} diff --git a/pkg/ccr/record/replace_partition.go b/pkg/ccr/record/replace_partition.go new file mode 100644 index 00000000..02b1bd90 --- /dev/null +++ b/pkg/ccr/record/replace_partition.go @@ -0,0 +1,40 @@ +package record + +import ( + "encoding/json" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type ReplacePartitionRecord struct { + DbId int64 `json:"dbId"` + DbName string `json:"dbName"` + TableId int64 `json:"tblId"` + TableName string `json:"tblName"` + Partitions []string `json:"partitions"` + TempPartitions []string `json:"tempPartitions"` + StrictRange bool `json:"strictRange"` + UseTempName bool `json:"useTempPartitionName"` +} + +func NewReplacePartitionFromJson(data string) (*ReplacePartitionRecord, error) { + var replacePartition ReplacePartitionRecord + err := json.Unmarshal([]byte(data), &replacePartition) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal replace partition error") + } + + if len(replacePartition.TempPartitions) == 0 { + return nil, xerror.Errorf(xerror.Normal, "the temp partitions of the replace partition record is empty") + } + + if replacePartition.TableId == 0 { + 
return nil, xerror.Errorf(xerror.Normal, "table id not found") + } + + if replacePartition.TableName == "" { + return nil, xerror.Errorf(xerror.Normal, "table name is empty") + } + + return &replacePartition, nil +} diff --git a/pkg/ccr/record/replace_table.go b/pkg/ccr/record/replace_table.go new file mode 100644 index 00000000..718ed348 --- /dev/null +++ b/pkg/ccr/record/replace_table.go @@ -0,0 +1,50 @@ +package record + +import ( + "encoding/json" + "fmt" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type ReplaceTableRecord struct { + DbId int64 `json:"dbId"` + OriginTableId int64 `json:"origTblId"` + OriginTableName string `json:"origTblName"` + NewTableId int64 `json:"newTblName"` + NewTableName string `json:"actualNewTblName"` + SwapTable bool `json:"swapTable"` + IsForce bool `json:"isForce"` +} + +func NewReplaceTableRecordFromJson(data string) (*ReplaceTableRecord, error) { + record := &ReplaceTableRecord{} + err := json.Unmarshal([]byte(data), record) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal replace table record error") + } + + if record.OriginTableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "table id of replace table record not found") + } + + if record.OriginTableName == "" { + return nil, xerror.Errorf(xerror.Normal, "table name of replace table record not found") + } + + if record.NewTableId == 0 { + return nil, xerror.Errorf(xerror.Normal, "new table id of replace table record not found") + } + + if record.NewTableName == "" { + return nil, xerror.Errorf(xerror.Normal, "new table name of replace table record not found") + } + + return record, nil +} + +// Stringer +func (r *ReplaceTableRecord) String() string { + return fmt.Sprintf("ReplaceTableRecord: DbId: %d, OriginTableId: %d, OriginTableName: %s, NewTableId: %d, NewTableName: %s, SwapTable: %v, IsForce: %v", + r.DbId, r.OriginTableId, r.OriginTableName, r.NewTableId, r.NewTableName, r.SwapTable, r.IsForce) +} diff --git a/pkg/ccr/record/restore_info.go b/pkg/ccr/record/restore_info.go new file mode 100644 index 00000000..030d1af2 --- /dev/null +++ b/pkg/ccr/record/restore_info.go @@ -0,0 +1,26 @@ +package record + +import ( + "encoding/json" + + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type RestoreInfo struct { + DbId int64 `json:"dbId"` + DbName string `json:"dbName"` + TableInfo map[int64]string `json:"tableInfo"` +} + +func NewRestoreInfoFromJson(data string) (*RestoreInfo, error) { + var restoreInfo RestoreInfo + err := json.Unmarshal([]byte(data), &restoreInfo) + if err != nil { + return nil, xerror.Wrap(err, xerror.Normal, "unmarshal create table error") + } + + if restoreInfo.DbId == 0 { + return nil, xerror.Errorf(xerror.Normal, "db id not found") + } + return &restoreInfo, nil +} diff --git a/pkg/ccr/record/truncate_table.go b/pkg/ccr/record/truncate_table.go index 3bd9004d..c40c75eb 100644 --- a/pkg/ccr/record/truncate_table.go +++ b/pkg/ccr/record/truncate_table.go @@ -9,7 +9,7 @@ import ( // { // "dbId": 10079, -// "db": "default_cluster:ccr", +// "db": "default_cluster:ccr", # "default_cluster:" prefix will be removed in Doris v2.1 // "tblId": 77395, // "table": "src_1_alias", // "isEntireTable": false, diff --git a/pkg/ccr/record/upsert.go b/pkg/ccr/record/upsert.go index ced175d8..fcfea4b5 100644 --- a/pkg/ccr/record/upsert.go +++ b/pkg/ccr/record/upsert.go @@ -11,10 +11,13 @@ type PartitionRecord struct { Id int64 `json:"partitionId"` Range string `json:"range"` Version int64 `json:"version"` + IsTemp bool `json:"isTempPartition"` + Stid int64 
`json:"stid"` } func (p PartitionRecord) String() string { - return fmt.Sprintf("PartitionRecord{Id: %d, Range: %s, Version: %d}", p.Id, p.Range, p.Version) + return fmt.Sprintf("PartitionRecord{Id: %d, Range: %s, Version: %d, IsTemp: %v, Stid: %d}", + p.Id, p.Range, p.Version, p.IsTemp, p.Stid) } type TableRecord struct { @@ -34,11 +37,12 @@ type Upsert struct { Label string `json:"label"` DbID int64 `json:"dbId"` TableRecords map[int64]*TableRecord `json:"tableRecords"` + Stids []int64 `json:"stids"` } // Stringer func (u Upsert) String() string { - return fmt.Sprintf("Upsert{CommitSeq: %d, TxnID: %d, TimeStamp: %d, Label: %s, DbID: %d, TableRecords: %v}", u.CommitSeq, u.TxnID, u.TimeStamp, u.Label, u.DbID, u.TableRecords) + return fmt.Sprintf("Upsert{CommitSeq: %d, TxnID: %d, TimeStamp: %d, Label: %s, DbID: %d, TableRecords: %v, Stids: %v}", u.CommitSeq, u.TxnID, u.TimeStamp, u.Label, u.DbID, u.TableRecords, u.Stids) } // { diff --git a/pkg/ccr/rpc_factory_mock.go b/pkg/ccr/rpc_factory_mock.go index c8b44c5a..e4a58219 100644 --- a/pkg/ccr/rpc_factory_mock.go +++ b/pkg/ccr/rpc_factory_mock.go @@ -1,6 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: rpc/rpc_factory.go - +// Source: pkg/rpc/rpc_factory.go +// +// Generated by this command: +// +// mockgen -source=pkg/rpc/rpc_factory.go -destination=pkg/ccr/rpc_factory_mock.go -package=ccr +// // Package ccr is a generated GoMock package. package ccr @@ -45,7 +49,7 @@ func (m *MockIRpcFactory) NewBeRpc(be *base.Backend) (rpc.IBeRpc, error) { } // NewBeRpc indicates an expected call of NewBeRpc. -func (mr *MockIRpcFactoryMockRecorder) NewBeRpc(be interface{}) *gomock.Call { +func (mr *MockIRpcFactoryMockRecorder) NewBeRpc(be any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBeRpc", reflect.TypeOf((*MockIRpcFactory)(nil).NewBeRpc), be) } @@ -60,7 +64,7 @@ func (m *MockIRpcFactory) NewFeRpc(spec *base.Spec) (rpc.IFeRpc, error) { } // NewFeRpc indicates an expected call of NewFeRpc. 
-func (mr *MockIRpcFactoryMockRecorder) NewFeRpc(spec interface{}) *gomock.Call { +func (mr *MockIRpcFactoryMockRecorder) NewFeRpc(spec any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFeRpc", reflect.TypeOf((*MockIRpcFactory)(nil).NewFeRpc), spec) } diff --git a/pkg/ccr/thrift_meta.go b/pkg/ccr/thrift_meta.go new file mode 100644 index 00000000..ed64571d --- /dev/null +++ b/pkg/ccr/thrift_meta.go @@ -0,0 +1,285 @@ +package ccr + +import ( + "github.com/selectdb/ccr_syncer/pkg/ccr/base" + "github.com/selectdb/ccr_syncer/pkg/rpc" + "github.com/selectdb/ccr_syncer/pkg/xerror" + + tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" + + "github.com/tidwall/btree" +) + +var DefaultThriftMetaFactory ThriftMetaFactory = &defaultThriftMetaFactory{} + +type ThriftMetaFactory interface { + NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error) +} + +type defaultThriftMetaFactory struct{} + +func (dtmf *defaultThriftMetaFactory) NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error) { + return NewThriftMeta(spec, rpcFactory, tableIds) +} + +func NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error) { + meta := NewMeta(spec) + feRpc, err := rpcFactory.NewFeRpc(spec) + if err != nil { + return nil, err + } + + // Step 1: get backends + backendMetaResp, err := feRpc.GetBackends(spec) + if err != nil { + return nil, err + } + + if backendMetaResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK { + return nil, xerror.Errorf(xerror.Meta, "get backend meta failed, status: %s", backendMetaResp.GetStatus()) + } + + if !backendMetaResp.IsSetBackends() { + return nil, xerror.New(xerror.Meta, "get backend meta failed, backend meta not set") + } + + for _, backend := range backendMetaResp.GetBackends() { + backendMeta := &base.Backend{ + Id: backend.GetId(), + Host: backend.GetHost(), + BePort: uint16(backend.GetBePort()), + HttpPort: uint16(backend.GetHttpPort()), + BrpcPort: uint16(backend.GetBrpcPort()), + } + meta.Backends[backendMeta.Id] = backendMeta + } + + // Step 2: get table metas + tableMetaResp, err := feRpc.GetTableMeta(spec, tableIds) + if err != nil { + return nil, err + } + + if tableMetaResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK { + return nil, xerror.Errorf(xerror.Meta, "get table meta failed, status: %s", tableMetaResp.GetStatus()) + } + + if !tableMetaResp.IsSetDbMeta() { + return nil, xerror.New(xerror.Meta, "get table meta failed, db meta not set") + } + + dbMeta := tableMetaResp.GetDbMeta() + for _, table := range dbMeta.GetTables() { + tableMeta := &TableMeta{ + DatabaseMeta: &meta.DatabaseMeta, + Id: table.GetId(), + Name: table.GetName(), + PartitionIdMap: make(map[int64]*PartitionMeta), + PartitionRangeMap: make(map[string]*PartitionMeta), + } + meta.Id = dbMeta.GetId() + meta.Tables[tableMeta.Id] = tableMeta + meta.TableName2IdMap[tableMeta.Name] = tableMeta.Id + + for _, partition := range table.GetPartitions() { + partitionMeta := &PartitionMeta{ + TableMeta: tableMeta, + Id: partition.GetId(), + Name: partition.GetName(), + Range: partition.GetRange(), + VisibleVersion: partition.GetVisibleVersion(), + IndexIdMap: make(map[int64]*IndexMeta), + IndexNameMap: make(map[string]*IndexMeta), + } + tableMeta.PartitionIdMap[partitionMeta.Id] = partitionMeta + tableMeta.PartitionRangeMap[partitionMeta.Range] = partitionMeta + + for _, index := range partition.GetIndexes() { + 
indexName := index.GetName() + isBaseIndex := indexName == tableMeta.Name // it is accurate, since lock is held + indexMeta := &IndexMeta{ + PartitionMeta: partitionMeta, + Id: index.GetId(), + Name: indexName, + IsBaseIndex: isBaseIndex, + TabletMetas: btree.NewMap[int64, *TabletMeta](degree), + ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree), + } + partitionMeta.IndexIdMap[indexMeta.Id] = indexMeta + partitionMeta.IndexNameMap[indexMeta.Name] = indexMeta + if tableMeta.Name == indexMeta.Name { + tableMeta.BaseIndexId = indexMeta.Id + } + + for _, tablet := range index.GetTablets() { + tabletMeta := &TabletMeta{ + IndexMeta: indexMeta, + Id: tablet.GetId(), + ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree), + } + indexMeta.TabletMetas.Set(tabletMeta.Id, tabletMeta) + + for _, replica := range tablet.GetReplicas() { + replicaMeta := &ReplicaMeta{ + TabletMeta: tabletMeta, + Id: replica.GetId(), + TabletId: tabletMeta.Id, + BackendId: replica.GetBackendId(), + Version: replica.GetVersion(), + } + tabletMeta.ReplicaMetas.Set(replicaMeta.Id, replicaMeta) + indexMeta.ReplicaMetas.Set(replicaMeta.Id, replicaMeta) + } + } + } + } + } + + droppedPartitions := make(map[int64]struct{}) + for _, partition := range dbMeta.GetDroppedPartitions() { + droppedPartitions[partition] = struct{}{} + } + droppedTables := make(map[int64]struct{}) + for _, table := range dbMeta.GetDroppedTables() { + droppedTables[table] = struct{}{} + } + droppedIndexes := make(map[int64]struct{}) + for _, index := range dbMeta.GetDroppedIndexes() { + droppedIndexes[index] = struct{}{} + } + + return &ThriftMeta{ + meta: meta, + droppedPartitions: droppedPartitions, + droppedTables: droppedTables, + droppedIndexes: droppedIndexes, + }, nil +} + +type ThriftMeta struct { + meta *Meta + droppedPartitions map[int64]struct{} + droppedTables map[int64]struct{} + droppedIndexes map[int64]struct{} +} + +func (tm *ThriftMeta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) { + dbId := tm.meta.Id + + tableMeta, ok := tm.meta.Tables[tableId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId) + } + + partitionMeta, ok := tableMeta.PartitionIdMap[partitionId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId) + } + + indexMeta, ok := partitionMeta.IndexIdMap[indexId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d, indexId: %d not found", dbId, tableId, partitionId, indexId) + } + + return indexMeta.TabletMetas, nil +} + +func (tm *ThriftMeta) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) { + dbId := tm.meta.Id + + tableMeta, ok := tm.meta.Tables[tableId] + if !ok { + return 0, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId) + } + + partitionMeta, ok := tableMeta.PartitionRangeMap[partitionRange] + if !ok { + return 0, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionRange: %s not found", dbId, tableId, partitionRange) + } + + return partitionMeta.Id, nil +} + +func (tm *ThriftMeta) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) { + dbId := tm.meta.Id + + tableMeta, ok := tm.meta.Tables[tableId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId) + } + + return tableMeta.PartitionRangeMap, nil +} + +func (tm *ThriftMeta) GetIndexIdMap(tableId, partitionId int64) 
(map[int64]*IndexMeta, error) { + dbId := tm.meta.Id + + tableMeta, ok := tm.meta.Tables[tableId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId) + } + + partitionMeta, ok := tableMeta.PartitionIdMap[partitionId] + if !ok { + return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId) + } + + return partitionMeta.IndexIdMap, nil +} + +func (tm *ThriftMeta) GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) { + dbId := tm.meta.Id + + tableMeta, ok := tm.meta.Tables[tableId] + if !ok { + return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId) + } + + partitionMeta, ok := tableMeta.PartitionIdMap[partitionId] + if !ok { + return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId) + } + + baseIndex, ok := partitionMeta.IndexNameMap[tableMeta.Name] + if !ok { + return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d, indexName: %s not found", dbId, tableId, partitionId, tableMeta.Name) + } + + return partitionMeta.IndexNameMap, baseIndex, nil +} + +func (tm *ThriftMeta) GetBackendMap() (map[int64]*base.Backend, error) { + if tm.meta.HostMapping == nil { + return tm.meta.Backends, nil + } + + backends := make(map[int64]*base.Backend) + for id, backend := range tm.meta.Backends { + if host, ok := tm.meta.HostMapping[backend.Host]; ok { + backend.Host = host + } else { + return nil, xerror.Errorf(xerror.Normal, + "the public ip of host %s is not found, consider adding it via HTTP API /update_host_mapping", backend.Host) + } + backends[id] = backend + } + return backends, nil +} + +// Whether the target partition are dropped +func (tm *ThriftMeta) IsPartitionDropped(partitionId int64) bool { + _, ok := tm.droppedPartitions[partitionId] + return ok +} + +// Whether the target table are dropped +func (tm *ThriftMeta) IsTableDropped(tableId int64) bool { + _, ok := tm.droppedTables[tableId] + return ok +} + +// Whether the target index are dropped +func (tm *ThriftMeta) IsIndexDropped(tableId int64) bool { + _, ok := tm.droppedIndexes[tableId] + return ok +} diff --git a/pkg/ccr/utils.go b/pkg/ccr/utils.go index 6b9e919d..51a586c7 100644 --- a/pkg/ccr/utils.go +++ b/pkg/ccr/utils.go @@ -6,14 +6,58 @@ import ( "github.com/selectdb/ccr_syncer/pkg/xerror" ) -func ExtractTableCommitSeqMap(data []byte) (map[int64]int64, error) { - type JobInfo struct { - TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` - } - var jobInfo JobInfo +type BackupViewInfo struct { + Id int64 `json:"id"` + Name string `json:"name"` +} + +type BackupOlapTableInfo struct { + Id int64 `json:"id"` +} + +type NewBackupObject struct { + Views []BackupViewInfo `json:"views"` +} + +type BackupJobInfo struct { + TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` + BackupObjects map[string]BackupOlapTableInfo `json:"backup_objects"` + NewBackupObjects *NewBackupObject `json:"new_backup_objects"` +} +func NewBackupJobInfoFromJson(data []byte) (*BackupJobInfo, error) { + jobInfo := &BackupJobInfo{} if err := json.Unmarshal(data, &jobInfo); err != nil { return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal job info error: %v", err) } - return jobInfo.TableCommitSeqMap, nil + return jobInfo, nil +} + +func (i *BackupJobInfo) TableNameMapping() map[int64]string { + tableMapping := make(map[int64]string) + for tableName, tableInfo := range 
i.BackupObjects { + tableMapping[tableInfo.Id] = tableName + } + return tableMapping +} + +// Get the table id by table name, return -1 if not found +func (i *BackupJobInfo) TableId(name string) int64 { + if tableInfo, ok := i.BackupObjects[name]; ok { + return tableInfo.Id + } + + return -1 +} + +func (i *BackupJobInfo) Views() []string { + if i.NewBackupObjects == nil { + return []string{} + } + + views := make([]string, 0) + for _, viewInfo := range i.NewBackupObjects.Views { + views = append(views, viewInfo.Name) + } + return views } diff --git a/pkg/rpc/Makefile b/pkg/rpc/Makefile new file mode 100644 index 00000000..4b52d47e --- /dev/null +++ b/pkg/rpc/Makefile @@ -0,0 +1,3 @@ +gen_thrift: + kitex -module github.com/selectdb/ccr_syncer thrift/FrontendService.thrift + kitex -module github.com/selectdb/ccr_syncer thrift/BackendService.thrift diff --git a/pkg/rpc/be.go b/pkg/rpc/be.go index cf8b012a..bec3d244 100644 --- a/pkg/rpc/be.go +++ b/pkg/rpc/be.go @@ -26,7 +26,8 @@ func (beRpc *BeRpc) IngestBinlog(req *bestruct.TIngestBinlogRequest) (*bestruct. client := beRpc.client if result, err := client.IngestBinlog(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "IngestBinlog error: %v", err) + return nil, xerror.Wrapf(err, xerror.Normal, + "IngestBinlog error: %v, txnId: %d, be: %v", err, req.GetTxnId(), beRpc.backend) } else { return result, nil } diff --git a/pkg/rpc/concurrency.go b/pkg/rpc/concurrency.go new file mode 100644 index 00000000..d10bd827 --- /dev/null +++ b/pkg/rpc/concurrency.go @@ -0,0 +1,72 @@ +package rpc + +import ( + "flag" + "sync" +) + +var ( + FlagMaxIngestConcurrencyPerBackend int64 +) + +func init() { + flag.Int64Var(&FlagMaxIngestConcurrencyPerBackend, "max_ingest_concurrency_per_backend", 48, + "The max concurrency of the binlog ingesting per backend") +} + +type ConcurrencyWindow struct { + mu *sync.Mutex + cond *sync.Cond + + id int64 + inflights int64 +} + +func newCongestionWindow(id int64) *ConcurrencyWindow { + mu := &sync.Mutex{} + return &ConcurrencyWindow{ + mu: mu, + cond: sync.NewCond(mu), + id: id, + inflights: 0, + } +} + +func (cw *ConcurrencyWindow) Acquire() { + cw.mu.Lock() + defer cw.mu.Unlock() + + for cw.inflights+1 > FlagMaxIngestConcurrencyPerBackend { + cw.cond.Wait() + } + cw.inflights += 1 +} + +func (cw *ConcurrencyWindow) Release() { + cw.mu.Lock() + defer cw.mu.Unlock() + + if cw.inflights == 0 { + return + } + + cw.inflights -= 1 + cw.cond.Signal() +} + +type ConcurrencyManager struct { + windows sync.Map +} + +func NewConcurrencyManager() *ConcurrencyManager { + return &ConcurrencyManager{} +} + +func (cm *ConcurrencyManager) GetWindow(id int64) *ConcurrencyWindow { + value, ok := cm.windows.Load(id) + if !ok { + window := newCongestionWindow(id) + value, ok = cm.windows.LoadOrStore(id, window) + } + return value.(*ConcurrencyWindow) +} diff --git a/pkg/rpc/fe.go b/pkg/rpc/fe.go index 62c02901..01217118 100644 --- a/pkg/rpc/fe.go +++ b/pkg/rpc/fe.go @@ -2,34 +2,472 @@ package rpc import ( "context" - - "github.com/selectdb/ccr_syncer/pkg/ccr/base" - "github.com/selectdb/ccr_syncer/pkg/xerror" + "errors" + "flag" + "fmt" + "strings" + "sync" + "time" festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" feservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice/frontendservice" + tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" festruct_types "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" + 
"github.com/selectdb/ccr_syncer/pkg/utils" + "github.com/selectdb/ccr_syncer/pkg/xerror" + "github.com/cloudwego/kitex/client" + "github.com/cloudwego/kitex/client/callopt" + "github.com/cloudwego/kitex/pkg/kerrors" + "github.com/selectdb/ccr_syncer/pkg/ccr/base" log "github.com/sirupsen/logrus" ) -const ( - LOCAL_REPO_NAME = "" +var ( + localRepoName string + commitTxnTimeout time.Duration + connectTimeout time.Duration + rpcTimeout time.Duration ) +var ErrFeNotMasterCompatible = xerror.NewWithoutStack(xerror.FE, "not master compatible") + +func init() { + flag.StringVar(&localRepoName, "local_repo_name", "", "local_repo_name") + flag.DurationVar(&commitTxnTimeout, "commit_txn_timeout", 33*time.Second, "commmit_txn_timeout") + flag.DurationVar(&connectTimeout, "connect_timeout", 10*time.Second, "connect timeout") + flag.DurationVar(&rpcTimeout, "rpc_timeout", 30*time.Second, "rpc timeout") +} + +// canUseNextAddr means can try next addr, err is a connection error, not a method not found or other error +func canUseNextAddr(err error) bool { + if errors.Is(err, kerrors.ErrNoConnection) { + return true + } + if errors.Is(err, kerrors.ErrNoResolver) { + return true + } + if errors.Is(err, kerrors.ErrNoDestAddress) { + return true + } + if errors.Is(err, kerrors.ErrRemoteOrNetwork) { + return true + } + + errMsg := err.Error() + if strings.Contains(errMsg, "connection has been closed by peer") { + return true + } + if strings.Contains(errMsg, "closed network connection") { + return true + } + if strings.Contains(errMsg, "connection reset by peer") { + return true + } + if strings.Contains(errMsg, "connection reset by peer") { + return true + } + + return false +} + +type RestoreSnapshotRequest struct { + TableRefs []*festruct.TTableRef + SnapshotName string + SnapshotResult *festruct.TGetSnapshotResult_ + AtomicRestore bool + CleanPartitions bool + CleanTables bool + Compress bool +} + type IFeRpc interface { BeginTransaction(*base.Spec, string, []int64) (*festruct.TBeginTxnResult_, error) + BeginTransactionForTxnInsert(*base.Spec, string, []int64, int64) (*festruct.TBeginTxnResult_, error) CommitTransaction(*base.Spec, int64, []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) + CommitTransactionForTxnInsert(*base.Spec, int64, bool, []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) GetBinlog(*base.Spec, int64) (*festruct.TGetBinlogResult_, error) GetBinlogLag(*base.Spec, int64) (*festruct.TGetBinlogLagResult_, error) - GetSnapshot(*base.Spec, string) (*festruct.TGetSnapshotResult_, error) - RestoreSnapshot(*base.Spec, []*festruct.TTableRef, string, *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) - GetMasterToken(*base.Spec) (string, error) + GetSnapshot(*base.Spec, string, bool) (*festruct.TGetSnapshotResult_, error) + RestoreSnapshot(*base.Spec, *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error) + GetMasterToken(*base.Spec) (*festruct.TGetMasterTokenResult_, error) + GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) + GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) + GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error) + + Address() string } type FeRpc struct { - client feservice.Client + spec *base.Spec + masterClient IFeRpc + clients map[string]IFeRpc + cachedFeAddrs map[string]bool + lock sync.RWMutex // for get client +} + +func NewFeRpc(spec *base.Spec) 
(*FeRpc, error) { + addr := fmt.Sprintf("%s:%s", spec.Host, spec.ThriftPort) + client, err := newSingleFeClient(addr) + if err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient error: %v", err) + } + + clients := make(map[string]IFeRpc) + clients[client.Address()] = client + cachedFeAddrs := make(map[string]bool) + for _, fe := range spec.Frontends { + addr := fmt.Sprintf("%s:%s", fe.Host, fe.ThriftPort) + + if _, ok := cachedFeAddrs[addr]; ok { + continue + } + + // for cached all spec clients + if client, err := newSingleFeClient(addr); err != nil { + log.Warnf("new fe client error: %+v", err) + } else { + clients[client.Address()] = client + } + cachedFeAddrs[addr] = true + } + + return &FeRpc{ + spec: spec, + masterClient: client, + clients: clients, + cachedFeAddrs: cachedFeAddrs, + }, nil +} + +// get all fe addrs +// "[masterAddr],otherCachedFeAddrs" => "[127.0.0.1:1000],127.0.1:1001,127.0.1:1002" +func (rpc *FeRpc) Address() string { + cachedFeAddrs := rpc.getCacheFeAddrs() + masterClient := rpc.getMasterClient() + + var addrBuilder strings.Builder + addrBuilder.WriteString(fmt.Sprintf("[%s]", masterClient.Address())) + delete(cachedFeAddrs, masterClient.Address()) + for addr := range cachedFeAddrs { + addrBuilder.WriteString(",") + addrBuilder.WriteString(addr) + } + return addrBuilder.String() +} + +type resultType interface { + GetStatus() *tstatus.TStatus + IsSetMasterAddress() bool + GetMasterAddress() *festruct_types.TNetworkAddress +} +type callerType func(client IFeRpc) (resultType, error) + +func (rpc *FeRpc) getMasterClient() IFeRpc { + rpc.lock.RLock() + defer rpc.lock.RUnlock() + + return rpc.masterClient +} + +func (rpc *FeRpc) updateMasterClient(masterClient IFeRpc) { + rpc.lock.Lock() + defer rpc.lock.Unlock() + + rpc.clients[masterClient.Address()] = masterClient + rpc.masterClient = masterClient +} + +func (rpc *FeRpc) getClient(addr string) (IFeRpc, bool) { + rpc.lock.RLock() + defer rpc.lock.RUnlock() + + client, ok := rpc.clients[addr] + return client, ok +} + +func (rpc *FeRpc) addClient(client IFeRpc) { + rpc.lock.Lock() + defer rpc.lock.Unlock() + + rpc.clients[client.Address()] = client +} + +func (rpc *FeRpc) getClients() map[string]IFeRpc { + rpc.lock.RLock() + defer rpc.lock.RUnlock() + + return utils.CopyMap(rpc.clients) +} + +func (rpc *FeRpc) getCacheFeAddrs() map[string]bool { + rpc.lock.RLock() + defer rpc.lock.RUnlock() + + return utils.CopyMap(rpc.cachedFeAddrs) +} + +type retryWithMasterRedirectAndCachedClientsRpc struct { + rpc *FeRpc + caller callerType + notriedClients map[string]IFeRpc +} + +type call0Result struct { + canUseNextAddr bool + resp resultType + err error + masterAddr string +} + +func (r *retryWithMasterRedirectAndCachedClientsRpc) call0(masterClient IFeRpc) *call0Result { + caller := r.caller + resp, err := caller(masterClient) + log.Tracef("call resp: %.128v, error: %+v", resp, err) + + // Step 1: check error + if err != nil { + if !canUseNextAddr(err) { + return &call0Result{ + canUseNextAddr: false, + err: xerror.Wrap(err, xerror.FE, "thrift error"), + } + } else { + log.Warnf("call error: %+v, try next addr", err) + return &call0Result{ + canUseNextAddr: true, + err: xerror.Wrap(err, xerror.FE, "thrift error"), + } + } + } + + // Step 2: check need redirect + if resp.GetStatus().GetStatusCode() != tstatus.TStatusCode_NOT_MASTER { + return &call0Result{ + canUseNextAddr: false, + resp: resp, + err: nil, + } + } + + // no compatible for master + if !resp.IsSetMasterAddress() { + err = 
xerror.XPanicWrapf(ErrFeNotMasterCompatible, "fe addr [%s]", masterClient.Address()) + return &call0Result{ + canUseNextAddr: true, + err: err, // not nil + } + } + + // switch to master + masterAddr := resp.GetMasterAddress() + err = xerror.Errorf(xerror.FE, "addr [%s] is not master", masterAddr) + + // convert private ip to public ip, if need + hostname := masterAddr.Hostname + if r.rpc.spec.HostMapping != nil { + if host, ok := r.rpc.spec.HostMapping[hostname]; ok { + hostname = host + } else { + return &call0Result{ + canUseNextAddr: true, + err: xerror.Errorf(xerror.Normal, + "the public ip of %s is not found, consider adding it via HTTP API /update_host_mapping", hostname), + } + } + } + + return &call0Result{ + canUseNextAddr: true, + resp: resp, + masterAddr: fmt.Sprintf("%s:%d", hostname, masterAddr.Port), + err: err, // not nil + } +} + +func (r *retryWithMasterRedirectAndCachedClientsRpc) call() (resultType, error) { + rpc := r.rpc + masterClient := rpc.masterClient + + // Step 1: try master + result := r.call0(masterClient) + log.Tracef("call0 result: %+v", result) + if result.err == nil { + return result.resp, nil + } + + // Step 2: check error, if can't use next addr, return error + // canUseNextAddr means can try next addr, contains ErrNoConnection, ErrNoResolver, ErrNoDestAddress => (feredirect && use next cached addr) + if !result.canUseNextAddr { + return nil, result.err + } + + // Step 3: if set master addr, redirect to master + // redirect to master + if result.masterAddr != "" { + masterAddr := result.masterAddr + log.Infof("switch to master %s", masterAddr) + + var err error + client, ok := rpc.getClient(masterAddr) + if ok { + masterClient = client + } else { + masterClient, err = newSingleFeClient(masterAddr) + if err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient [%s] error: %v", masterAddr, err) + } + } + rpc.updateMasterClient(masterClient) + return r.call() + } + + // Step 4: try all cached fe clients + if r.notriedClients == nil { + r.notriedClients = rpc.getClients() + } + delete(r.notriedClients, masterClient.Address()) + if len(r.notriedClients) == 0 { + return nil, result.err + } + // get first notried client + var client IFeRpc + for _, client = range r.notriedClients { + break + } + // because call0 failed, so original masterClient is not master now, set client as masterClient for retry + rpc.updateMasterClient(client) + return r.call() +} + +func (rpc *FeRpc) callWithMasterRedirect(caller callerType) (resultType, error) { + r := &retryWithMasterRedirectAndCachedClientsRpc{ + rpc: rpc, + caller: caller, + } + return r.call() +} + +func convertResult[T any](result any, err error) (*T, error) { + if result == nil { + return nil, err + } + + return result.(*T), err +} + +func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) { + // return rpc.masterClient.BeginTransaction(spec, label, tableIds) + caller := func(client IFeRpc) (resultType, error) { + return client.BeginTransaction(spec, label, tableIds) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TBeginTxnResult_](result, err) +} + +func (rpc *FeRpc) BeginTransactionForTxnInsert(spec *base.Spec, label string, tableIds []int64, stidNum int64) (*festruct.TBeginTxnResult_, error) { + // return rpc.masterClient.BeginTransactionForTxnInsert(spec, label, tableIds, stidNum) + caller := func(client IFeRpc) (resultType, error) { + return client.BeginTransactionForTxnInsert(spec, label, 
tableIds, stidNum) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TBeginTxnResult_](result, err) +} + +func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) { + // return rpc.masterClient.CommitTransaction(spec, txnId, commitInfos) + caller := func(client IFeRpc) (resultType, error) { + return client.CommitTransaction(spec, txnId, commitInfos) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TCommitTxnResult_](result, err) +} + +func (rpc *FeRpc) CommitTransactionForTxnInsert(spec *base.Spec, txnId int64, isTxnInsert bool, subTxnInfos []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error) { + // return rpc.masterClient.CommitTransactionForTxnInsert(spec, txnId, commitInfos, subTxnInfos) + caller := func(client IFeRpc) (resultType, error) { + return client.CommitTransactionForTxnInsert(spec, txnId, isTxnInsert, subTxnInfos) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TCommitTxnResult_](result, err) +} + +func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) { + // return rpc.masterClient.RollbackTransaction(spec, txnId) + caller := func(client IFeRpc) (resultType, error) { + return client.RollbackTransaction(spec, txnId) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TRollbackTxnResult_](result, err) +} + +func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) { + // return rpc.masterClient.GetBinlog(spec, commitSeq) + caller := func(client IFeRpc) (resultType, error) { + return client.GetBinlog(spec, commitSeq) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetBinlogResult_](result, err) +} + +func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) { + // return rpc.masterClient.GetBinlogLag(spec, commitSeq) + caller := func(client IFeRpc) (resultType, error) { + return client.GetBinlogLag(spec, commitSeq) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetBinlogLagResult_](result, err) +} + +func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string, compress bool) (*festruct.TGetSnapshotResult_, error) { + // return rpc.masterClient.GetSnapshot(spec, labelName) + caller := func(client IFeRpc) (resultType, error) { + return client.GetSnapshot(spec, labelName, compress) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetSnapshotResult_](result, err) +} + +func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, req *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error) { + caller := func(client IFeRpc) (resultType, error) { + return client.RestoreSnapshot(spec, req) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TRestoreSnapshotResult_](result, err) +} + +func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (*festruct.TGetMasterTokenResult_, error) { + // return rpc.masterClient.GetMasterToken(spec) + caller := func(client IFeRpc) (resultType, error) { + return client.GetMasterToken(spec) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetMasterTokenResult_](result, err) +} + +func (rpc *FeRpc) GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) { + caller := 
func(client IFeRpc) (resultType, error) { + return client.GetDbMeta(spec) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetMetaResult_](result, err) +} + +func (rpc *FeRpc) GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) { + caller := func(client IFeRpc) (resultType, error) { + return client.GetTableMeta(spec, tableIds) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetMetaResult_](result, err) +} + +func (rpc *FeRpc) GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error) { + caller := func(client IFeRpc) (resultType, error) { + return client.GetBackends(spec) + } + result, err := rpc.callWithMasterRedirect(caller) + return convertResult[festruct.TGetBackendMetaResult_](result, err) } type Request interface { @@ -46,6 +484,27 @@ func setAuthInfo[T Request](request T, spec *base.Spec) { request.SetDb(&spec.Database) } +type singleFeClient struct { + addr string + client feservice.Client +} + +func newSingleFeClient(addr string) (*singleFeClient, error) { + // create kitex FrontendService client + if fe_client, err := feservice.NewClient("FrontendService", client.WithHostPorts(addr), client.WithConnectTimeout(connectTimeout), client.WithRPCTimeout(rpcTimeout)); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient error: %v, addr: %s", err, addr) + } else { + return &singleFeClient{ + addr: addr, + client: fe_client, + }, nil + } +} + +func (rpc *singleFeClient) Address() string { + return rpc.addr +} + // begin transaction // // struct TBeginTxnRequest { @@ -62,8 +521,8 @@ func setAuthInfo[T Request](request T, spec *base.Spec) { // 10: optional Types.TUniqueId request_id // 11: optional string token // } -func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) { - log.Debugf("BeginTransaction spec: %s, label: %s, tableIds: %v", spec, label, tableIds) +func (rpc *singleFeClient) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) { + log.Debugf("Call BeginTransaction, addr: %s, spec: %s, label: %s, tableIds: %v", rpc.Address(), spec, label, tableIds) client := rpc.client req := &festruct.TBeginTxnRequest{ @@ -74,7 +533,26 @@ func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int log.Debugf("BeginTransaction user %s, label: %s, tableIds: %v", req.GetUser(), label, tableIds) if result, err := client.BeginTxn(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "BeginTransaction error: %v, req: %+v", err, req) + return nil, xerror.Wrapf(err, xerror.RPC, "BeginTransaction error: %v, req: %+v", err, req) + } else { + return result, nil + } +} + +func (rpc *singleFeClient) BeginTransactionForTxnInsert(spec *base.Spec, label string, tableIds []int64, stidNum int64) (*festruct.TBeginTxnResult_, error) { + log.Debugf("Call BeginTransactionForTxnInsert, addr: %s, spec: %s, label: %s, tableIds: %v", rpc.Address(), spec, label, tableIds) + + client := rpc.client + req := &festruct.TBeginTxnRequest{ + Label: &label, + } + setAuthInfo(req, spec) + req.TableIds = tableIds + req.SubTxnNum = stidNum + + log.Debugf("BeginTransactionForTxnInsert user %s, label: %s, tableIds: %v", req.GetUser(), label, tableIds) + if result, err := client.BeginTxn(context.Background(), req); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "BeginTransactionForTxnInsert error: %v, req: %+v", err, 
req) } else { return result, nil } @@ -94,8 +572,8 @@ func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int // 11: optional string token // 12: optional i64 db_id // } -func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) { - log.Debugf("CommitTransaction spec: %s, txnId: %d, commitInfos: %v", spec, txnId, commitInfos) +func (rpc *singleFeClient) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) { + log.Debugf("Call CommitTransaction, addr: %s spec: %s, txnId: %d, commitInfos: %v", rpc.Address(), spec, txnId, commitInfos) client := rpc.client req := &festruct.TCommitTxnRequest{} @@ -103,8 +581,25 @@ func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos [] req.TxnId = &txnId req.CommitInfos = commitInfos - if result, err := client.CommitTxn(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "CommitTransaction error: %v, req: %+v", err, req) + if result, err := client.CommitTxn(context.Background(), req, callopt.WithRPCTimeout(commitTxnTimeout)); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "CommitTransaction error: %v, req: %+v", err, req) + } else { + return result, nil + } +} + +func (rpc *singleFeClient) CommitTransactionForTxnInsert(spec *base.Spec, txnId int64, isTxnInsert bool, subTxnInfos []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error) { + log.Debugf("Call CommitTransactionForTxnInsert, addr: %s spec: %s, txnId: %d, subTxnInfos: %v", rpc.Address(), spec, txnId, subTxnInfos) + + client := rpc.client + req := &festruct.TCommitTxnRequest{} + setAuthInfo(req, spec) + req.TxnId = &txnId + req.TxnInsert = &isTxnInsert + req.SubTxnInfos = subTxnInfos + + if result, err := client.CommitTxn(context.Background(), req, callopt.WithRPCTimeout(commitTxnTimeout)); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "CommitTransactionForTxnInsert error: %v, req: %+v", err, req) } else { return result, nil } @@ -123,8 +618,8 @@ func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos [] // 11: optional string token // 12: optional i64 db_id // } -func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) { - log.Debugf("RollbackTransaction spec: %s, txnId: %d", spec, txnId) +func (rpc *singleFeClient) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) { + log.Debugf("Call RollbackTransaction, addr: %s, spec: %s, txnId: %d", rpc.Address(), spec, txnId) client := rpc.client req := &festruct.TRollbackTxnRequest{} @@ -132,7 +627,7 @@ func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.T req.TxnId = &txnId if result, err := client.RollbackTxn(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "RollbackTransaction error: %v, req: %+v", err, req) + return nil, xerror.Wrapf(err, xerror.RPC, "RollbackTransaction error: %v, req: %+v", err, req) } else { return result, nil } @@ -148,8 +643,8 @@ func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.T // 7: optional string token // 8: required i64 prev_commit_seq // } -func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) { - log.Debugf("GetBinlog, spec: %s, commit seq: %d", spec, commitSeq) +func (rpc *singleFeClient) 
GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) { + log.Debugf("Call GetBinlog, addr: %s, spec: %s, commit seq: %d", rpc.Address(), spec, commitSeq) client := rpc.client req := &festruct.TGetBinlogRequest{ @@ -167,14 +662,14 @@ func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBin log.Debugf("GetBinlog user %s, db %s, tableId %d, prev seq: %d", req.GetUser(), req.GetDb(), req.GetTableId(), req.GetPrevCommitSeq()) if resp, err := client.GetBinlog(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "GetBinlog error: %v, req: %+v", err, req) + return nil, xerror.Wrapf(err, xerror.RPC, "GetBinlog error: %v, req: %+v", err, req) } else { return resp, nil } } -func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) { - log.Debugf("GetBinlogLag, spec: %s, commit seq: %d", spec, commitSeq) +func (rpc *singleFeClient) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) { + log.Debugf("Call GetBinlogLag, addr: %s, spec: %s, commit seq: %d", rpc.Address(), spec, commitSeq) client := rpc.client req := &festruct.TGetBinlogRequest{ @@ -193,7 +688,7 @@ func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGet log.Debugf("GetBinlog user %s, db %s, tableId %d, prev seq: %d", req.GetUser(), req.GetDb(), req.GetTableId(), req.GetPrevCommitSeq()) if resp, err := client.GetBinlogLag(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "GetBinlogLag error: %v, req: %+v", err, req) + return nil, xerror.Wrapf(err, xerror.RPC, "GetBinlogLag error: %v, req: %+v", err, req) } else { return resp, nil } @@ -209,25 +704,27 @@ func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGet // 7: optional string label_name // 8: optional string snapshot_name // 9: optional TSnapshotType snapshot_type +// 10: optional bool enable_compress // } -func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string) (*festruct.TGetSnapshotResult_, error) { - log.Debugf("GetSnapshot %s, spec: %s", labelName, spec) +func (rpc *singleFeClient) GetSnapshot(spec *base.Spec, labelName string, compress bool) (*festruct.TGetSnapshotResult_, error) { + log.Debugf("Call GetSnapshot, addr: %s, spec: %s, label: %s", rpc.Address(), spec, labelName) client := rpc.client snapshotType := festruct.TSnapshotType_LOCAL snapshotName := "" req := &festruct.TGetSnapshotRequest{ - Table: &spec.Table, - LabelName: &labelName, - SnapshotType: &snapshotType, - SnapshotName: &snapshotName, + Table: &spec.Table, + LabelName: &labelName, + SnapshotType: &snapshotType, + SnapshotName: &snapshotName, + EnableCompress: &compress, } setAuthInfo(req, spec) - log.Debugf("GetSnapshotRequest user %s, db %s, table %s, label name %s, snapshot name %s, snapshot type %d", - req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), req.GetSnapshotName(), req.GetSnapshotType()) + log.Debugf("GetSnapshotRequest user %s, db %s, table %s, label name %s, snapshot name %s, snapshot type %d, enable compress %t", + req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), req.GetSnapshotName(), req.GetSnapshotType(), req.GetEnableCompress()) if resp, err := client.GetSnapshot(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "GetSnapshot error: %v, req: %+v", err, req) + return nil, xerror.Wrapf(err, xerror.RPC, "GetSnapshot error: %v, req: %+v", err, req) } else { return resp, 
nil } @@ -246,39 +743,67 @@ func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string) (*festruct.TGet // 10: optional map properties // 11: optional binary meta // 12: optional binary job_info +// 13: optional bool clean_tables +// 14: optional bool clean_partitions +// 15: optional bool atomic_restore +// 16: optional bool compressed // } // // Restore Snapshot rpc -func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) { - log.Debugf("RestoreSnapshot, spec: %s, snapshot result: %+v", spec, snapshotResult) +func (rpc *singleFeClient) RestoreSnapshot(spec *base.Spec, restoreReq *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error) { + // NOTE: ignore meta, because it's too large + log.Debugf("Call RestoreSnapshot, addr: %s, spec: %s", rpc.Address(), spec) client := rpc.client repoName := "__keep_on_local__" properties := make(map[string]string) properties["reserve_replica"] = "true" - // log.Infof("meta: %v", string(snapshotResult.GetMeta())) + + // Support compressed snapshot + meta := restoreReq.SnapshotResult.GetMeta() + jobInfo := restoreReq.SnapshotResult.GetJobInfo() + if restoreReq.Compress { + var err error + meta, err = utils.GZIPCompress(meta) + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "gzip compress snapshot meta error: %v", err) + } + jobInfo, err = utils.GZIPCompress(jobInfo) + if err != nil { + return nil, xerror.Wrapf(err, xerror.Normal, "gzip compress snapshot job info error: %v", err) + } + } + req := &festruct.TRestoreSnapshotRequest{ - Table: &spec.Table, - LabelName: &label, // TODO: check remove - RepoName: &repoName, // TODO: check remove - TableRefs: tableRefs, - Properties: properties, - Meta: snapshotResult.GetMeta(), - JobInfo: snapshotResult.GetJobInfo(), + Table: &spec.Table, + LabelName: &restoreReq.SnapshotName, + RepoName: &repoName, + TableRefs: restoreReq.TableRefs, + Properties: properties, + Meta: meta, + JobInfo: jobInfo, + CleanTables: &restoreReq.CleanTables, + CleanPartitions: &restoreReq.CleanPartitions, + AtomicRestore: &restoreReq.AtomicRestore, + Compressed: utils.ThriftValueWrapper(restoreReq.Compress), } setAuthInfo(req, spec) - log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v, meta %v, job info %v", - req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties, snapshotResult.GetMeta(), snapshotResult.GetJobInfo()) + // NOTE: ignore meta, because it's too large + log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v, clean tables: %t, clean partitions: %t, atomic restore: %t, compressed: %t", + req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties, + restoreReq.CleanTables, restoreReq.CleanPartitions, restoreReq.AtomicRestore, + req.GetCompressed()) + if resp, err := client.RestoreSnapshot(context.Background(), req); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "RestoreSnapshot failed, req: %+v", req) + return nil, xerror.Wrapf(err, xerror.RPC, "RestoreSnapshot failed") } else { return resp, nil } } -func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (string, error) { - log.Debugf("GetMasterToken, spec: %s", spec) +func (rpc *singleFeClient) GetMasterToken(spec *base.Spec) (*festruct.TGetMasterTokenResult_, error) { + log.Debugf("Call GetMasterToken, addr: %s, spec: %s", rpc.Address(), spec) client := rpc.client req := 
&festruct.TGetMasterTokenRequest{ @@ -289,8 +814,65 @@ func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (string, error) { log.Debugf("GetMasterToken user: %s", *req.User) if resp, err := client.GetMasterToken(context.Background(), req); err != nil { - return "", xerror.Wrapf(err, xerror.Normal, "GetMasterToken failed, req: %+v", req) + return nil, xerror.Wrapf(err, xerror.RPC, "GetMasterToken failed, req: %+v", req) } else { - return resp.GetToken(), nil + return resp, nil + } +} + +func (rpc *singleFeClient) getMeta(spec *base.Spec, reqTables []*festruct.TGetMetaTable) (*festruct.TGetMetaResult_, error) { + client := rpc.client + + reqDb := festruct.NewTGetMetaDB() // festruct.NewTGetMetaTable() + reqDb.Id = &spec.DbId + reqDb.SetTables(reqTables) + + req := &festruct.TGetMetaRequest{ + User: &spec.User, + Passwd: &spec.Password, + Db: reqDb, + } + + if resp, err := client.GetMeta(context.Background(), req); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "GetMeta failed, req: %+v", req) + } else { + return resp, nil + } +} + +func (rpc *singleFeClient) GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) { + log.Debugf("GetMetaDb, addr: %s, spec: %s", rpc.Address(), spec) + + return rpc.getMeta(spec, nil) +} + +func (rpc *singleFeClient) GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) { + log.Debugf("GetMetaTable, addr: %s, tableIds: %v", rpc.Address(), tableIds) + + reqTables := make([]*festruct.TGetMetaTable, 0, len(tableIds)) + for _, tableId := range tableIds { + tableId := tableId + reqTable := festruct.NewTGetMetaTable() + reqTable.Id = &tableId + reqTables = append(reqTables, reqTable) + } + + return rpc.getMeta(spec, reqTables) +} + +func (rpc *singleFeClient) GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error) { + log.Debugf("GetBackends, addr: %s, spec: %s", rpc.Address(), spec) + + client := rpc.client + req := &festruct.TGetBackendMetaRequest{ + Cluster: &spec.Cluster, + User: &spec.User, + Passwd: &spec.Password, + } + + if resp, err := client.GetBackendMeta(context.Background(), req); err != nil { + return nil, xerror.Wrapf(err, xerror.RPC, "GetBackendMeta failed, req: %+v", req) + } else { + return resp, nil } } diff --git a/pkg/rpc/kitex_gen/agentservice/AgentService.go b/pkg/rpc/kitex_gen/agentservice/AgentService.go index 2dfc8dc2..884aea93 100644 --- a/pkg/rpc/kitex_gen/agentservice/AgentService.go +++ b/pkg/rpc/kitex_gen/agentservice/AgentService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
package agentservice @@ -105,6 +105,78 @@ func (p *TTabletType) Value() (driver.Value, error) { return int64(*p), nil } +type TObjStorageType int64 + +const ( + TObjStorageType_UNKNOWN TObjStorageType = 0 + TObjStorageType_AWS TObjStorageType = 1 + TObjStorageType_AZURE TObjStorageType = 2 + TObjStorageType_BOS TObjStorageType = 3 + TObjStorageType_COS TObjStorageType = 4 + TObjStorageType_OBS TObjStorageType = 5 + TObjStorageType_OSS TObjStorageType = 6 + TObjStorageType_GCP TObjStorageType = 7 +) + +func (p TObjStorageType) String() string { + switch p { + case TObjStorageType_UNKNOWN: + return "UNKNOWN" + case TObjStorageType_AWS: + return "AWS" + case TObjStorageType_AZURE: + return "AZURE" + case TObjStorageType_BOS: + return "BOS" + case TObjStorageType_COS: + return "COS" + case TObjStorageType_OBS: + return "OBS" + case TObjStorageType_OSS: + return "OSS" + case TObjStorageType_GCP: + return "GCP" + } + return "" +} + +func TObjStorageTypeFromString(s string) (TObjStorageType, error) { + switch s { + case "UNKNOWN": + return TObjStorageType_UNKNOWN, nil + case "AWS": + return TObjStorageType_AWS, nil + case "AZURE": + return TObjStorageType_AZURE, nil + case "BOS": + return TObjStorageType_BOS, nil + case "COS": + return TObjStorageType_COS, nil + case "OBS": + return TObjStorageType_OBS, nil + case "OSS": + return TObjStorageType_OSS, nil + case "GCP": + return TObjStorageType_GCP, nil + } + return TObjStorageType(0), fmt.Errorf("not a valid TObjStorageType string") +} + +func TObjStorageTypePtr(v TObjStorageType) *TObjStorageType { return &v } +func (p *TObjStorageType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TObjStorageType(result.Int64) + return +} + +func (p *TObjStorageType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TCompressionType int64 const ( @@ -182,6 +254,55 @@ func (p *TCompressionType) Value() (driver.Value, error) { return int64(*p), nil } +type TInvertedIndexStorageFormat int64 + +const ( + TInvertedIndexStorageFormat_DEFAULT TInvertedIndexStorageFormat = 0 + TInvertedIndexStorageFormat_V1 TInvertedIndexStorageFormat = 1 + TInvertedIndexStorageFormat_V2 TInvertedIndexStorageFormat = 2 +) + +func (p TInvertedIndexStorageFormat) String() string { + switch p { + case TInvertedIndexStorageFormat_DEFAULT: + return "DEFAULT" + case TInvertedIndexStorageFormat_V1: + return "V1" + case TInvertedIndexStorageFormat_V2: + return "V2" + } + return "" +} + +func TInvertedIndexStorageFormatFromString(s string) (TInvertedIndexStorageFormat, error) { + switch s { + case "DEFAULT": + return TInvertedIndexStorageFormat_DEFAULT, nil + case "V1": + return TInvertedIndexStorageFormat_V1, nil + case "V2": + return TInvertedIndexStorageFormat_V2, nil + } + return TInvertedIndexStorageFormat(0), fmt.Errorf("not a valid TInvertedIndexStorageFormat string") +} + +func TInvertedIndexStorageFormatPtr(v TInvertedIndexStorageFormat) *TInvertedIndexStorageFormat { + return &v +} +func (p *TInvertedIndexStorageFormat) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TInvertedIndexStorageFormat(result.Int64) + return +} + +func (p *TInvertedIndexStorageFormat) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TAlterTabletType int64 const ( @@ -369,6 +490,11 @@ type TTabletSchema struct { StoreRowColumn bool `thrift:"store_row_column,16,optional" frugal:"16,optional,bool" 
json:"store_row_column,omitempty"` EnableSingleReplicaCompaction bool `thrift:"enable_single_replica_compaction,17,optional" frugal:"17,optional,bool" json:"enable_single_replica_compaction,omitempty"` SkipWriteIndexOnLoad bool `thrift:"skip_write_index_on_load,18,optional" frugal:"18,optional,bool" json:"skip_write_index_on_load,omitempty"` + ClusterKeyIdxes []int32 `thrift:"cluster_key_idxes,19,optional" frugal:"19,optional,list" json:"cluster_key_idxes,omitempty"` + RowStoreColCids []int32 `thrift:"row_store_col_cids,20,optional" frugal:"20,optional,list" json:"row_store_col_cids,omitempty"` + RowStorePageSize int64 `thrift:"row_store_page_size,21,optional" frugal:"21,optional,i64" json:"row_store_page_size,omitempty"` + VariantEnableFlattenNested bool `thrift:"variant_enable_flatten_nested,22,optional" frugal:"22,optional,bool" json:"variant_enable_flatten_nested,omitempty"` + StoragePageSize int64 `thrift:"storage_page_size,23,optional" frugal:"23,optional,i64" json:"storage_page_size,omitempty"` } func NewTTabletSchema() *TTabletSchema { @@ -381,20 +507,23 @@ func NewTTabletSchema() *TTabletSchema { StoreRowColumn: false, EnableSingleReplicaCompaction: false, SkipWriteIndexOnLoad: false, + RowStorePageSize: 16384, + VariantEnableFlattenNested: false, + StoragePageSize: 65536, } } func (p *TTabletSchema) InitDefault() { - *p = TTabletSchema{ - - DeleteSignIdx: -1, - SequenceColIdx: -1, - VersionColIdx: -1, - IsDynamicSchema: false, - StoreRowColumn: false, - EnableSingleReplicaCompaction: false, - SkipWriteIndexOnLoad: false, - } + p.DeleteSignIdx = -1 + p.SequenceColIdx = -1 + p.VersionColIdx = -1 + p.IsDynamicSchema = false + p.StoreRowColumn = false + p.EnableSingleReplicaCompaction = false + p.SkipWriteIndexOnLoad = false + p.RowStorePageSize = 16384 + p.VariantEnableFlattenNested = false + p.StoragePageSize = 65536 } func (p *TTabletSchema) GetShortKeyColumnCount() (v int16) { @@ -533,6 +662,51 @@ func (p *TTabletSchema) GetSkipWriteIndexOnLoad() (v bool) { } return p.SkipWriteIndexOnLoad } + +var TTabletSchema_ClusterKeyIdxes_DEFAULT []int32 + +func (p *TTabletSchema) GetClusterKeyIdxes() (v []int32) { + if !p.IsSetClusterKeyIdxes() { + return TTabletSchema_ClusterKeyIdxes_DEFAULT + } + return p.ClusterKeyIdxes +} + +var TTabletSchema_RowStoreColCids_DEFAULT []int32 + +func (p *TTabletSchema) GetRowStoreColCids() (v []int32) { + if !p.IsSetRowStoreColCids() { + return TTabletSchema_RowStoreColCids_DEFAULT + } + return p.RowStoreColCids +} + +var TTabletSchema_RowStorePageSize_DEFAULT int64 = 16384 + +func (p *TTabletSchema) GetRowStorePageSize() (v int64) { + if !p.IsSetRowStorePageSize() { + return TTabletSchema_RowStorePageSize_DEFAULT + } + return p.RowStorePageSize +} + +var TTabletSchema_VariantEnableFlattenNested_DEFAULT bool = false + +func (p *TTabletSchema) GetVariantEnableFlattenNested() (v bool) { + if !p.IsSetVariantEnableFlattenNested() { + return TTabletSchema_VariantEnableFlattenNested_DEFAULT + } + return p.VariantEnableFlattenNested +} + +var TTabletSchema_StoragePageSize_DEFAULT int64 = 65536 + +func (p *TTabletSchema) GetStoragePageSize() (v int64) { + if !p.IsSetStoragePageSize() { + return TTabletSchema_StoragePageSize_DEFAULT + } + return p.StoragePageSize +} func (p *TTabletSchema) SetShortKeyColumnCount(val int16) { p.ShortKeyColumnCount = val } @@ -587,6 +761,21 @@ func (p *TTabletSchema) SetEnableSingleReplicaCompaction(val bool) { func (p *TTabletSchema) SetSkipWriteIndexOnLoad(val bool) { p.SkipWriteIndexOnLoad = val } +func (p *TTabletSchema) 
SetClusterKeyIdxes(val []int32) { + p.ClusterKeyIdxes = val +} +func (p *TTabletSchema) SetRowStoreColCids(val []int32) { + p.RowStoreColCids = val +} +func (p *TTabletSchema) SetRowStorePageSize(val int64) { + p.RowStorePageSize = val +} +func (p *TTabletSchema) SetVariantEnableFlattenNested(val bool) { + p.VariantEnableFlattenNested = val +} +func (p *TTabletSchema) SetStoragePageSize(val int64) { + p.StoragePageSize = val +} var fieldIDToName_TTabletSchema = map[int16]string{ 1: "short_key_column_count", @@ -607,6 +796,11 @@ var fieldIDToName_TTabletSchema = map[int16]string{ 16: "store_row_column", 17: "enable_single_replica_compaction", 18: "skip_write_index_on_load", + 19: "cluster_key_idxes", + 20: "row_store_col_cids", + 21: "row_store_page_size", + 22: "variant_enable_flatten_nested", + 23: "storage_page_size", } func (p *TTabletSchema) IsSetBloomFilterFpp() bool { @@ -661,6 +855,26 @@ func (p *TTabletSchema) IsSetSkipWriteIndexOnLoad() bool { return p.SkipWriteIndexOnLoad != TTabletSchema_SkipWriteIndexOnLoad_DEFAULT } +func (p *TTabletSchema) IsSetClusterKeyIdxes() bool { + return p.ClusterKeyIdxes != nil +} + +func (p *TTabletSchema) IsSetRowStoreColCids() bool { + return p.RowStoreColCids != nil +} + +func (p *TTabletSchema) IsSetRowStorePageSize() bool { + return p.RowStorePageSize != TTabletSchema_RowStorePageSize_DEFAULT +} + +func (p *TTabletSchema) IsSetVariantEnableFlattenNested() bool { + return p.VariantEnableFlattenNested != TTabletSchema_VariantEnableFlattenNested_DEFAULT +} + +func (p *TTabletSchema) IsSetStoragePageSize() bool { + return p.StoragePageSize != TTabletSchema_StoragePageSize_DEFAULT +} + func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -691,10 +905,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetShortKeyColumnCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -702,10 +914,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -713,10 +923,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetKeysType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -724,10 +932,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStorageType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { @@ -735,147 +941,158 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.DOUBLE { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I32 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.BOOL { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.BOOL { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.BOOL { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.BOOL { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.LIST { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.LIST { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.I64 { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.I64 { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -927,187 +1144,305 @@ RequiredFieldNotSetError: } func (p *TTabletSchema) ReadField1(iprot thrift.TProtocol) error { + + var _field int16 if v, err := iprot.ReadI16(); err != nil { return err } else { - p.ShortKeyColumnCount = v + _field = v } + p.ShortKeyColumnCount = _field return nil } - func (p *TTabletSchema) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TTabletSchema) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TKeysType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.KeysType = types.TKeysType(v) + _field = types.TKeysType(v) } + p.KeysType = _field return nil } - func (p *TTabletSchema) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TStorageType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StorageType = types.TStorageType(v) + _field = types.TStorageType(v) } + p.StorageType = _field return nil } - func (p *TTabletSchema) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TTabletSchema) ReadField6(iprot thrift.TProtocol) error { + + var _field *float64 if v, err := iprot.ReadDouble(); err != nil { return err } else { - p.BloomFilterFpp = &v + _field = &v } + p.BloomFilterFpp = _field return nil } - func (p *TTabletSchema) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Indexes = make([]*descriptors.TOlapTableIndex, 0, size) + _field := make([]*descriptors.TOlapTableIndex, 0, size) + values := make([]descriptors.TOlapTableIndex, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTableIndex() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Indexes = append(p.Indexes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Indexes 
= _field return nil } - func (p *TTabletSchema) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsInMemory = &v + _field = &v } + p.IsInMemory = _field return nil } - func (p *TTabletSchema) ReadField9(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DeleteSignIdx = v + _field = v } + p.DeleteSignIdx = _field return nil } - func (p *TTabletSchema) ReadField10(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SequenceColIdx = v + _field = v } + p.SequenceColIdx = _field return nil } - func (p *TTabletSchema) ReadField11(iprot thrift.TProtocol) error { + + var _field *types.TSortType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TSortType(v) - p.SortType = &tmp + _field = &tmp } + p.SortType = _field return nil } - func (p *TTabletSchema) ReadField12(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SortColNum = &v + _field = &v } + p.SortColNum = _field return nil } - func (p *TTabletSchema) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.DisableAutoCompaction = &v + _field = &v } + p.DisableAutoCompaction = _field return nil } - func (p *TTabletSchema) ReadField14(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VersionColIdx = v + _field = v } + p.VersionColIdx = _field return nil } - func (p *TTabletSchema) ReadField15(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDynamicSchema = v + _field = v } + p.IsDynamicSchema = _field return nil } - func (p *TTabletSchema) ReadField16(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.StoreRowColumn = v + _field = v } + p.StoreRowColumn = _field return nil } - func (p *TTabletSchema) ReadField17(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EnableSingleReplicaCompaction = v + _field = v } + p.EnableSingleReplicaCompaction = _field return nil } - func (p *TTabletSchema) ReadField18(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipWriteIndexOnLoad = _field + return nil +} +func (p *TTabletSchema) ReadField19(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ClusterKeyIdxes = _field + return nil +} +func (p *TTabletSchema) ReadField20(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.RowStoreColCids = _field + 
return nil +} +func (p *TTabletSchema) ReadField21(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.RowStorePageSize = _field + return nil +} +func (p *TTabletSchema) ReadField22(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.SkipWriteIndexOnLoad = v + _field = v + } + p.VariantEnableFlattenNested = _field + return nil +} +func (p *TTabletSchema) ReadField23(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v } + p.StoragePageSize = _field return nil } @@ -1189,7 +1524,26 @@ func (p *TTabletSchema) Write(oprot thrift.TProtocol) (err error) { fieldId = 18 goto WriteFieldError } - + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1556,11 +1910,123 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } +func (p *TTabletSchema) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetClusterKeyIdxes() { + if err = oprot.WriteFieldBegin("cluster_key_idxes", thrift.LIST, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.ClusterKeyIdxes)); err != nil { + return err + } + for _, v := range p.ClusterKeyIdxes { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TTabletSchema) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetRowStoreColCids() { + if err = oprot.WriteFieldBegin("row_store_col_cids", thrift.LIST, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.RowStoreColCids)); err != nil { + return err + } + for _, v := range p.RowStoreColCids { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + +func (p *TTabletSchema) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetRowStorePageSize() { + if err = oprot.WriteFieldBegin("row_store_page_size", thrift.I64, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.RowStorePageSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) +} + +func (p *TTabletSchema) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetVariantEnableFlattenNested() { + if err = oprot.WriteFieldBegin("variant_enable_flatten_nested", thrift.BOOL, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.VariantEnableFlattenNested); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TTabletSchema) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetStoragePageSize() { + if err = oprot.WriteFieldBegin("storage_page_size", thrift.I64, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.StoragePageSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + func (p *TTabletSchema) String() string { if p == nil { return "" } return fmt.Sprintf("TTabletSchema(%+v)", *p) + } func (p *TTabletSchema) DeepEqual(ano *TTabletSchema) bool { @@ -1623,6 +2089,21 @@ func (p *TTabletSchema) DeepEqual(ano *TTabletSchema) bool { if !p.Field18DeepEqual(ano.SkipWriteIndexOnLoad) { return false } + if !p.Field19DeepEqual(ano.ClusterKeyIdxes) { + return false + } + if !p.Field20DeepEqual(ano.RowStoreColCids) { + return false + } + if !p.Field21DeepEqual(ano.RowStorePageSize) { + return false + } + if !p.Field22DeepEqual(ano.VariantEnableFlattenNested) { + return false + } + if !p.Field23DeepEqual(ano.StoragePageSize) { + return false + } return true } @@ -1789,18 +2270,67 @@ func (p *TTabletSchema) Field18DeepEqual(src bool) bool { } return true } +func (p *TTabletSchema) Field19DeepEqual(src []int32) bool { + + if len(p.ClusterKeyIdxes) != len(src) { + return false + } + for i, v := range p.ClusterKeyIdxes { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TTabletSchema) Field20DeepEqual(src []int32) bool { + + if len(p.RowStoreColCids) != len(src) { + return false + } + for i, v := range p.RowStoreColCids { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TTabletSchema) Field21DeepEqual(src int64) bool { + + if p.RowStorePageSize != src { + return false + } + return true +} +func (p *TTabletSchema) Field22DeepEqual(src bool) bool { + + if p.VariantEnableFlattenNested != src { + return false + } + return true +} +func (p *TTabletSchema) Field23DeepEqual(src int64) bool { + + if p.StoragePageSize != src { + return false + } + return true +} type TS3StorageParam struct { - Endpoint *string `thrift:"endpoint,1,optional" frugal:"1,optional,string" json:"endpoint,omitempty"` - Region *string `thrift:"region,2,optional" frugal:"2,optional,string" json:"region,omitempty"` - Ak *string `thrift:"ak,3,optional" frugal:"3,optional,string" json:"ak,omitempty"` - Sk *string `thrift:"sk,4,optional" frugal:"4,optional,string" json:"sk,omitempty"` - MaxConn int32 
`thrift:"max_conn,5,optional" frugal:"5,optional,i32" json:"max_conn,omitempty"` - RequestTimeoutMs int32 `thrift:"request_timeout_ms,6,optional" frugal:"6,optional,i32" json:"request_timeout_ms,omitempty"` - ConnTimeoutMs int32 `thrift:"conn_timeout_ms,7,optional" frugal:"7,optional,i32" json:"conn_timeout_ms,omitempty"` - RootPath *string `thrift:"root_path,8,optional" frugal:"8,optional,string" json:"root_path,omitempty"` - Bucket *string `thrift:"bucket,9,optional" frugal:"9,optional,string" json:"bucket,omitempty"` - UsePathStyle bool `thrift:"use_path_style,10,optional" frugal:"10,optional,bool" json:"use_path_style,omitempty"` + Endpoint *string `thrift:"endpoint,1,optional" frugal:"1,optional,string" json:"endpoint,omitempty"` + Region *string `thrift:"region,2,optional" frugal:"2,optional,string" json:"region,omitempty"` + Ak *string `thrift:"ak,3,optional" frugal:"3,optional,string" json:"ak,omitempty"` + Sk *string `thrift:"sk,4,optional" frugal:"4,optional,string" json:"sk,omitempty"` + MaxConn int32 `thrift:"max_conn,5,optional" frugal:"5,optional,i32" json:"max_conn,omitempty"` + RequestTimeoutMs int32 `thrift:"request_timeout_ms,6,optional" frugal:"6,optional,i32" json:"request_timeout_ms,omitempty"` + ConnTimeoutMs int32 `thrift:"conn_timeout_ms,7,optional" frugal:"7,optional,i32" json:"conn_timeout_ms,omitempty"` + RootPath *string `thrift:"root_path,8,optional" frugal:"8,optional,string" json:"root_path,omitempty"` + Bucket *string `thrift:"bucket,9,optional" frugal:"9,optional,string" json:"bucket,omitempty"` + UsePathStyle bool `thrift:"use_path_style,10,optional" frugal:"10,optional,bool" json:"use_path_style,omitempty"` + Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + Provider *TObjStorageType `thrift:"provider,12,optional" frugal:"12,optional,TObjStorageType" json:"provider,omitempty"` } func NewTS3StorageParam() *TS3StorageParam { @@ -1814,13 +2344,10 @@ func NewTS3StorageParam() *TS3StorageParam { } func (p *TS3StorageParam) InitDefault() { - *p = TS3StorageParam{ - - MaxConn: 50, - RequestTimeoutMs: 3000, - ConnTimeoutMs: 1000, - UsePathStyle: false, - } + p.MaxConn = 50 + p.RequestTimeoutMs = 3000 + p.ConnTimeoutMs = 1000 + p.UsePathStyle = false } var TS3StorageParam_Endpoint_DEFAULT string @@ -1912,6 +2439,24 @@ func (p *TS3StorageParam) GetUsePathStyle() (v bool) { } return p.UsePathStyle } + +var TS3StorageParam_Token_DEFAULT string + +func (p *TS3StorageParam) GetToken() (v string) { + if !p.IsSetToken() { + return TS3StorageParam_Token_DEFAULT + } + return *p.Token +} + +var TS3StorageParam_Provider_DEFAULT TObjStorageType + +func (p *TS3StorageParam) GetProvider() (v TObjStorageType) { + if !p.IsSetProvider() { + return TS3StorageParam_Provider_DEFAULT + } + return *p.Provider +} func (p *TS3StorageParam) SetEndpoint(val *string) { p.Endpoint = val } @@ -1942,6 +2487,12 @@ func (p *TS3StorageParam) SetBucket(val *string) { func (p *TS3StorageParam) SetUsePathStyle(val bool) { p.UsePathStyle = val } +func (p *TS3StorageParam) SetToken(val *string) { + p.Token = val +} +func (p *TS3StorageParam) SetProvider(val *TObjStorageType) { + p.Provider = val +} var fieldIDToName_TS3StorageParam = map[int16]string{ 1: "endpoint", @@ -1954,6 +2505,8 @@ var fieldIDToName_TS3StorageParam = map[int16]string{ 8: "root_path", 9: "bucket", 10: "use_path_style", + 11: "token", + 12: "provider", } func (p *TS3StorageParam) IsSetEndpoint() bool { @@ -1996,6 +2549,14 @@ func (p *TS3StorageParam) IsSetUsePathStyle() bool { return 
p.UsePathStyle != TS3StorageParam_UsePathStyle_DEFAULT } +func (p *TS3StorageParam) IsSetToken() bool { + return p.Token != nil +} + +func (p *TS3StorageParam) IsSetProvider() bool { + return p.Provider != nil +} + func (p *TS3StorageParam) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -2020,107 +2581,102 @@ func (p *TS3StorageParam) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto 
SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2146,92 +2702,136 @@ ReadStructEndError: } func (p *TS3StorageParam) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Endpoint = &v + _field = &v } + p.Endpoint = _field return nil } - func (p *TS3StorageParam) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Region = &v + _field = &v } + p.Region = _field return nil } - func (p *TS3StorageParam) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Ak = &v + _field = &v } + p.Ak = _field return nil } - func (p *TS3StorageParam) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Sk = &v + _field = &v } + p.Sk = _field return nil } - func (p *TS3StorageParam) ReadField5(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.MaxConn = v + _field = v } + p.MaxConn = _field return nil } - func (p *TS3StorageParam) ReadField6(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.RequestTimeoutMs = v + _field = v } + p.RequestTimeoutMs = _field return nil } - func (p *TS3StorageParam) ReadField7(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ConnTimeoutMs = v + _field = v } + p.ConnTimeoutMs = _field return nil } - func (p *TS3StorageParam) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RootPath = &v + _field = &v } + p.RootPath = _field return nil } - func (p *TS3StorageParam) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Bucket = &v + _field = &v } + p.Bucket = _field return nil } - func (p *TS3StorageParam) ReadField10(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UsePathStyle = v + _field = v + } + p.UsePathStyle = _field + return nil +} +func (p *TS3StorageParam) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TS3StorageParam) ReadField12(iprot thrift.TProtocol) error { + + var _field *TObjStorageType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TObjStorageType(v) + _field = &tmp } + p.Provider = _field return nil } @@ -2281,7 +2881,14 @@ func (p *TS3StorageParam) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2490,13 +3097,52 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TS3StorageParam) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TS3StorageParam(%+v)", *p) -} - +func (p *TS3StorageParam) 
writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TS3StorageParam) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetProvider() { + if err = oprot.WriteFieldBegin("provider", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Provider)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TS3StorageParam) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TS3StorageParam(%+v)", *p) + +} + func (p *TS3StorageParam) DeepEqual(ano *TS3StorageParam) bool { if p == ano { return true @@ -2533,6 +3179,12 @@ func (p *TS3StorageParam) DeepEqual(ano *TS3StorageParam) bool { if !p.Field10DeepEqual(ano.UsePathStyle) { return false } + if !p.Field11DeepEqual(ano.Token) { + return false + } + if !p.Field12DeepEqual(ano.Provider) { + return false + } return true } @@ -2636,6 +3288,30 @@ func (p *TS3StorageParam) Field10DeepEqual(src bool) bool { } return true } +func (p *TS3StorageParam) Field11DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TS3StorageParam) Field12DeepEqual(src *TObjStorageType) bool { + + if p.Provider == src { + return true + } else if p.Provider == nil || src == nil { + return false + } + if *p.Provider != *src { + return false + } + return true +} type TStoragePolicy struct { Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` @@ -2651,7 +3327,6 @@ func NewTStoragePolicy() *TStoragePolicy { } func (p *TStoragePolicy) InitDefault() { - *p = TStoragePolicy{} } var TStoragePolicy_Id_DEFAULT int64 @@ -2783,67 +3458,54 @@ func (p *TStoragePolicy) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { 
- goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2869,56 +3531,69 @@ ReadStructEndError: } func (p *TStoragePolicy) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = &v + _field = &v } + p.Id = _field return nil } - func (p *TStoragePolicy) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = &v + _field = &v } + p.Name = _field return nil } - func (p *TStoragePolicy) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = &v + _field = &v } + p.Version = _field return nil } - func (p *TStoragePolicy) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownDatetime = &v + _field = &v } + p.CooldownDatetime = _field return nil } - func (p *TStoragePolicy) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownTtl = &v + _field = &v } + p.CooldownTtl = _field return nil } - func (p *TStoragePolicy) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ResourceId = &v + _field = &v } + p.ResourceId = _field return nil } @@ -2952,7 +3627,6 @@ func (p *TStoragePolicy) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3090,6 +3764,7 @@ func (p *TStoragePolicy) String() string { return "" } return fmt.Sprintf("TStoragePolicy(%+v)", *p) + } func (p *TStoragePolicy) DeepEqual(ano *TStoragePolicy) bool { @@ -3193,10 +3868,11 @@ func (p *TStoragePolicy) Field6DeepEqual(src *int64) bool { } type TStorageResource struct { - Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` - Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` - Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` - S3StorageParam *TS3StorageParam `thrift:"s3_storage_param,4,optional" frugal:"4,optional,TS3StorageParam" json:"s3_storage_param,omitempty"` + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` + S3StorageParam *TS3StorageParam `thrift:"s3_storage_param,4,optional" frugal:"4,optional,TS3StorageParam" json:"s3_storage_param,omitempty"` + HdfsStorageParam 
*plannodes.THdfsParams `thrift:"hdfs_storage_param,5,optional" frugal:"5,optional,plannodes.THdfsParams" json:"hdfs_storage_param,omitempty"` } func NewTStorageResource() *TStorageResource { @@ -3204,7 +3880,6 @@ func NewTStorageResource() *TStorageResource { } func (p *TStorageResource) InitDefault() { - *p = TStorageResource{} } var TStorageResource_Id_DEFAULT int64 @@ -3242,6 +3917,15 @@ func (p *TStorageResource) GetS3StorageParam() (v *TS3StorageParam) { } return p.S3StorageParam } + +var TStorageResource_HdfsStorageParam_DEFAULT *plannodes.THdfsParams + +func (p *TStorageResource) GetHdfsStorageParam() (v *plannodes.THdfsParams) { + if !p.IsSetHdfsStorageParam() { + return TStorageResource_HdfsStorageParam_DEFAULT + } + return p.HdfsStorageParam +} func (p *TStorageResource) SetId(val *int64) { p.Id = val } @@ -3254,12 +3938,16 @@ func (p *TStorageResource) SetVersion(val *int64) { func (p *TStorageResource) SetS3StorageParam(val *TS3StorageParam) { p.S3StorageParam = val } +func (p *TStorageResource) SetHdfsStorageParam(val *plannodes.THdfsParams) { + p.HdfsStorageParam = val +} var fieldIDToName_TStorageResource = map[int16]string{ 1: "id", 2: "name", 3: "version", 4: "s3_storage_param", + 5: "hdfs_storage_param", } func (p *TStorageResource) IsSetId() bool { @@ -3278,6 +3966,10 @@ func (p *TStorageResource) IsSetS3StorageParam() bool { return p.S3StorageParam != nil } +func (p *TStorageResource) IsSetHdfsStorageParam() bool { + return p.HdfsStorageParam != nil +} + func (p *TStorageResource) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3302,47 +3994,46 @@ func (p *TStorageResource) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3368,37 +4059,52 @@ ReadStructEndError: } func (p *TStorageResource) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = &v + _field = &v } + p.Id = _field return nil } - func (p *TStorageResource) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = &v + _field = &v } + p.Name = _field 
return nil } - func (p *TStorageResource) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = &v + _field = &v } + p.Version = _field return nil } - func (p *TStorageResource) ReadField4(iprot thrift.TProtocol) error { - p.S3StorageParam = NewTS3StorageParam() - if err := p.S3StorageParam.Read(iprot); err != nil { + _field := NewTS3StorageParam() + if err := _field.Read(iprot); err != nil { + return err + } + p.S3StorageParam = _field + return nil +} +func (p *TStorageResource) ReadField5(iprot thrift.TProtocol) error { + _field := plannodes.NewTHdfsParams() + if err := _field.Read(iprot); err != nil { return err } + p.HdfsStorageParam = _field return nil } @@ -3424,7 +4130,10 @@ func (p *TStorageResource) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3519,11 +4228,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } +func (p *TStorageResource) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHdfsStorageParam() { + if err = oprot.WriteFieldBegin("hdfs_storage_param", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.HdfsStorageParam.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + func (p *TStorageResource) String() string { if p == nil { return "" } return fmt.Sprintf("TStorageResource(%+v)", *p) + } func (p *TStorageResource) DeepEqual(ano *TStorageResource) bool { @@ -3544,6 +4273,9 @@ func (p *TStorageResource) DeepEqual(ano *TStorageResource) bool { if !p.Field4DeepEqual(ano.S3StorageParam) { return false } + if !p.Field5DeepEqual(ano.HdfsStorageParam) { + return false + } return true } @@ -3590,6 +4322,13 @@ func (p *TStorageResource) Field4DeepEqual(src *TS3StorageParam) bool { } return true } +func (p *TStorageResource) Field5DeepEqual(src *plannodes.THdfsParams) bool { + + if !p.HdfsStorageParam.DeepEqual(src) { + return false + } + return true +} type TPushStoragePolicyReq struct { StoragePolicy []*TStoragePolicy `thrift:"storage_policy,1,optional" frugal:"1,optional,list" json:"storage_policy,omitempty"` @@ -3602,7 +4341,6 @@ func NewTPushStoragePolicyReq() *TPushStoragePolicyReq { } func (p *TPushStoragePolicyReq) InitDefault() { - *p = TPushStoragePolicyReq{} } var TPushStoragePolicyReq_StoragePolicy_DEFAULT []*TStoragePolicy @@ -3683,37 +4421,30 @@ func (p *TPushStoragePolicyReq) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto 
ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3743,48 +4474,55 @@ func (p *TPushStoragePolicyReq) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.StoragePolicy = make([]*TStoragePolicy, 0, size) + _field := make([]*TStoragePolicy, 0, size) + values := make([]TStoragePolicy, size) for i := 0; i < size; i++ { - _elem := NewTStoragePolicy() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.StoragePolicy = append(p.StoragePolicy, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.StoragePolicy = _field return nil } - func (p *TPushStoragePolicyReq) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Resource = make([]*TStorageResource, 0, size) + _field := make([]*TStorageResource, 0, size) + values := make([]TStorageResource, size) for i := 0; i < size; i++ { - _elem := NewTStorageResource() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Resource = append(p.Resource, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Resource = _field return nil } - func (p *TPushStoragePolicyReq) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DroppedStoragePolicy = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -3792,11 +4530,12 @@ func (p *TPushStoragePolicyReq) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.DroppedStoragePolicy = append(p.DroppedStoragePolicy, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DroppedStoragePolicy = _field return nil } @@ -3818,7 +4557,6 @@ func (p *TPushStoragePolicyReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3923,6 +4661,7 @@ func (p *TPushStoragePolicyReq) String() string { return "" } return fmt.Sprintf("TPushStoragePolicyReq(%+v)", *p) + } func (p *TPushStoragePolicyReq) DeepEqual(ano *TPushStoragePolicyReq) bool { @@ -3983,93 +4722,19 @@ func (p *TPushStoragePolicyReq) Field3DeepEqual(src []int64) bool { return true } -type TBinlogConfig struct { - Enable *bool `thrift:"enable,1,optional" frugal:"1,optional,bool" json:"enable,omitempty"` - TtlSeconds *int64 `thrift:"ttl_seconds,2,optional" frugal:"2,optional,i64" json:"ttl_seconds,omitempty"` - MaxBytes *int64 `thrift:"max_bytes,3,optional" frugal:"3,optional,i64" json:"max_bytes,omitempty"` - MaxHistoryNums *int64 `thrift:"max_history_nums,4,optional" frugal:"4,optional,i64" json:"max_history_nums,omitempty"` -} - -func NewTBinlogConfig() *TBinlogConfig { - return &TBinlogConfig{} -} - -func (p *TBinlogConfig) InitDefault() { - *p = TBinlogConfig{} -} - -var TBinlogConfig_Enable_DEFAULT bool - -func (p *TBinlogConfig) GetEnable() (v bool) { - if !p.IsSetEnable() { - return TBinlogConfig_Enable_DEFAULT - } - return *p.Enable -} - -var TBinlogConfig_TtlSeconds_DEFAULT int64 
- -func (p *TBinlogConfig) GetTtlSeconds() (v int64) { - if !p.IsSetTtlSeconds() { - return TBinlogConfig_TtlSeconds_DEFAULT - } - return *p.TtlSeconds -} - -var TBinlogConfig_MaxBytes_DEFAULT int64 - -func (p *TBinlogConfig) GetMaxBytes() (v int64) { - if !p.IsSetMaxBytes() { - return TBinlogConfig_MaxBytes_DEFAULT - } - return *p.MaxBytes -} - -var TBinlogConfig_MaxHistoryNums_DEFAULT int64 - -func (p *TBinlogConfig) GetMaxHistoryNums() (v int64) { - if !p.IsSetMaxHistoryNums() { - return TBinlogConfig_MaxHistoryNums_DEFAULT - } - return *p.MaxHistoryNums -} -func (p *TBinlogConfig) SetEnable(val *bool) { - p.Enable = val -} -func (p *TBinlogConfig) SetTtlSeconds(val *int64) { - p.TtlSeconds = val -} -func (p *TBinlogConfig) SetMaxBytes(val *int64) { - p.MaxBytes = val -} -func (p *TBinlogConfig) SetMaxHistoryNums(val *int64) { - p.MaxHistoryNums = val -} - -var fieldIDToName_TBinlogConfig = map[int16]string{ - 1: "enable", - 2: "ttl_seconds", - 3: "max_bytes", - 4: "max_history_nums", -} - -func (p *TBinlogConfig) IsSetEnable() bool { - return p.Enable != nil +type TCleanTrashReq struct { } -func (p *TBinlogConfig) IsSetTtlSeconds() bool { - return p.TtlSeconds != nil +func NewTCleanTrashReq() *TCleanTrashReq { + return &TCleanTrashReq{} } -func (p *TBinlogConfig) IsSetMaxBytes() bool { - return p.MaxBytes != nil +func (p *TCleanTrashReq) InitDefault() { } -func (p *TBinlogConfig) IsSetMaxHistoryNums() bool { - return p.MaxHistoryNums != nil -} +var fieldIDToName_TCleanTrashReq = map[int16]string{} -func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) { +func (p *TCleanTrashReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -4086,54 +4751,132 @@ func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) { if fieldTypeId == thrift.STOP { break } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCleanTrashReq) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TCleanTrashReq"); err != nil { + goto WriteStructBeginError + } + if p != nil { + } + if err = 
oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCleanTrashReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCleanTrashReq(%+v)", *p) + +} + +func (p *TCleanTrashReq) DeepEqual(ano *TCleanTrashReq) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true +} + +type TCleanUDFCacheReq struct { + FunctionSignature *string `thrift:"function_signature,1,optional" frugal:"1,optional,string" json:"function_signature,omitempty"` +} + +func NewTCleanUDFCacheReq() *TCleanUDFCacheReq { + return &TCleanUDFCacheReq{} +} + +func (p *TCleanUDFCacheReq) InitDefault() { +} + +var TCleanUDFCacheReq_FunctionSignature_DEFAULT string + +func (p *TCleanUDFCacheReq) GetFunctionSignature() (v string) { + if !p.IsSetFunctionSignature() { + return TCleanUDFCacheReq_FunctionSignature_DEFAULT + } + return *p.FunctionSignature +} +func (p *TCleanUDFCacheReq) SetFunctionSignature(val *string) { + p.FunctionSignature = val +} + +var fieldIDToName_TCleanUDFCacheReq = map[int16]string{ + 1: "function_signature", +} + +func (p *TCleanUDFCacheReq) IsSetFunctionSignature() bool { + return p.FunctionSignature != nil +} + +func (p *TCleanUDFCacheReq) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4148,7 +4891,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCleanUDFCacheReq[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -4158,45 +4901,21 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBinlogConfig) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.Enable = &v - } - return nil -} - -func (p *TBinlogConfig) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TtlSeconds = &v - } - return nil -} - -func (p *TBinlogConfig) ReadField3(iprot thrift.TProtocol) error { - if 
v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MaxBytes = &v - } - return nil -} +func (p *TCleanUDFCacheReq) ReadField1(iprot thrift.TProtocol) error { -func (p *TBinlogConfig) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.MaxHistoryNums = &v + _field = &v } + p.FunctionSignature = _field return nil } -func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) { +func (p *TCleanUDFCacheReq) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TBinlogConfig"); err != nil { + if err = oprot.WriteStructBegin("TCleanUDFCacheReq"); err != nil { goto WriteStructBeginError } if p != nil { @@ -4204,19 +4923,6 @@ func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4235,12 +4941,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TBinlogConfig) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetEnable() { - if err = oprot.WriteFieldBegin("enable", thrift.BOOL, 1); err != nil { +func (p *TCleanUDFCacheReq) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFunctionSignature() { + if err = oprot.WriteFieldBegin("function_signature", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.Enable); err != nil { + if err := oprot.WriteString(*p.FunctionSignature); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -4254,428 +4960,800 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TBinlogConfig) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTtlSeconds() { - if err = oprot.WriteFieldBegin("ttl_seconds", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TtlSeconds); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TBinlogConfig) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxBytes() { - if err = oprot.WriteFieldBegin("max_bytes", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.MaxBytes); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TBinlogConfig) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxHistoryNums() { - if err = oprot.WriteFieldBegin("max_history_nums", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.MaxHistoryNums); err != nil { - 
return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TBinlogConfig) String() string { +func (p *TCleanUDFCacheReq) String() string { if p == nil { return "" } - return fmt.Sprintf("TBinlogConfig(%+v)", *p) + return fmt.Sprintf("TCleanUDFCacheReq(%+v)", *p) + } -func (p *TBinlogConfig) DeepEqual(ano *TBinlogConfig) bool { +func (p *TCleanUDFCacheReq) DeepEqual(ano *TCleanUDFCacheReq) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Enable) { - return false - } - if !p.Field2DeepEqual(ano.TtlSeconds) { - return false - } - if !p.Field3DeepEqual(ano.MaxBytes) { - return false - } - if !p.Field4DeepEqual(ano.MaxHistoryNums) { + if !p.Field1DeepEqual(ano.FunctionSignature) { return false } return true } -func (p *TBinlogConfig) Field1DeepEqual(src *bool) bool { +func (p *TCleanUDFCacheReq) Field1DeepEqual(src *string) bool { - if p.Enable == src { + if p.FunctionSignature == src { return true - } else if p.Enable == nil || src == nil { + } else if p.FunctionSignature == nil || src == nil { return false } - if *p.Enable != *src { + if strings.Compare(*p.FunctionSignature, *src) != 0 { return false } return true } -func (p *TBinlogConfig) Field2DeepEqual(src *int64) bool { - if p.TtlSeconds == src { - return true - } else if p.TtlSeconds == nil || src == nil { - return false - } - if *p.TtlSeconds != *src { - return false - } - return true +type TBinlogConfig struct { + Enable *bool `thrift:"enable,1,optional" frugal:"1,optional,bool" json:"enable,omitempty"` + TtlSeconds *int64 `thrift:"ttl_seconds,2,optional" frugal:"2,optional,i64" json:"ttl_seconds,omitempty"` + MaxBytes *int64 `thrift:"max_bytes,3,optional" frugal:"3,optional,i64" json:"max_bytes,omitempty"` + MaxHistoryNums *int64 `thrift:"max_history_nums,4,optional" frugal:"4,optional,i64" json:"max_history_nums,omitempty"` } -func (p *TBinlogConfig) Field3DeepEqual(src *int64) bool { - if p.MaxBytes == src { - return true - } else if p.MaxBytes == nil || src == nil { - return false - } - if *p.MaxBytes != *src { - return false - } - return true +func NewTBinlogConfig() *TBinlogConfig { + return &TBinlogConfig{} } -func (p *TBinlogConfig) Field4DeepEqual(src *int64) bool { - if p.MaxHistoryNums == src { - return true - } else if p.MaxHistoryNums == nil || src == nil { - return false - } - if *p.MaxHistoryNums != *src { - return false - } - return true +func (p *TBinlogConfig) InitDefault() { } -type TCreateTabletReq struct { - TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` - TabletSchema *TTabletSchema `thrift:"tablet_schema,2,required" frugal:"2,required,TTabletSchema" json:"tablet_schema"` - Version *types.TVersion `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` - VersionHash *types.TVersionHash `thrift:"version_hash,4,optional" frugal:"4,optional,i64" json:"version_hash,omitempty"` - StorageMedium *types.TStorageMedium `thrift:"storage_medium,5,optional" frugal:"5,optional,TStorageMedium" json:"storage_medium,omitempty"` - InRestoreMode *bool `thrift:"in_restore_mode,6,optional" frugal:"6,optional,bool" json:"in_restore_mode,omitempty"` - BaseTabletId *types.TTabletId `thrift:"base_tablet_id,7,optional" 
frugal:"7,optional,i64" json:"base_tablet_id,omitempty"` - BaseSchemaHash *types.TSchemaHash `thrift:"base_schema_hash,8,optional" frugal:"8,optional,i32" json:"base_schema_hash,omitempty"` - TableId *int64 `thrift:"table_id,9,optional" frugal:"9,optional,i64" json:"table_id,omitempty"` - PartitionId *int64 `thrift:"partition_id,10,optional" frugal:"10,optional,i64" json:"partition_id,omitempty"` - AllocationTerm *int64 `thrift:"allocation_term,11,optional" frugal:"11,optional,i64" json:"allocation_term,omitempty"` - IsEcoMode *bool `thrift:"is_eco_mode,12,optional" frugal:"12,optional,bool" json:"is_eco_mode,omitempty"` - StorageFormat *TStorageFormat `thrift:"storage_format,13,optional" frugal:"13,optional,TStorageFormat" json:"storage_format,omitempty"` - TabletType *TTabletType `thrift:"tablet_type,14,optional" frugal:"14,optional,TTabletType" json:"tablet_type,omitempty"` - CompressionType TCompressionType `thrift:"compression_type,16,optional" frugal:"16,optional,TCompressionType" json:"compression_type,omitempty"` - ReplicaId types.TReplicaId `thrift:"replica_id,17,optional" frugal:"17,optional,i64" json:"replica_id,omitempty"` - EnableUniqueKeyMergeOnWrite bool `thrift:"enable_unique_key_merge_on_write,19,optional" frugal:"19,optional,bool" json:"enable_unique_key_merge_on_write,omitempty"` - StoragePolicyId *int64 `thrift:"storage_policy_id,20,optional" frugal:"20,optional,i64" json:"storage_policy_id,omitempty"` - BinlogConfig *TBinlogConfig `thrift:"binlog_config,21,optional" frugal:"21,optional,TBinlogConfig" json:"binlog_config,omitempty"` - CompactionPolicy string `thrift:"compaction_policy,22,optional" frugal:"22,optional,string" json:"compaction_policy,omitempty"` - TimeSeriesCompactionGoalSizeMbytes int64 `thrift:"time_series_compaction_goal_size_mbytes,23,optional" frugal:"23,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"` - TimeSeriesCompactionFileCountThreshold int64 `thrift:"time_series_compaction_file_count_threshold,24,optional" frugal:"24,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"` - TimeSeriesCompactionTimeThresholdSeconds int64 `thrift:"time_series_compaction_time_threshold_seconds,25,optional" frugal:"25,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"` -} - -func NewTCreateTabletReq() *TCreateTabletReq { - return &TCreateTabletReq{ +var TBinlogConfig_Enable_DEFAULT bool - CompressionType: TCompressionType_LZ4F, - ReplicaId: 0, - EnableUniqueKeyMergeOnWrite: false, - CompactionPolicy: "size_based", - TimeSeriesCompactionGoalSizeMbytes: 1024, - TimeSeriesCompactionFileCountThreshold: 2000, - TimeSeriesCompactionTimeThresholdSeconds: 3600, +func (p *TBinlogConfig) GetEnable() (v bool) { + if !p.IsSetEnable() { + return TBinlogConfig_Enable_DEFAULT } + return *p.Enable } -func (p *TCreateTabletReq) InitDefault() { - *p = TCreateTabletReq{ +var TBinlogConfig_TtlSeconds_DEFAULT int64 - CompressionType: TCompressionType_LZ4F, - ReplicaId: 0, - EnableUniqueKeyMergeOnWrite: false, - CompactionPolicy: "size_based", - TimeSeriesCompactionGoalSizeMbytes: 1024, - TimeSeriesCompactionFileCountThreshold: 2000, - TimeSeriesCompactionTimeThresholdSeconds: 3600, +func (p *TBinlogConfig) GetTtlSeconds() (v int64) { + if !p.IsSetTtlSeconds() { + return TBinlogConfig_TtlSeconds_DEFAULT } + return *p.TtlSeconds } -func (p *TCreateTabletReq) GetTabletId() (v types.TTabletId) { - return p.TabletId -} - -var TCreateTabletReq_TabletSchema_DEFAULT *TTabletSchema +var TBinlogConfig_MaxBytes_DEFAULT int64 
-func (p *TCreateTabletReq) GetTabletSchema() (v *TTabletSchema) { - if !p.IsSetTabletSchema() { - return TCreateTabletReq_TabletSchema_DEFAULT +func (p *TBinlogConfig) GetMaxBytes() (v int64) { + if !p.IsSetMaxBytes() { + return TBinlogConfig_MaxBytes_DEFAULT } - return p.TabletSchema + return *p.MaxBytes } -var TCreateTabletReq_Version_DEFAULT types.TVersion +var TBinlogConfig_MaxHistoryNums_DEFAULT int64 -func (p *TCreateTabletReq) GetVersion() (v types.TVersion) { - if !p.IsSetVersion() { - return TCreateTabletReq_Version_DEFAULT +func (p *TBinlogConfig) GetMaxHistoryNums() (v int64) { + if !p.IsSetMaxHistoryNums() { + return TBinlogConfig_MaxHistoryNums_DEFAULT } - return *p.Version + return *p.MaxHistoryNums } - -var TCreateTabletReq_VersionHash_DEFAULT types.TVersionHash - -func (p *TCreateTabletReq) GetVersionHash() (v types.TVersionHash) { - if !p.IsSetVersionHash() { - return TCreateTabletReq_VersionHash_DEFAULT - } - return *p.VersionHash +func (p *TBinlogConfig) SetEnable(val *bool) { + p.Enable = val } - -var TCreateTabletReq_StorageMedium_DEFAULT types.TStorageMedium - -func (p *TCreateTabletReq) GetStorageMedium() (v types.TStorageMedium) { - if !p.IsSetStorageMedium() { - return TCreateTabletReq_StorageMedium_DEFAULT - } - return *p.StorageMedium +func (p *TBinlogConfig) SetTtlSeconds(val *int64) { + p.TtlSeconds = val } - -var TCreateTabletReq_InRestoreMode_DEFAULT bool - -func (p *TCreateTabletReq) GetInRestoreMode() (v bool) { - if !p.IsSetInRestoreMode() { - return TCreateTabletReq_InRestoreMode_DEFAULT - } - return *p.InRestoreMode +func (p *TBinlogConfig) SetMaxBytes(val *int64) { + p.MaxBytes = val } - -var TCreateTabletReq_BaseTabletId_DEFAULT types.TTabletId - -func (p *TCreateTabletReq) GetBaseTabletId() (v types.TTabletId) { - if !p.IsSetBaseTabletId() { - return TCreateTabletReq_BaseTabletId_DEFAULT - } - return *p.BaseTabletId +func (p *TBinlogConfig) SetMaxHistoryNums(val *int64) { + p.MaxHistoryNums = val } -var TCreateTabletReq_BaseSchemaHash_DEFAULT types.TSchemaHash - -func (p *TCreateTabletReq) GetBaseSchemaHash() (v types.TSchemaHash) { - if !p.IsSetBaseSchemaHash() { - return TCreateTabletReq_BaseSchemaHash_DEFAULT - } - return *p.BaseSchemaHash +var fieldIDToName_TBinlogConfig = map[int16]string{ + 1: "enable", + 2: "ttl_seconds", + 3: "max_bytes", + 4: "max_history_nums", } -var TCreateTabletReq_TableId_DEFAULT int64 - -func (p *TCreateTabletReq) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TCreateTabletReq_TableId_DEFAULT - } - return *p.TableId +func (p *TBinlogConfig) IsSetEnable() bool { + return p.Enable != nil } -var TCreateTabletReq_PartitionId_DEFAULT int64 - -func (p *TCreateTabletReq) GetPartitionId() (v int64) { - if !p.IsSetPartitionId() { - return TCreateTabletReq_PartitionId_DEFAULT - } - return *p.PartitionId +func (p *TBinlogConfig) IsSetTtlSeconds() bool { + return p.TtlSeconds != nil } -var TCreateTabletReq_AllocationTerm_DEFAULT int64 - -func (p *TCreateTabletReq) GetAllocationTerm() (v int64) { - if !p.IsSetAllocationTerm() { - return TCreateTabletReq_AllocationTerm_DEFAULT - } - return *p.AllocationTerm +func (p *TBinlogConfig) IsSetMaxBytes() bool { + return p.MaxBytes != nil } -var TCreateTabletReq_IsEcoMode_DEFAULT bool - -func (p *TCreateTabletReq) GetIsEcoMode() (v bool) { - if !p.IsSetIsEcoMode() { - return TCreateTabletReq_IsEcoMode_DEFAULT - } - return *p.IsEcoMode +func (p *TBinlogConfig) IsSetMaxHistoryNums() bool { + return p.MaxHistoryNums != nil } -var TCreateTabletReq_StorageFormat_DEFAULT 
TStorageFormat +func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) { -func (p *TCreateTabletReq) GetStorageFormat() (v TStorageFormat) { - if !p.IsSetStorageFormat() { - return TCreateTabletReq_StorageFormat_DEFAULT + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return *p.StorageFormat -} -var TCreateTabletReq_TabletType_DEFAULT TTabletType + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } -func (p *TCreateTabletReq) GetTabletType() (v TTabletType) { - if !p.IsSetTabletType() { - return TCreateTabletReq_TabletType_DEFAULT + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return *p.TabletType -} -var TCreateTabletReq_CompressionType_DEFAULT TCompressionType = TCompressionType_LZ4F + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -func (p *TCreateTabletReq) GetCompressionType() (v TCompressionType) { - if !p.IsSetCompressionType() { - return TCreateTabletReq_CompressionType_DEFAULT - } - return p.CompressionType +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -var TCreateTabletReq_ReplicaId_DEFAULT types.TReplicaId = 0 +func (p *TBinlogConfig) ReadField1(iprot thrift.TProtocol) error { -func (p *TCreateTabletReq) GetReplicaId() (v types.TReplicaId) { - if !p.IsSetReplicaId() { - return TCreateTabletReq_ReplicaId_DEFAULT + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } - return p.ReplicaId + p.Enable = _field + return nil } +func (p *TBinlogConfig) ReadField2(iprot thrift.TProtocol) error { -var TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT bool = false - -func (p *TCreateTabletReq) GetEnableUniqueKeyMergeOnWrite() (v bool) { - if !p.IsSetEnableUniqueKeyMergeOnWrite() { - return TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT + var _field 
*int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return p.EnableUniqueKeyMergeOnWrite + p.TtlSeconds = _field + return nil } +func (p *TBinlogConfig) ReadField3(iprot thrift.TProtocol) error { -var TCreateTabletReq_StoragePolicyId_DEFAULT int64 - -func (p *TCreateTabletReq) GetStoragePolicyId() (v int64) { - if !p.IsSetStoragePolicyId() { - return TCreateTabletReq_StoragePolicyId_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return *p.StoragePolicyId + p.MaxBytes = _field + return nil } +func (p *TBinlogConfig) ReadField4(iprot thrift.TProtocol) error { -var TCreateTabletReq_BinlogConfig_DEFAULT *TBinlogConfig - -func (p *TCreateTabletReq) GetBinlogConfig() (v *TBinlogConfig) { - if !p.IsSetBinlogConfig() { - return TCreateTabletReq_BinlogConfig_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return p.BinlogConfig + p.MaxHistoryNums = _field + return nil } -var TCreateTabletReq_CompactionPolicy_DEFAULT string = "size_based" - -func (p *TCreateTabletReq) GetCompactionPolicy() (v string) { - if !p.IsSetCompactionPolicy() { - return TCreateTabletReq_CompactionPolicy_DEFAULT +func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBinlogConfig"); err != nil { + goto WriteStructBeginError } - return p.CompactionPolicy + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -var TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT int64 = 1024 +func (p *TBinlogConfig) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetEnable() { + if err = oprot.WriteFieldBegin("enable", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Enable); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} -func (p *TCreateTabletReq) GetTimeSeriesCompactionGoalSizeMbytes() (v int64) { - if !p.IsSetTimeSeriesCompactionGoalSizeMbytes() { - return TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT +func (p *TBinlogConfig) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTtlSeconds() { + if err = oprot.WriteFieldBegin("ttl_seconds", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TtlSeconds); 
err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.TimeSeriesCompactionGoalSizeMbytes + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -var TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT int64 = 2000 +func (p *TBinlogConfig) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxBytes() { + if err = oprot.WriteFieldBegin("max_bytes", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.MaxBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} -func (p *TCreateTabletReq) GetTimeSeriesCompactionFileCountThreshold() (v int64) { - if !p.IsSetTimeSeriesCompactionFileCountThreshold() { - return TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT +func (p *TBinlogConfig) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxHistoryNums() { + if err = oprot.WriteFieldBegin("max_history_nums", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.MaxHistoryNums); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.TimeSeriesCompactionFileCountThreshold + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -var TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT int64 = 3600 +func (p *TBinlogConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TBinlogConfig(%+v)", *p) -func (p *TCreateTabletReq) GetTimeSeriesCompactionTimeThresholdSeconds() (v int64) { - if !p.IsSetTimeSeriesCompactionTimeThresholdSeconds() { - return TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT +} + +func (p *TBinlogConfig) DeepEqual(ano *TBinlogConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return p.TimeSeriesCompactionTimeThresholdSeconds + if !p.Field1DeepEqual(ano.Enable) { + return false + } + if !p.Field2DeepEqual(ano.TtlSeconds) { + return false + } + if !p.Field3DeepEqual(ano.MaxBytes) { + return false + } + if !p.Field4DeepEqual(ano.MaxHistoryNums) { + return false + } + return true } -func (p *TCreateTabletReq) SetTabletId(val types.TTabletId) { - p.TabletId = val + +func (p *TBinlogConfig) Field1DeepEqual(src *bool) bool { + + if p.Enable == src { + return true + } else if p.Enable == nil || src == nil { + return false + } + if *p.Enable != *src { + return false + } + return true } -func (p *TCreateTabletReq) SetTabletSchema(val *TTabletSchema) { - p.TabletSchema = val +func (p *TBinlogConfig) Field2DeepEqual(src *int64) bool { + + if p.TtlSeconds == src { + return true + } else if p.TtlSeconds == nil || src == nil { + return false + } + if *p.TtlSeconds != *src { + return false + } + return true } -func (p *TCreateTabletReq) SetVersion(val *types.TVersion) { - p.Version = val +func (p *TBinlogConfig) Field3DeepEqual(src 
*int64) bool { + + if p.MaxBytes == src { + return true + } else if p.MaxBytes == nil || src == nil { + return false + } + if *p.MaxBytes != *src { + return false + } + return true } -func (p *TCreateTabletReq) SetVersionHash(val *types.TVersionHash) { - p.VersionHash = val +func (p *TBinlogConfig) Field4DeepEqual(src *int64) bool { + + if p.MaxHistoryNums == src { + return true + } else if p.MaxHistoryNums == nil || src == nil { + return false + } + if *p.MaxHistoryNums != *src { + return false + } + return true } -func (p *TCreateTabletReq) SetStorageMedium(val *types.TStorageMedium) { - p.StorageMedium = val + +type TCreateTabletReq struct { + TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` + TabletSchema *TTabletSchema `thrift:"tablet_schema,2,required" frugal:"2,required,TTabletSchema" json:"tablet_schema"` + Version *types.TVersion `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` + VersionHash *types.TVersionHash `thrift:"version_hash,4,optional" frugal:"4,optional,i64" json:"version_hash,omitempty"` + StorageMedium *types.TStorageMedium `thrift:"storage_medium,5,optional" frugal:"5,optional,TStorageMedium" json:"storage_medium,omitempty"` + InRestoreMode *bool `thrift:"in_restore_mode,6,optional" frugal:"6,optional,bool" json:"in_restore_mode,omitempty"` + BaseTabletId *types.TTabletId `thrift:"base_tablet_id,7,optional" frugal:"7,optional,i64" json:"base_tablet_id,omitempty"` + BaseSchemaHash *types.TSchemaHash `thrift:"base_schema_hash,8,optional" frugal:"8,optional,i32" json:"base_schema_hash,omitempty"` + TableId *int64 `thrift:"table_id,9,optional" frugal:"9,optional,i64" json:"table_id,omitempty"` + PartitionId *int64 `thrift:"partition_id,10,optional" frugal:"10,optional,i64" json:"partition_id,omitempty"` + AllocationTerm *int64 `thrift:"allocation_term,11,optional" frugal:"11,optional,i64" json:"allocation_term,omitempty"` + IsEcoMode *bool `thrift:"is_eco_mode,12,optional" frugal:"12,optional,bool" json:"is_eco_mode,omitempty"` + StorageFormat *TStorageFormat `thrift:"storage_format,13,optional" frugal:"13,optional,TStorageFormat" json:"storage_format,omitempty"` + TabletType *TTabletType `thrift:"tablet_type,14,optional" frugal:"14,optional,TTabletType" json:"tablet_type,omitempty"` + CompressionType TCompressionType `thrift:"compression_type,16,optional" frugal:"16,optional,TCompressionType" json:"compression_type,omitempty"` + ReplicaId types.TReplicaId `thrift:"replica_id,17,optional" frugal:"17,optional,i64" json:"replica_id,omitempty"` + EnableUniqueKeyMergeOnWrite bool `thrift:"enable_unique_key_merge_on_write,19,optional" frugal:"19,optional,bool" json:"enable_unique_key_merge_on_write,omitempty"` + StoragePolicyId *int64 `thrift:"storage_policy_id,20,optional" frugal:"20,optional,i64" json:"storage_policy_id,omitempty"` + BinlogConfig *TBinlogConfig `thrift:"binlog_config,21,optional" frugal:"21,optional,TBinlogConfig" json:"binlog_config,omitempty"` + CompactionPolicy string `thrift:"compaction_policy,22,optional" frugal:"22,optional,string" json:"compaction_policy,omitempty"` + TimeSeriesCompactionGoalSizeMbytes int64 `thrift:"time_series_compaction_goal_size_mbytes,23,optional" frugal:"23,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"` + TimeSeriesCompactionFileCountThreshold int64 `thrift:"time_series_compaction_file_count_threshold,24,optional" frugal:"24,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"` + 
TimeSeriesCompactionTimeThresholdSeconds int64 `thrift:"time_series_compaction_time_threshold_seconds,25,optional" frugal:"25,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"` + TimeSeriesCompactionEmptyRowsetsThreshold int64 `thrift:"time_series_compaction_empty_rowsets_threshold,26,optional" frugal:"26,optional,i64" json:"time_series_compaction_empty_rowsets_threshold,omitempty"` + TimeSeriesCompactionLevelThreshold int64 `thrift:"time_series_compaction_level_threshold,27,optional" frugal:"27,optional,i64" json:"time_series_compaction_level_threshold,omitempty"` + InvertedIndexStorageFormat TInvertedIndexStorageFormat `thrift:"inverted_index_storage_format,28,optional" frugal:"28,optional,TInvertedIndexStorageFormat" json:"inverted_index_storage_format,omitempty"` + InvertedIndexFileStorageFormat types.TInvertedIndexFileStorageFormat `thrift:"inverted_index_file_storage_format,29,optional" frugal:"29,optional,TInvertedIndexFileStorageFormat" json:"inverted_index_file_storage_format,omitempty"` + IsInMemory bool `thrift:"is_in_memory,1000,optional" frugal:"1000,optional,bool" json:"is_in_memory,omitempty"` + IsPersistent bool `thrift:"is_persistent,1001,optional" frugal:"1001,optional,bool" json:"is_persistent,omitempty"` } -func (p *TCreateTabletReq) SetInRestoreMode(val *bool) { - p.InRestoreMode = val + +func NewTCreateTabletReq() *TCreateTabletReq { + return &TCreateTabletReq{ + + CompressionType: TCompressionType_LZ4F, + ReplicaId: 0, + EnableUniqueKeyMergeOnWrite: false, + CompactionPolicy: "size_based", + TimeSeriesCompactionGoalSizeMbytes: 1024, + TimeSeriesCompactionFileCountThreshold: 2000, + TimeSeriesCompactionTimeThresholdSeconds: 3600, + TimeSeriesCompactionEmptyRowsetsThreshold: 5, + TimeSeriesCompactionLevelThreshold: 1, + InvertedIndexStorageFormat: TInvertedIndexStorageFormat_DEFAULT, + InvertedIndexFileStorageFormat: types.TInvertedIndexFileStorageFormat_V2, + IsInMemory: false, + IsPersistent: false, + } } -func (p *TCreateTabletReq) SetBaseTabletId(val *types.TTabletId) { - p.BaseTabletId = val + +func (p *TCreateTabletReq) InitDefault() { + p.CompressionType = TCompressionType_LZ4F + p.ReplicaId = 0 + p.EnableUniqueKeyMergeOnWrite = false + p.CompactionPolicy = "size_based" + p.TimeSeriesCompactionGoalSizeMbytes = 1024 + p.TimeSeriesCompactionFileCountThreshold = 2000 + p.TimeSeriesCompactionTimeThresholdSeconds = 3600 + p.TimeSeriesCompactionEmptyRowsetsThreshold = 5 + p.TimeSeriesCompactionLevelThreshold = 1 + p.InvertedIndexStorageFormat = TInvertedIndexStorageFormat_DEFAULT + p.InvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat_V2 + p.IsInMemory = false + p.IsPersistent = false } -func (p *TCreateTabletReq) SetBaseSchemaHash(val *types.TSchemaHash) { - p.BaseSchemaHash = val + +func (p *TCreateTabletReq) GetTabletId() (v types.TTabletId) { + return p.TabletId } -func (p *TCreateTabletReq) SetTableId(val *int64) { - p.TableId = val + +var TCreateTabletReq_TabletSchema_DEFAULT *TTabletSchema + +func (p *TCreateTabletReq) GetTabletSchema() (v *TTabletSchema) { + if !p.IsSetTabletSchema() { + return TCreateTabletReq_TabletSchema_DEFAULT + } + return p.TabletSchema } -func (p *TCreateTabletReq) SetPartitionId(val *int64) { - p.PartitionId = val + +var TCreateTabletReq_Version_DEFAULT types.TVersion + +func (p *TCreateTabletReq) GetVersion() (v types.TVersion) { + if !p.IsSetVersion() { + return TCreateTabletReq_Version_DEFAULT + } + return *p.Version } -func (p *TCreateTabletReq) SetAllocationTerm(val *int64) { - 
p.AllocationTerm = val + +var TCreateTabletReq_VersionHash_DEFAULT types.TVersionHash + +func (p *TCreateTabletReq) GetVersionHash() (v types.TVersionHash) { + if !p.IsSetVersionHash() { + return TCreateTabletReq_VersionHash_DEFAULT + } + return *p.VersionHash } -func (p *TCreateTabletReq) SetIsEcoMode(val *bool) { - p.IsEcoMode = val + +var TCreateTabletReq_StorageMedium_DEFAULT types.TStorageMedium + +func (p *TCreateTabletReq) GetStorageMedium() (v types.TStorageMedium) { + if !p.IsSetStorageMedium() { + return TCreateTabletReq_StorageMedium_DEFAULT + } + return *p.StorageMedium +} + +var TCreateTabletReq_InRestoreMode_DEFAULT bool + +func (p *TCreateTabletReq) GetInRestoreMode() (v bool) { + if !p.IsSetInRestoreMode() { + return TCreateTabletReq_InRestoreMode_DEFAULT + } + return *p.InRestoreMode +} + +var TCreateTabletReq_BaseTabletId_DEFAULT types.TTabletId + +func (p *TCreateTabletReq) GetBaseTabletId() (v types.TTabletId) { + if !p.IsSetBaseTabletId() { + return TCreateTabletReq_BaseTabletId_DEFAULT + } + return *p.BaseTabletId +} + +var TCreateTabletReq_BaseSchemaHash_DEFAULT types.TSchemaHash + +func (p *TCreateTabletReq) GetBaseSchemaHash() (v types.TSchemaHash) { + if !p.IsSetBaseSchemaHash() { + return TCreateTabletReq_BaseSchemaHash_DEFAULT + } + return *p.BaseSchemaHash +} + +var TCreateTabletReq_TableId_DEFAULT int64 + +func (p *TCreateTabletReq) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TCreateTabletReq_TableId_DEFAULT + } + return *p.TableId +} + +var TCreateTabletReq_PartitionId_DEFAULT int64 + +func (p *TCreateTabletReq) GetPartitionId() (v int64) { + if !p.IsSetPartitionId() { + return TCreateTabletReq_PartitionId_DEFAULT + } + return *p.PartitionId +} + +var TCreateTabletReq_AllocationTerm_DEFAULT int64 + +func (p *TCreateTabletReq) GetAllocationTerm() (v int64) { + if !p.IsSetAllocationTerm() { + return TCreateTabletReq_AllocationTerm_DEFAULT + } + return *p.AllocationTerm +} + +var TCreateTabletReq_IsEcoMode_DEFAULT bool + +func (p *TCreateTabletReq) GetIsEcoMode() (v bool) { + if !p.IsSetIsEcoMode() { + return TCreateTabletReq_IsEcoMode_DEFAULT + } + return *p.IsEcoMode +} + +var TCreateTabletReq_StorageFormat_DEFAULT TStorageFormat + +func (p *TCreateTabletReq) GetStorageFormat() (v TStorageFormat) { + if !p.IsSetStorageFormat() { + return TCreateTabletReq_StorageFormat_DEFAULT + } + return *p.StorageFormat +} + +var TCreateTabletReq_TabletType_DEFAULT TTabletType + +func (p *TCreateTabletReq) GetTabletType() (v TTabletType) { + if !p.IsSetTabletType() { + return TCreateTabletReq_TabletType_DEFAULT + } + return *p.TabletType +} + +var TCreateTabletReq_CompressionType_DEFAULT TCompressionType = TCompressionType_LZ4F + +func (p *TCreateTabletReq) GetCompressionType() (v TCompressionType) { + if !p.IsSetCompressionType() { + return TCreateTabletReq_CompressionType_DEFAULT + } + return p.CompressionType +} + +var TCreateTabletReq_ReplicaId_DEFAULT types.TReplicaId = 0 + +func (p *TCreateTabletReq) GetReplicaId() (v types.TReplicaId) { + if !p.IsSetReplicaId() { + return TCreateTabletReq_ReplicaId_DEFAULT + } + return p.ReplicaId +} + +var TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT bool = false + +func (p *TCreateTabletReq) GetEnableUniqueKeyMergeOnWrite() (v bool) { + if !p.IsSetEnableUniqueKeyMergeOnWrite() { + return TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT + } + return p.EnableUniqueKeyMergeOnWrite +} + +var TCreateTabletReq_StoragePolicyId_DEFAULT int64 + +func (p *TCreateTabletReq) GetStoragePolicyId() (v int64) { + if 
!p.IsSetStoragePolicyId() { + return TCreateTabletReq_StoragePolicyId_DEFAULT + } + return *p.StoragePolicyId +} + +var TCreateTabletReq_BinlogConfig_DEFAULT *TBinlogConfig + +func (p *TCreateTabletReq) GetBinlogConfig() (v *TBinlogConfig) { + if !p.IsSetBinlogConfig() { + return TCreateTabletReq_BinlogConfig_DEFAULT + } + return p.BinlogConfig +} + +var TCreateTabletReq_CompactionPolicy_DEFAULT string = "size_based" + +func (p *TCreateTabletReq) GetCompactionPolicy() (v string) { + if !p.IsSetCompactionPolicy() { + return TCreateTabletReq_CompactionPolicy_DEFAULT + } + return p.CompactionPolicy +} + +var TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT int64 = 1024 + +func (p *TCreateTabletReq) GetTimeSeriesCompactionGoalSizeMbytes() (v int64) { + if !p.IsSetTimeSeriesCompactionGoalSizeMbytes() { + return TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT + } + return p.TimeSeriesCompactionGoalSizeMbytes +} + +var TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT int64 = 2000 + +func (p *TCreateTabletReq) GetTimeSeriesCompactionFileCountThreshold() (v int64) { + if !p.IsSetTimeSeriesCompactionFileCountThreshold() { + return TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT + } + return p.TimeSeriesCompactionFileCountThreshold +} + +var TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT int64 = 3600 + +func (p *TCreateTabletReq) GetTimeSeriesCompactionTimeThresholdSeconds() (v int64) { + if !p.IsSetTimeSeriesCompactionTimeThresholdSeconds() { + return TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT + } + return p.TimeSeriesCompactionTimeThresholdSeconds +} + +var TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT int64 = 5 + +func (p *TCreateTabletReq) GetTimeSeriesCompactionEmptyRowsetsThreshold() (v int64) { + if !p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + return TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT + } + return p.TimeSeriesCompactionEmptyRowsetsThreshold +} + +var TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT int64 = 1 + +func (p *TCreateTabletReq) GetTimeSeriesCompactionLevelThreshold() (v int64) { + if !p.IsSetTimeSeriesCompactionLevelThreshold() { + return TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT + } + return p.TimeSeriesCompactionLevelThreshold +} + +var TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT TInvertedIndexStorageFormat = TInvertedIndexStorageFormat_DEFAULT + +func (p *TCreateTabletReq) GetInvertedIndexStorageFormat() (v TInvertedIndexStorageFormat) { + if !p.IsSetInvertedIndexStorageFormat() { + return TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT + } + return p.InvertedIndexStorageFormat +} + +var TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT types.TInvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat_V2 + +func (p *TCreateTabletReq) GetInvertedIndexFileStorageFormat() (v types.TInvertedIndexFileStorageFormat) { + if !p.IsSetInvertedIndexFileStorageFormat() { + return TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT + } + return p.InvertedIndexFileStorageFormat +} + +var TCreateTabletReq_IsInMemory_DEFAULT bool = false + +func (p *TCreateTabletReq) GetIsInMemory() (v bool) { + if !p.IsSetIsInMemory() { + return TCreateTabletReq_IsInMemory_DEFAULT + } + return p.IsInMemory +} + +var TCreateTabletReq_IsPersistent_DEFAULT bool = false + +func (p *TCreateTabletReq) GetIsPersistent() (v bool) { + if !p.IsSetIsPersistent() { + return 
TCreateTabletReq_IsPersistent_DEFAULT + } + return p.IsPersistent +} +func (p *TCreateTabletReq) SetTabletId(val types.TTabletId) { + p.TabletId = val +} +func (p *TCreateTabletReq) SetTabletSchema(val *TTabletSchema) { + p.TabletSchema = val +} +func (p *TCreateTabletReq) SetVersion(val *types.TVersion) { + p.Version = val +} +func (p *TCreateTabletReq) SetVersionHash(val *types.TVersionHash) { + p.VersionHash = val +} +func (p *TCreateTabletReq) SetStorageMedium(val *types.TStorageMedium) { + p.StorageMedium = val +} +func (p *TCreateTabletReq) SetInRestoreMode(val *bool) { + p.InRestoreMode = val +} +func (p *TCreateTabletReq) SetBaseTabletId(val *types.TTabletId) { + p.BaseTabletId = val +} +func (p *TCreateTabletReq) SetBaseSchemaHash(val *types.TSchemaHash) { + p.BaseSchemaHash = val +} +func (p *TCreateTabletReq) SetTableId(val *int64) { + p.TableId = val +} +func (p *TCreateTabletReq) SetPartitionId(val *int64) { + p.PartitionId = val +} +func (p *TCreateTabletReq) SetAllocationTerm(val *int64) { + p.AllocationTerm = val +} +func (p *TCreateTabletReq) SetIsEcoMode(val *bool) { + p.IsEcoMode = val } func (p *TCreateTabletReq) SetStorageFormat(val *TStorageFormat) { p.StorageFormat = val @@ -4710,31 +5788,55 @@ func (p *TCreateTabletReq) SetTimeSeriesCompactionFileCountThreshold(val int64) func (p *TCreateTabletReq) SetTimeSeriesCompactionTimeThresholdSeconds(val int64) { p.TimeSeriesCompactionTimeThresholdSeconds = val } +func (p *TCreateTabletReq) SetTimeSeriesCompactionEmptyRowsetsThreshold(val int64) { + p.TimeSeriesCompactionEmptyRowsetsThreshold = val +} +func (p *TCreateTabletReq) SetTimeSeriesCompactionLevelThreshold(val int64) { + p.TimeSeriesCompactionLevelThreshold = val +} +func (p *TCreateTabletReq) SetInvertedIndexStorageFormat(val TInvertedIndexStorageFormat) { + p.InvertedIndexStorageFormat = val +} +func (p *TCreateTabletReq) SetInvertedIndexFileStorageFormat(val types.TInvertedIndexFileStorageFormat) { + p.InvertedIndexFileStorageFormat = val +} +func (p *TCreateTabletReq) SetIsInMemory(val bool) { + p.IsInMemory = val +} +func (p *TCreateTabletReq) SetIsPersistent(val bool) { + p.IsPersistent = val +} var fieldIDToName_TCreateTabletReq = map[int16]string{ - 1: "tablet_id", - 2: "tablet_schema", - 3: "version", - 4: "version_hash", - 5: "storage_medium", - 6: "in_restore_mode", - 7: "base_tablet_id", - 8: "base_schema_hash", - 9: "table_id", - 10: "partition_id", - 11: "allocation_term", - 12: "is_eco_mode", - 13: "storage_format", - 14: "tablet_type", - 16: "compression_type", - 17: "replica_id", - 19: "enable_unique_key_merge_on_write", - 20: "storage_policy_id", - 21: "binlog_config", - 22: "compaction_policy", - 23: "time_series_compaction_goal_size_mbytes", - 24: "time_series_compaction_file_count_threshold", - 25: "time_series_compaction_time_threshold_seconds", + 1: "tablet_id", + 2: "tablet_schema", + 3: "version", + 4: "version_hash", + 5: "storage_medium", + 6: "in_restore_mode", + 7: "base_tablet_id", + 8: "base_schema_hash", + 9: "table_id", + 10: "partition_id", + 11: "allocation_term", + 12: "is_eco_mode", + 13: "storage_format", + 14: "tablet_type", + 16: "compression_type", + 17: "replica_id", + 19: "enable_unique_key_merge_on_write", + 20: "storage_policy_id", + 21: "binlog_config", + 22: "compaction_policy", + 23: "time_series_compaction_goal_size_mbytes", + 24: "time_series_compaction_file_count_threshold", + 25: "time_series_compaction_time_threshold_seconds", + 26: "time_series_compaction_empty_rowsets_threshold", + 27: 
"time_series_compaction_level_threshold", + 28: "inverted_index_storage_format", + 29: "inverted_index_file_storage_format", + 1000: "is_in_memory", + 1001: "is_persistent", } func (p *TCreateTabletReq) IsSetTabletSchema() bool { @@ -4825,6 +5927,30 @@ func (p *TCreateTabletReq) IsSetTimeSeriesCompactionTimeThresholdSeconds() bool return p.TimeSeriesCompactionTimeThresholdSeconds != TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT } +func (p *TCreateTabletReq) IsSetTimeSeriesCompactionEmptyRowsetsThreshold() bool { + return p.TimeSeriesCompactionEmptyRowsetsThreshold != TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT +} + +func (p *TCreateTabletReq) IsSetTimeSeriesCompactionLevelThreshold() bool { + return p.TimeSeriesCompactionLevelThreshold != TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT +} + +func (p *TCreateTabletReq) IsSetInvertedIndexStorageFormat() bool { + return p.InvertedIndexStorageFormat != TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT +} + +func (p *TCreateTabletReq) IsSetInvertedIndexFileStorageFormat() bool { + return p.InvertedIndexFileStorageFormat != TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT +} + +func (p *TCreateTabletReq) IsSetIsInMemory() bool { + return p.IsInMemory != TCreateTabletReq_IsInMemory_DEFAULT +} + +func (p *TCreateTabletReq) IsSetIsPersistent() bool { + return p.IsPersistent != TCreateTabletReq_IsPersistent_DEFAULT +} + func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -4852,10 +5978,8 @@ func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -4863,227 +5987,230 @@ func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletSchema = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 
8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I32 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.I32 { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.I64 { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.BOOL { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.I64 { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.STRUCT { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: if fieldTypeId == thrift.STRING { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: if 
fieldTypeId == thrift.I64 { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: if fieldTypeId == thrift.I64 { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: if fieldTypeId == thrift.I64 { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 26: + if fieldTypeId == thrift.I64 { + if err = p.ReadField26(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 27: + if fieldTypeId == thrift.I64 { + if err = p.ReadField27(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 28: + if fieldTypeId == thrift.I32 { + if err = p.ReadField28(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 29: + if fieldTypeId == thrift.I32 { + if err = p.ReadField29(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5120,210 +6247,319 @@ RequiredFieldNotSetError: } func (p *TCreateTabletReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TCreateTabletReq) ReadField2(iprot thrift.TProtocol) error { - p.TabletSchema = NewTTabletSchema() - if err := p.TabletSchema.Read(iprot); err != nil { + _field := NewTTabletSchema() + if err := _field.Read(iprot); err != nil { return err } + p.TabletSchema = _field return nil } - func (p *TCreateTabletReq) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = &v + _field = &v } + p.Version = _field return nil } - func (p *TCreateTabletReq) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = &v + _field = &v } + p.VersionHash = _field return nil } - func (p *TCreateTabletReq) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TStorageMedium if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TStorageMedium(v) - p.StorageMedium = &tmp + _field = &tmp } + p.StorageMedium = _field return nil } - func (p *TCreateTabletReq) ReadField6(iprot thrift.TProtocol) 
error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.InRestoreMode = &v + _field = &v } + p.InRestoreMode = _field return nil } - func (p *TCreateTabletReq) ReadField7(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BaseTabletId = &v + _field = &v } + p.BaseTabletId = _field return nil } - func (p *TCreateTabletReq) ReadField8(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BaseSchemaHash = &v + _field = &v } + p.BaseSchemaHash = _field return nil } - func (p *TCreateTabletReq) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = &v + _field = &v } + p.TableId = _field return nil } - func (p *TCreateTabletReq) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = &v + _field = &v } + p.PartitionId = _field return nil } - func (p *TCreateTabletReq) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AllocationTerm = &v + _field = &v } + p.AllocationTerm = _field return nil } - func (p *TCreateTabletReq) ReadField12(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsEcoMode = &v + _field = &v } + p.IsEcoMode = _field return nil } - func (p *TCreateTabletReq) ReadField13(iprot thrift.TProtocol) error { + + var _field *TStorageFormat if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TStorageFormat(v) - p.StorageFormat = &tmp + _field = &tmp } + p.StorageFormat = _field return nil } - func (p *TCreateTabletReq) ReadField14(iprot thrift.TProtocol) error { + + var _field *TTabletType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TTabletType(v) - p.TabletType = &tmp + _field = &tmp } + p.TabletType = _field return nil } - func (p *TCreateTabletReq) ReadField16(iprot thrift.TProtocol) error { + + var _field TCompressionType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.CompressionType = TCompressionType(v) + _field = TCompressionType(v) } + p.CompressionType = _field return nil } - func (p *TCreateTabletReq) ReadField17(iprot thrift.TProtocol) error { + + var _field types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = v + _field = v } + p.ReplicaId = _field return nil } - func (p *TCreateTabletReq) ReadField19(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EnableUniqueKeyMergeOnWrite = v + _field = v } + p.EnableUniqueKeyMergeOnWrite = _field return nil } - func (p *TCreateTabletReq) ReadField20(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.StoragePolicyId = &v + _field = &v } + p.StoragePolicyId = _field return nil } - func (p *TCreateTabletReq) ReadField21(iprot thrift.TProtocol) error { - p.BinlogConfig = NewTBinlogConfig() - if err := p.BinlogConfig.Read(iprot); err != nil { + _field := NewTBinlogConfig() + if err := _field.Read(iprot); err != nil { return err } + p.BinlogConfig = _field return nil } - func (p *TCreateTabletReq) ReadField22(iprot thrift.TProtocol) error { + + var _field string if v, err := 
iprot.ReadString(); err != nil { return err } else { - p.CompactionPolicy = v + _field = v } + p.CompactionPolicy = _field return nil } - func (p *TCreateTabletReq) ReadField23(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionGoalSizeMbytes = v + _field = v } + p.TimeSeriesCompactionGoalSizeMbytes = _field return nil } - func (p *TCreateTabletReq) ReadField24(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionFileCountThreshold = v + _field = v } + p.TimeSeriesCompactionFileCountThreshold = _field return nil } - func (p *TCreateTabletReq) ReadField25(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TimeSeriesCompactionTimeThresholdSeconds = _field + return nil +} +func (p *TCreateTabletReq) ReadField26(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionTimeThresholdSeconds = v + _field = v } + p.TimeSeriesCompactionEmptyRowsetsThreshold = _field + return nil +} +func (p *TCreateTabletReq) ReadField27(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TimeSeriesCompactionLevelThreshold = _field + return nil +} +func (p *TCreateTabletReq) ReadField28(iprot thrift.TProtocol) error { + + var _field TInvertedIndexStorageFormat + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TInvertedIndexStorageFormat(v) + } + p.InvertedIndexStorageFormat = _field + return nil +} +func (p *TCreateTabletReq) ReadField29(iprot thrift.TProtocol) error { + + var _field types.TInvertedIndexFileStorageFormat + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TInvertedIndexFileStorageFormat(v) + } + p.InvertedIndexFileStorageFormat = _field + return nil +} +func (p *TCreateTabletReq) ReadField1000(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsInMemory = _field + return nil +} +func (p *TCreateTabletReq) ReadField1001(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsPersistent = _field return nil } @@ -5425,7 +6661,30 @@ func (p *TCreateTabletReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 25 goto WriteFieldError } - + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5877,39 +7136,154 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) } -func (p *TCreateTabletReq) String() string { - if p == nil { - return "" +func (p *TCreateTabletReq) writeField26(oprot thrift.TProtocol) (err error) { + if 
p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + if err = oprot.WriteFieldBegin("time_series_compaction_empty_rowsets_threshold", thrift.I64, 26); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TimeSeriesCompactionEmptyRowsetsThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return fmt.Sprintf("TCreateTabletReq(%+v)", *p) + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) } -func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.TabletId) { - return false - } - if !p.Field2DeepEqual(ano.TabletSchema) { - return false - } - if !p.Field3DeepEqual(ano.Version) { - return false - } - if !p.Field4DeepEqual(ano.VersionHash) { - return false +func (p *TCreateTabletReq) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeSeriesCompactionLevelThreshold() { + if err = oprot.WriteFieldBegin("time_series_compaction_level_threshold", thrift.I64, 27); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TimeSeriesCompactionLevelThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if !p.Field5DeepEqual(ano.StorageMedium) { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) +} + +func (p *TCreateTabletReq) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexStorageFormat() { + if err = oprot.WriteFieldBegin("inverted_index_storage_format", thrift.I32, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.InvertedIndexStorageFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if !p.Field6DeepEqual(ano.InRestoreMode) { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) +} + +func (p *TCreateTabletReq) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexFileStorageFormat() { + if err = oprot.WriteFieldBegin("inverted_index_file_storage_format", thrift.I32, 29); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.InvertedIndexFileStorageFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if !p.Field7DeepEqual(ano.BaseTabletId) { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) +} + +func (p *TCreateTabletReq) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetIsInMemory() { + if err = oprot.WriteFieldBegin("is_in_memory", thrift.BOOL, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsInMemory); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err 
!= nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TCreateTabletReq) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetIsPersistent() { + if err = oprot.WriteFieldBegin("is_persistent", thrift.BOOL, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsPersistent); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) +} + +func (p *TCreateTabletReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCreateTabletReq(%+v)", *p) + +} + +func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TabletId) { + return false + } + if !p.Field2DeepEqual(ano.TabletSchema) { + return false + } + if !p.Field3DeepEqual(ano.Version) { + return false + } + if !p.Field4DeepEqual(ano.VersionHash) { + return false + } + if !p.Field5DeepEqual(ano.StorageMedium) { + return false + } + if !p.Field6DeepEqual(ano.InRestoreMode) { + return false + } + if !p.Field7DeepEqual(ano.BaseTabletId) { + return false } if !p.Field8DeepEqual(ano.BaseSchemaHash) { return false @@ -5959,6 +7333,24 @@ func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool { if !p.Field25DeepEqual(ano.TimeSeriesCompactionTimeThresholdSeconds) { return false } + if !p.Field26DeepEqual(ano.TimeSeriesCompactionEmptyRowsetsThreshold) { + return false + } + if !p.Field27DeepEqual(ano.TimeSeriesCompactionLevelThreshold) { + return false + } + if !p.Field28DeepEqual(ano.InvertedIndexStorageFormat) { + return false + } + if !p.Field29DeepEqual(ano.InvertedIndexFileStorageFormat) { + return false + } + if !p.Field1000DeepEqual(ano.IsInMemory) { + return false + } + if !p.Field1001DeepEqual(ano.IsPersistent) { + return false + } return true } @@ -6188,6 +7580,48 @@ func (p *TCreateTabletReq) Field25DeepEqual(src int64) bool { } return true } +func (p *TCreateTabletReq) Field26DeepEqual(src int64) bool { + + if p.TimeSeriesCompactionEmptyRowsetsThreshold != src { + return false + } + return true +} +func (p *TCreateTabletReq) Field27DeepEqual(src int64) bool { + + if p.TimeSeriesCompactionLevelThreshold != src { + return false + } + return true +} +func (p *TCreateTabletReq) Field28DeepEqual(src TInvertedIndexStorageFormat) bool { + + if p.InvertedIndexStorageFormat != src { + return false + } + return true +} +func (p *TCreateTabletReq) Field29DeepEqual(src types.TInvertedIndexFileStorageFormat) bool { + + if p.InvertedIndexFileStorageFormat != src { + return false + } + return true +} +func (p *TCreateTabletReq) Field1000DeepEqual(src bool) bool { + + if p.IsInMemory != src { + return false + } + return true +} +func (p *TCreateTabletReq) Field1001DeepEqual(src bool) bool { + + if p.IsPersistent != src { + return false + } + return true +} type TDropTabletReq struct { TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` @@ -6205,11 +7639,8 @@ func NewTDropTabletReq() *TDropTabletReq { } func (p 
*TDropTabletReq) InitDefault() { - *p = TDropTabletReq{ - - ReplicaId: 0, - IsDropTableOrPartition: false, - } + p.ReplicaId = 0 + p.IsDropTableOrPartition = false } func (p *TDropTabletReq) GetTabletId() (v types.TTabletId) { @@ -6300,47 +7731,38 @@ func (p *TDropTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6372,38 +7794,47 @@ RequiredFieldNotSetError: } func (p *TDropTabletReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TDropTabletReq) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = &v + _field = &v } + p.SchemaHash = _field return nil } - func (p *TDropTabletReq) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = v + _field = v } + p.ReplicaId = _field return nil } - func (p *TDropTabletReq) ReadField4(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDropTableOrPartition = v + _field = v } + p.IsDropTableOrPartition = _field return nil } @@ -6429,7 +7860,6 @@ func (p *TDropTabletReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6527,6 +7957,7 @@ func (p *TDropTabletReq) String() string { return "" } return fmt.Sprintf("TDropTabletReq(%+v)", *p) + } func (p *TDropTabletReq) DeepEqual(ano *TDropTabletReq) bool { @@ -6595,7 +8026,6 @@ func NewTAlterTabletReq() *TAlterTabletReq { } func (p *TAlterTabletReq) InitDefault() { - *p = TAlterTabletReq{} } func (p *TAlterTabletReq) GetBaseTabletId() (v types.TTabletId) { @@ -6662,10 +8092,8 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBaseTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -6673,10 +8101,8 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } 
issetBaseSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -6684,17 +8110,14 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNewTabletReq_ = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6736,28 +8159,33 @@ RequiredFieldNotSetError: } func (p *TAlterTabletReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BaseTabletId = v + _field = v } + p.BaseTabletId = _field return nil } - func (p *TAlterTabletReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BaseSchemaHash = v + _field = v } + p.BaseSchemaHash = _field return nil } - func (p *TAlterTabletReq) ReadField3(iprot thrift.TProtocol) error { - p.NewTabletReq_ = NewTCreateTabletReq() - if err := p.NewTabletReq_.Read(iprot); err != nil { + _field := NewTCreateTabletReq() + if err := _field.Read(iprot); err != nil { return err } + p.NewTabletReq_ = _field return nil } @@ -6779,7 +8207,6 @@ func (p *TAlterTabletReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6854,6 +8281,7 @@ func (p *TAlterTabletReq) String() string { return "" } return fmt.Sprintf("TAlterTabletReq(%+v)", *p) + } func (p *TAlterTabletReq) DeepEqual(ano *TAlterTabletReq) bool { @@ -6907,7 +8335,6 @@ func NewTAlterMaterializedViewParam() *TAlterMaterializedViewParam { } func (p *TAlterMaterializedViewParam) InitDefault() { - *p = TAlterMaterializedViewParam{} } func (p *TAlterMaterializedViewParam) GetColumnName() (v string) { @@ -6981,37 +8408,30 @@ func (p *TAlterMaterializedViewParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7043,28 +8463,33 @@ RequiredFieldNotSetError: } func (p *TAlterMaterializedViewParam) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnName = v + _field = v } + p.ColumnName = _field return nil } - func (p *TAlterMaterializedViewParam) ReadField2(iprot 
thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.OriginColumnName = &v + _field = &v } + p.OriginColumnName = _field return nil } - func (p *TAlterMaterializedViewParam) ReadField3(iprot thrift.TProtocol) error { - p.MvExpr = exprs.NewTExpr() - if err := p.MvExpr.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.MvExpr = _field return nil } @@ -7086,7 +8511,6 @@ func (p *TAlterMaterializedViewParam) Write(oprot thrift.TProtocol) (err error) fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7165,6 +8589,7 @@ func (p *TAlterMaterializedViewParam) String() string { return "" } return fmt.Sprintf("TAlterMaterializedViewParam(%+v)", *p) + } func (p *TAlterMaterializedViewParam) DeepEqual(ano *TAlterMaterializedViewParam) bool { @@ -7224,6 +8649,9 @@ type TAlterTabletReqV2 struct { DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,9,optional" frugal:"9,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` Columns []*descriptors.TColumn `thrift:"columns,10,optional" frugal:"10,optional,list" json:"columns,omitempty"` BeExecVersion int32 `thrift:"be_exec_version,11,optional" frugal:"11,optional,i32" json:"be_exec_version,omitempty"` + JobId *int64 `thrift:"job_id,1000,optional" frugal:"1000,optional,i64" json:"job_id,omitempty"` + Expiration *int64 `thrift:"expiration,1001,optional" frugal:"1001,optional,i64" json:"expiration,omitempty"` + StorageVaultId *string `thrift:"storage_vault_id,1002,optional" frugal:"1002,optional,string" json:"storage_vault_id,omitempty"` } func NewTAlterTabletReqV2() *TAlterTabletReqV2 { @@ -7235,11 +8663,8 @@ func NewTAlterTabletReqV2() *TAlterTabletReqV2 { } func (p *TAlterTabletReqV2) InitDefault() { - *p = TAlterTabletReqV2{ - - AlterTabletType: TAlterTabletType_SCHEMA_CHANGE, - BeExecVersion: 0, - } + p.AlterTabletType = TAlterTabletType_SCHEMA_CHANGE + p.BeExecVersion = 0 } func (p *TAlterTabletReqV2) GetBaseTabletId() (v types.TTabletId) { @@ -7320,6 +8745,33 @@ func (p *TAlterTabletReqV2) GetBeExecVersion() (v int32) { } return p.BeExecVersion } + +var TAlterTabletReqV2_JobId_DEFAULT int64 + +func (p *TAlterTabletReqV2) GetJobId() (v int64) { + if !p.IsSetJobId() { + return TAlterTabletReqV2_JobId_DEFAULT + } + return *p.JobId +} + +var TAlterTabletReqV2_Expiration_DEFAULT int64 + +func (p *TAlterTabletReqV2) GetExpiration() (v int64) { + if !p.IsSetExpiration() { + return TAlterTabletReqV2_Expiration_DEFAULT + } + return *p.Expiration +} + +var TAlterTabletReqV2_StorageVaultId_DEFAULT string + +func (p *TAlterTabletReqV2) GetStorageVaultId() (v string) { + if !p.IsSetStorageVaultId() { + return TAlterTabletReqV2_StorageVaultId_DEFAULT + } + return *p.StorageVaultId +} func (p *TAlterTabletReqV2) SetBaseTabletId(val types.TTabletId) { p.BaseTabletId = val } @@ -7353,19 +8805,31 @@ func (p *TAlterTabletReqV2) SetColumns(val []*descriptors.TColumn) { func (p *TAlterTabletReqV2) SetBeExecVersion(val int32) { p.BeExecVersion = val } +func (p *TAlterTabletReqV2) SetJobId(val *int64) { + p.JobId = val +} +func (p *TAlterTabletReqV2) SetExpiration(val *int64) { + p.Expiration = val +} +func (p *TAlterTabletReqV2) SetStorageVaultId(val *string) { + p.StorageVaultId = val +} var fieldIDToName_TAlterTabletReqV2 = map[int16]string{ - 1: "base_tablet_id", - 2: "new_tablet_id", - 3: "base_schema_hash", - 4: "new_schema_hash", - 5: "alter_version", - 6: 
"alter_version_hash", - 7: "materialized_view_params", - 8: "alter_tablet_type", - 9: "desc_tbl", - 10: "columns", - 11: "be_exec_version", + 1: "base_tablet_id", + 2: "new_tablet_id", + 3: "base_schema_hash", + 4: "new_schema_hash", + 5: "alter_version", + 6: "alter_version_hash", + 7: "materialized_view_params", + 8: "alter_tablet_type", + 9: "desc_tbl", + 10: "columns", + 11: "be_exec_version", + 1000: "job_id", + 1001: "expiration", + 1002: "storage_vault_id", } func (p *TAlterTabletReqV2) IsSetAlterVersion() bool { @@ -7396,6 +8860,18 @@ func (p *TAlterTabletReqV2) IsSetBeExecVersion() bool { return p.BeExecVersion != TAlterTabletReqV2_BeExecVersion_DEFAULT } +func (p *TAlterTabletReqV2) IsSetJobId() bool { + return p.JobId != nil +} + +func (p *TAlterTabletReqV2) IsSetExpiration() bool { + return p.Expiration != nil +} + +func (p *TAlterTabletReqV2) IsSetStorageVaultId() bool { + return p.StorageVaultId != nil +} + func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -7425,10 +8901,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBaseTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -7436,10 +8910,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNewTabletId_ = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -7447,10 +8919,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBaseSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -7458,87 +8928,94 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNewSchemaHash_ = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { 
goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.LIST { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1002: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1002(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7585,134 +9062,190 @@ RequiredFieldNotSetError: } func (p *TAlterTabletReqV2) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BaseTabletId = v + _field = v } + p.BaseTabletId = _field return nil } - func (p *TAlterTabletReqV2) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.NewTabletId_ = v + _field = v } + p.NewTabletId_ = _field return nil } - func (p *TAlterTabletReqV2) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BaseSchemaHash = v + _field = v } + p.BaseSchemaHash = _field return nil } - func (p *TAlterTabletReqV2) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NewSchemaHash_ = v + _field = v } + p.NewSchemaHash_ = _field return nil } - func (p *TAlterTabletReqV2) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AlterVersion = &v + _field = &v } + p.AlterVersion = _field return nil } - func (p *TAlterTabletReqV2) ReadField6(iprot thrift.TProtocol) error { + + var _field *types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AlterVersionHash = &v + _field = &v } + p.AlterVersionHash = _field return nil } - func (p *TAlterTabletReqV2) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.MaterializedViewParams = make([]*TAlterMaterializedViewParam, 0, size) + _field := make([]*TAlterMaterializedViewParam, 0, size) + values := make([]TAlterMaterializedViewParam, size) for i := 0; i < size; i++ { - _elem := NewTAlterMaterializedViewParam() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.MaterializedViewParams = 
append(p.MaterializedViewParams, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.MaterializedViewParams = _field return nil } - func (p *TAlterTabletReqV2) ReadField8(iprot thrift.TProtocol) error { + + var _field TAlterTabletType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.AlterTabletType = TAlterTabletType(v) + _field = TAlterTabletType(v) } + p.AlterTabletType = _field return nil } - func (p *TAlterTabletReqV2) ReadField9(iprot thrift.TProtocol) error { - p.DescTbl = descriptors.NewTDescriptorTable() - if err := p.DescTbl.Read(iprot); err != nil { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { return err } + p.DescTbl = _field return nil } - func (p *TAlterTabletReqV2) ReadField10(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TAlterTabletReqV2) ReadField11(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BeExecVersion = v + _field = v } + p.BeExecVersion = _field return nil } +func (p *TAlterTabletReqV2) ReadField1000(iprot thrift.TProtocol) error { -func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TAlterTabletReqV2"); err != nil { - goto WriteStructBeginError + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError + p.JobId = _field + return nil +} +func (p *TAlterTabletReqV2) ReadField1001(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Expiration = _field + return nil +} +func (p *TAlterTabletReqV2) ReadField1002(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.StorageVaultId = _field + return nil +} + +func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TAlterTabletReqV2"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } if err = p.writeField2(oprot); err != nil { fieldId = 2 @@ -7754,7 +9287,18 @@ func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 + goto WriteFieldError + } + if err = p.writeField1002(oprot); err != nil { + fieldId = 1002 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7990,11 +9534,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } 
+func (p *TAlterTabletReqV2) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetJobId() { + if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TAlterTabletReqV2) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetExpiration() { + if err = oprot.WriteFieldBegin("expiration", thrift.I64, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Expiration); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) +} + +func (p *TAlterTabletReqV2) writeField1002(oprot thrift.TProtocol) (err error) { + if p.IsSetStorageVaultId() { + if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 1002); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.StorageVaultId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1002 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1002 end error: ", p), err) +} + func (p *TAlterTabletReqV2) String() string { if p == nil { return "" } return fmt.Sprintf("TAlterTabletReqV2(%+v)", *p) + } func (p *TAlterTabletReqV2) DeepEqual(ano *TAlterTabletReqV2) bool { @@ -8036,6 +9638,15 @@ func (p *TAlterTabletReqV2) DeepEqual(ano *TAlterTabletReqV2) bool { if !p.Field11DeepEqual(ano.BeExecVersion) { return false } + if !p.Field1000DeepEqual(ano.JobId) { + return false + } + if !p.Field1001DeepEqual(ano.Expiration) { + return false + } + if !p.Field1002DeepEqual(ano.StorageVaultId) { + return false + } return true } @@ -8138,6 +9749,42 @@ func (p *TAlterTabletReqV2) Field11DeepEqual(src int32) bool { } return true } +func (p *TAlterTabletReqV2) Field1000DeepEqual(src *int64) bool { + + if p.JobId == src { + return true + } else if p.JobId == nil || src == nil { + return false + } + if *p.JobId != *src { + return false + } + return true +} +func (p *TAlterTabletReqV2) Field1001DeepEqual(src *int64) bool { + + if p.Expiration == src { + return true + } else if p.Expiration == nil || src == nil { + return false + } + if *p.Expiration != *src { + return false + } + return true +} +func (p *TAlterTabletReqV2) Field1002DeepEqual(src *string) bool { + + if p.StorageVaultId == src { + return true + } else if p.StorageVaultId == nil || src == nil { + return false + } + if strings.Compare(*p.StorageVaultId, *src) != 0 { + return false + } + return true +} type TAlterInvertedIndexReq struct { TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` @@ -8161,11 +9808,8 @@ func NewTAlterInvertedIndexReq() *TAlterInvertedIndexReq { } func (p *TAlterInvertedIndexReq) InitDefault() { - *p = TAlterInvertedIndexReq{ - - AlterTabletType: 
TAlterTabletType_SCHEMA_CHANGE, - IsDropOp: false, - } + p.AlterTabletType = TAlterTabletType_SCHEMA_CHANGE + p.IsDropOp = false } func (p *TAlterInvertedIndexReq) GetTabletId() (v types.TTabletId) { @@ -8350,10 +9994,8 @@ func (p *TAlterInvertedIndexReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -8361,97 +10003,78 @@ func (p *TAlterInvertedIndexReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8488,125 +10111,149 @@ RequiredFieldNotSetError: } func (p *TAlterInvertedIndexReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField2(iprot thrift.TProtocol) error { + + var 
_field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AlterVersion = &v + _field = &v } + p.AlterVersion = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField4(iprot thrift.TProtocol) error { + + var _field TAlterTabletType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.AlterTabletType = TAlterTabletType(v) + _field = TAlterTabletType(v) } + p.AlterTabletType = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDropOp = v + _field = v } + p.IsDropOp = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.AlterInvertedIndexes = make([]*descriptors.TOlapTableIndex, 0, size) + _field := make([]*descriptors.TOlapTableIndex, 0, size) + values := make([]descriptors.TOlapTableIndex, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTableIndex() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.AlterInvertedIndexes = append(p.AlterInvertedIndexes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.AlterInvertedIndexes = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.IndexesDesc = make([]*descriptors.TOlapTableIndex, 0, size) + _field := make([]*descriptors.TOlapTableIndex, 0, size) + values := make([]descriptors.TOlapTableIndex, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTableIndex() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.IndexesDesc = append(p.IndexesDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.IndexesDesc = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.JobId = &v + _field = &v } + p.JobId = _field return nil } - func (p *TAlterInvertedIndexReq) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Expiration = &v + _field = &v } + p.Expiration = _field return nil } @@ -8656,7 +10303,6 @@ func (p *TAlterInvertedIndexReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - } if err = 
oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8890,6 +10536,7 @@ func (p *TAlterInvertedIndexReq) String() string { return "" } return fmt.Sprintf("TAlterInvertedIndexReq(%+v)", *p) + } func (p *TAlterInvertedIndexReq) DeepEqual(ano *TAlterInvertedIndexReq) bool { @@ -9045,7 +10692,6 @@ func NewTTabletGcBinlogInfo() *TTabletGcBinlogInfo { } func (p *TTabletGcBinlogInfo) InitDefault() { - *p = TTabletGcBinlogInfo{} } var TTabletGcBinlogInfo_TabletId_DEFAULT types.TTabletId @@ -9109,27 +10755,22 @@ func (p *TTabletGcBinlogInfo) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9155,20 +10796,25 @@ ReadStructEndError: } func (p *TTabletGcBinlogInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.TabletId = _field return nil } - func (p *TTabletGcBinlogInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = &v + _field = &v } + p.Version = _field return nil } @@ -9186,7 +10832,6 @@ func (p *TTabletGcBinlogInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9248,6 +10893,7 @@ func (p *TTabletGcBinlogInfo) String() string { return "" } return fmt.Sprintf("TTabletGcBinlogInfo(%+v)", *p) + } func (p *TTabletGcBinlogInfo) DeepEqual(ano *TTabletGcBinlogInfo) bool { @@ -9299,7 +10945,6 @@ func NewTGcBinlogReq() *TGcBinlogReq { } func (p *TGcBinlogReq) InitDefault() { - *p = TGcBinlogReq{} } var TGcBinlogReq_TabletGcBinlogInfos_DEFAULT []*TTabletGcBinlogInfo @@ -9346,17 +10991,14 @@ func (p *TGcBinlogReq) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9386,18 +11028,22 @@ func (p *TGcBinlogReq) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TabletGcBinlogInfos = make([]*TTabletGcBinlogInfo, 0, size) + _field := make([]*TTabletGcBinlogInfo, 0, size) + values := make([]TTabletGcBinlogInfo, size) for i := 0; i < size; i++ { - _elem := NewTTabletGcBinlogInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TabletGcBinlogInfos = append(p.TabletGcBinlogInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletGcBinlogInfos = _field return nil } @@ -9411,7 +11057,6 @@ func (p *TGcBinlogReq) Write(oprot 
thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9462,6 +11107,7 @@ func (p *TGcBinlogReq) String() string { return "" } return fmt.Sprintf("TGcBinlogReq(%+v)", *p) + } func (p *TGcBinlogReq) DeepEqual(ano *TGcBinlogReq) bool { @@ -9503,7 +11149,6 @@ func NewTStorageMigrationReqV2() *TStorageMigrationReqV2 { } func (p *TStorageMigrationReqV2) InitDefault() { - *p = TStorageMigrationReqV2{} } var TStorageMigrationReqV2_BaseTabletId_DEFAULT types.TTabletId @@ -9618,57 +11263,46 @@ func (p *TStorageMigrationReqV2) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9694,47 +11328,58 @@ ReadStructEndError: } func (p *TStorageMigrationReqV2) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BaseTabletId = &v + _field = &v } + p.BaseTabletId = _field return nil } - func (p *TStorageMigrationReqV2) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.NewTabletId_ = &v + _field = &v } + p.NewTabletId_ = _field return nil } - func (p *TStorageMigrationReqV2) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BaseSchemaHash = &v + _field = &v } + p.BaseSchemaHash = _field return nil } - func (p *TStorageMigrationReqV2) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NewSchemaHash_ = &v + _field = &v } + p.NewSchemaHash_ = _field return nil } - func (p *TStorageMigrationReqV2) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MigrationVersion = &v + _field = &v } + p.MigrationVersion = _field return nil } @@ -9764,7 +11409,6 @@ func (p *TStorageMigrationReqV2) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto 
WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9883,6 +11527,7 @@ func (p *TStorageMigrationReqV2) String() string { return "" } return fmt.Sprintf("TStorageMigrationReqV2(%+v)", *p) + } func (p *TStorageMigrationReqV2) DeepEqual(ano *TStorageMigrationReqV2) bool { @@ -9980,7 +11625,6 @@ func NewTClusterInfo() *TClusterInfo { } func (p *TClusterInfo) InitDefault() { - *p = TClusterInfo{} } func (p *TClusterInfo) GetUser() (v string) { @@ -10029,10 +11673,8 @@ func (p *TClusterInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -10040,17 +11682,14 @@ func (p *TClusterInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPassword = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10087,20 +11726,25 @@ RequiredFieldNotSetError: } func (p *TClusterInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TClusterInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Password = v + _field = v } + p.Password = _field return nil } @@ -10118,7 +11762,6 @@ func (p *TClusterInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10176,6 +11819,7 @@ func (p *TClusterInfo) String() string { return "" } return fmt.Sprintf("TClusterInfo(%+v)", *p) + } func (p *TClusterInfo) DeepEqual(ano *TClusterInfo) bool { @@ -10225,6 +11869,8 @@ type TPushReq struct { BrokerScanRange *plannodes.TBrokerScanRange `thrift:"broker_scan_range,14,optional" frugal:"14,optional,plannodes.TBrokerScanRange" json:"broker_scan_range,omitempty"` DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,15,optional" frugal:"15,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` ColumnsDesc []*descriptors.TColumn `thrift:"columns_desc,16,optional" frugal:"16,optional,list" json:"columns_desc,omitempty"` + StorageVaultId *string `thrift:"storage_vault_id,17,optional" frugal:"17,optional,string" json:"storage_vault_id,omitempty"` + SchemaVersion *int32 `thrift:"schema_version,18,optional" frugal:"18,optional,i32" json:"schema_version,omitempty"` } func NewTPushReq() *TPushReq { @@ -10232,7 +11878,6 @@ func NewTPushReq() *TPushReq { } func (p *TPushReq) InitDefault() { - *p = TPushReq{} } func (p *TPushReq) GetTabletId() (v types.TTabletId) { @@ -10348,6 +11993,24 @@ func (p *TPushReq) GetColumnsDesc() (v []*descriptors.TColumn) { } return p.ColumnsDesc } + +var TPushReq_StorageVaultId_DEFAULT string + +func (p *TPushReq) GetStorageVaultId() (v string) { + if !p.IsSetStorageVaultId() { + return TPushReq_StorageVaultId_DEFAULT + } + return *p.StorageVaultId +} + +var TPushReq_SchemaVersion_DEFAULT int32 + +func (p *TPushReq) GetSchemaVersion() (v int32) { + if !p.IsSetSchemaVersion() { + return 
TPushReq_SchemaVersion_DEFAULT + } + return *p.SchemaVersion +} func (p *TPushReq) SetTabletId(val types.TTabletId) { p.TabletId = val } @@ -10396,6 +12059,12 @@ func (p *TPushReq) SetDescTbl(val *descriptors.TDescriptorTable) { func (p *TPushReq) SetColumnsDesc(val []*descriptors.TColumn) { p.ColumnsDesc = val } +func (p *TPushReq) SetStorageVaultId(val *string) { + p.StorageVaultId = val +} +func (p *TPushReq) SetSchemaVersion(val *int32) { + p.SchemaVersion = val +} var fieldIDToName_TPushReq = map[int16]string{ 1: "tablet_id", @@ -10414,6 +12083,8 @@ var fieldIDToName_TPushReq = map[int16]string{ 14: "broker_scan_range", 15: "desc_tbl", 16: "columns_desc", + 17: "storage_vault_id", + 18: "schema_version", } func (p *TPushReq) IsSetHttpFilePath() bool { @@ -10456,6 +12127,14 @@ func (p *TPushReq) IsSetColumnsDesc() bool { return p.ColumnsDesc != nil } +func (p *TPushReq) IsSetStorageVaultId() bool { + return p.StorageVaultId != nil +} + +func (p *TPushReq) IsSetSchemaVersion() bool { + return p.SchemaVersion != nil +} + func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -10487,10 +12166,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -10498,10 +12175,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -10509,10 +12184,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -10520,10 +12193,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -10531,10 +12202,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTimeout = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { @@ -10542,117 +12211,110 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPushType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.LIST { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { + } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } - + case 17: + if fieldTypeId == thrift.STRING { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.I32 { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10709,166 +12371,219 @@ RequiredFieldNotSetError: } func (p *TPushReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TPushReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TPushReq) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TVersion if v, err := iprot.ReadI64(); err != nil 
{ return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TPushReq) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = v + _field = v } + p.VersionHash = _field return nil } - func (p *TPushReq) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Timeout = v + _field = v } + p.Timeout = _field return nil } - func (p *TPushReq) ReadField6(iprot thrift.TProtocol) error { + + var _field types.TPushType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.PushType = types.TPushType(v) + _field = types.TPushType(v) } + p.PushType = _field return nil } - func (p *TPushReq) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HttpFilePath = &v + _field = &v } + p.HttpFilePath = _field return nil } - func (p *TPushReq) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.HttpFileSize = &v + _field = &v } + p.HttpFileSize = _field return nil } - func (p *TPushReq) ReadField9(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DeleteConditions = make([]*palointernalservice.TCondition, 0, size) + _field := make([]*palointernalservice.TCondition, 0, size) + values := make([]palointernalservice.TCondition, size) for i := 0; i < size; i++ { - _elem := palointernalservice.NewTCondition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.DeleteConditions = append(p.DeleteConditions, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DeleteConditions = _field return nil } - func (p *TPushReq) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NeedDecompress = &v + _field = &v } + p.NeedDecompress = _field return nil } - func (p *TPushReq) ReadField11(iprot thrift.TProtocol) error { + + var _field *types.TTransactionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TransactionId = &v + _field = &v } + p.TransactionId = _field return nil } - func (p *TPushReq) ReadField12(iprot thrift.TProtocol) error { + + var _field *types.TPartitionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = &v + _field = &v } + p.PartitionId = _field return nil } - func (p *TPushReq) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsSchemaChanging = &v + _field = &v } + p.IsSchemaChanging = _field return nil } - func (p *TPushReq) ReadField14(iprot thrift.TProtocol) error { - p.BrokerScanRange = plannodes.NewTBrokerScanRange() - if err := p.BrokerScanRange.Read(iprot); err != nil { + _field := plannodes.NewTBrokerScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerScanRange = _field return nil } - func (p *TPushReq) ReadField15(iprot thrift.TProtocol) error { - p.DescTbl = descriptors.NewTDescriptorTable() - if err := p.DescTbl.Read(iprot); err != nil { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { return err } + p.DescTbl = _field return nil } - func (p *TPushReq) ReadField16(iprot 
thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnsDesc = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnsDesc = append(p.ColumnsDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnsDesc = _field + return nil +} +func (p *TPushReq) ReadField17(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.StorageVaultId = _field + return nil +} +func (p *TPushReq) ReadField18(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SchemaVersion = _field return nil } @@ -10942,7 +12657,14 @@ func (p *TPushReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 16 goto WriteFieldError } - + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11269,11 +12991,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } +func (p *TPushReq) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetStorageVaultId() { + if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.StorageVaultId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TPushReq) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaVersion() { + if err = oprot.WriteFieldBegin("schema_version", thrift.I32, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SchemaVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + func (p *TPushReq) String() string { if p == nil { return "" } return fmt.Sprintf("TPushReq(%+v)", *p) + } func (p *TPushReq) DeepEqual(ano *TPushReq) bool { @@ -11330,6 +13091,12 @@ func (p *TPushReq) DeepEqual(ano *TPushReq) bool { if !p.Field16DeepEqual(ano.ColumnsDesc) { return false } + if !p.Field17DeepEqual(ano.StorageVaultId) { + return false + } + if !p.Field18DeepEqual(ano.SchemaVersion) { + return false + } return true } @@ -11487,13 +13254,37 @@ func (p *TPushReq) Field16DeepEqual(src []*descriptors.TColumn) bool { } return true } +func (p *TPushReq) Field17DeepEqual(src *string) bool { + + if p.StorageVaultId == src { + return true + } else if p.StorageVaultId == nil || src == nil { + return false + } + if strings.Compare(*p.StorageVaultId, *src) != 0 { + return false + } + return true +} 
+func (p *TPushReq) Field18DeepEqual(src *int32) bool { + + if p.SchemaVersion == src { + return true + } else if p.SchemaVersion == nil || src == nil { + return false + } + if *p.SchemaVersion != *src { + return false + } + return true +} type TCloneReq struct { TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"` SrcBackends []*types.TBackend `thrift:"src_backends,3,required" frugal:"3,required,list" json:"src_backends"` StorageMedium *types.TStorageMedium `thrift:"storage_medium,4,optional" frugal:"4,optional,TStorageMedium" json:"storage_medium,omitempty"` - CommittedVersion *types.TVersion `thrift:"committed_version,5,optional" frugal:"5,optional,i64" json:"committed_version,omitempty"` + Version *types.TVersion `thrift:"version,5,optional" frugal:"5,optional,i64" json:"version,omitempty"` CommittedVersionHash *types.TVersionHash `thrift:"committed_version_hash,6,optional" frugal:"6,optional,i64" json:"committed_version_hash,omitempty"` TaskVersion *int32 `thrift:"task_version,7,optional" frugal:"7,optional,i32" json:"task_version,omitempty"` SrcPathHash *int64 `thrift:"src_path_hash,8,optional" frugal:"8,optional,i64" json:"src_path_hash,omitempty"` @@ -11501,20 +13292,20 @@ type TCloneReq struct { TimeoutS *int32 `thrift:"timeout_s,10,optional" frugal:"10,optional,i32" json:"timeout_s,omitempty"` ReplicaId types.TReplicaId `thrift:"replica_id,11,optional" frugal:"11,optional,i64" json:"replica_id,omitempty"` PartitionId *int64 `thrift:"partition_id,12,optional" frugal:"12,optional,i64" json:"partition_id,omitempty"` + TableId int64 `thrift:"table_id,13,optional" frugal:"13,optional,i64" json:"table_id,omitempty"` } func NewTCloneReq() *TCloneReq { return &TCloneReq{ ReplicaId: 0, + TableId: -1, } } func (p *TCloneReq) InitDefault() { - *p = TCloneReq{ - - ReplicaId: 0, - } + p.ReplicaId = 0 + p.TableId = -1 } func (p *TCloneReq) GetTabletId() (v types.TTabletId) { @@ -11538,13 +13329,13 @@ func (p *TCloneReq) GetStorageMedium() (v types.TStorageMedium) { return *p.StorageMedium } -var TCloneReq_CommittedVersion_DEFAULT types.TVersion +var TCloneReq_Version_DEFAULT types.TVersion -func (p *TCloneReq) GetCommittedVersion() (v types.TVersion) { - if !p.IsSetCommittedVersion() { - return TCloneReq_CommittedVersion_DEFAULT +func (p *TCloneReq) GetVersion() (v types.TVersion) { + if !p.IsSetVersion() { + return TCloneReq_Version_DEFAULT } - return *p.CommittedVersion + return *p.Version } var TCloneReq_CommittedVersionHash_DEFAULT types.TVersionHash @@ -11609,6 +13400,15 @@ func (p *TCloneReq) GetPartitionId() (v int64) { } return *p.PartitionId } + +var TCloneReq_TableId_DEFAULT int64 = -1 + +func (p *TCloneReq) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TCloneReq_TableId_DEFAULT + } + return p.TableId +} func (p *TCloneReq) SetTabletId(val types.TTabletId) { p.TabletId = val } @@ -11621,8 +13421,8 @@ func (p *TCloneReq) SetSrcBackends(val []*types.TBackend) { func (p *TCloneReq) SetStorageMedium(val *types.TStorageMedium) { p.StorageMedium = val } -func (p *TCloneReq) SetCommittedVersion(val *types.TVersion) { - p.CommittedVersion = val +func (p *TCloneReq) SetVersion(val *types.TVersion) { + p.Version = val } func (p *TCloneReq) SetCommittedVersionHash(val *types.TVersionHash) { p.CommittedVersionHash = val @@ -11645,13 +13445,16 @@ func (p *TCloneReq) SetReplicaId(val types.TReplicaId) { func (p *TCloneReq) 
SetPartitionId(val *int64) { p.PartitionId = val } +func (p *TCloneReq) SetTableId(val int64) { + p.TableId = val +} var fieldIDToName_TCloneReq = map[int16]string{ 1: "tablet_id", 2: "schema_hash", 3: "src_backends", 4: "storage_medium", - 5: "committed_version", + 5: "version", 6: "committed_version_hash", 7: "task_version", 8: "src_path_hash", @@ -11659,14 +13462,15 @@ var fieldIDToName_TCloneReq = map[int16]string{ 10: "timeout_s", 11: "replica_id", 12: "partition_id", + 13: "table_id", } func (p *TCloneReq) IsSetStorageMedium() bool { return p.StorageMedium != nil } -func (p *TCloneReq) IsSetCommittedVersion() bool { - return p.CommittedVersion != nil +func (p *TCloneReq) IsSetVersion() bool { + return p.Version != nil } func (p *TCloneReq) IsSetCommittedVersionHash() bool { @@ -11697,6 +13501,10 @@ func (p *TCloneReq) IsSetPartitionId() bool { return p.PartitionId != nil } +func (p *TCloneReq) IsSetTableId() bool { + return p.TableId != TCloneReq_TableId_DEFAULT +} + func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -11725,10 +13533,8 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -11736,10 +13542,8 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -11747,107 +13551,94 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcBackends = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I32 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I64 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11889,122 +13680,159 @@ RequiredFieldNotSetError: } func (p *TCloneReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TCloneReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TCloneReq) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SrcBackends = make([]*types.TBackend, 0, size) + _field := make([]*types.TBackend, 0, size) + values := make([]types.TBackend, size) for i := 0; i < size; i++ { - _elem := types.NewTBackend() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SrcBackends = append(p.SrcBackends, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SrcBackends = _field return nil } - func (p *TCloneReq) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TStorageMedium if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TStorageMedium(v) - p.StorageMedium = &tmp + _field = &tmp } + p.StorageMedium = _field return nil } - func (p *TCloneReq) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CommittedVersion = &v + _field = &v } + p.Version = _field return nil } - func (p *TCloneReq) ReadField6(iprot thrift.TProtocol) error { + + var _field *types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CommittedVersionHash = &v + _field = &v } + p.CommittedVersionHash = _field return nil } - func (p *TCloneReq) ReadField7(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TaskVersion = &v + _field = &v } + p.TaskVersion = _field return nil } - func (p *TCloneReq) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return 
err } else { - p.SrcPathHash = &v + _field = &v } + p.SrcPathHash = _field return nil } - func (p *TCloneReq) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DestPathHash = &v + _field = &v } + p.DestPathHash = _field return nil } - func (p *TCloneReq) ReadField10(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TimeoutS = &v + _field = &v } + p.TimeoutS = _field return nil } - func (p *TCloneReq) ReadField11(iprot thrift.TProtocol) error { + + var _field types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = v + _field = v } + p.ReplicaId = _field return nil } - func (p *TCloneReq) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = &v + _field = &v } + p.PartitionId = _field + return nil +} +func (p *TCloneReq) ReadField13(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TableId = _field return nil } @@ -12062,7 +13890,10 @@ func (p *TCloneReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } - + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12160,11 +13991,11 @@ WriteFieldEndError: } func (p *TCloneReq) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetCommittedVersion() { - if err = oprot.WriteFieldBegin("committed_version", thrift.I64, 5); err != nil { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I64, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.CommittedVersion); err != nil { + if err := oprot.WriteI64(*p.Version); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12311,11 +14142,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TCloneReq) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + func (p *TCloneReq) String() string { if p == nil { return "" } return fmt.Sprintf("TCloneReq(%+v)", *p) + } func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool { @@ -12336,7 +14187,7 @@ func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool { if !p.Field4DeepEqual(ano.StorageMedium) { return false } - if !p.Field5DeepEqual(ano.CommittedVersion) { + if !p.Field5DeepEqual(ano.Version) { return false } if !p.Field6DeepEqual(ano.CommittedVersionHash) { @@ -12360,6 +14211,9 @@ func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool { if !p.Field12DeepEqual(ano.PartitionId) { return false } + if !p.Field13DeepEqual(ano.TableId) { + return false + } return true } @@ -12404,12 +14258,12 @@ func (p *TCloneReq) Field4DeepEqual(src *types.TStorageMedium) bool { } func (p *TCloneReq) Field5DeepEqual(src *types.TVersion) bool { - 
if p.CommittedVersion == src { + if p.Version == src { return true - } else if p.CommittedVersion == nil || src == nil { + } else if p.Version == nil || src == nil { return false } - if *p.CommittedVersion != *src { + if *p.Version != *src { return false } return true @@ -12493,6 +14347,13 @@ func (p *TCloneReq) Field12DeepEqual(src *int64) bool { } return true } +func (p *TCloneReq) Field13DeepEqual(src int64) bool { + + if p.TableId != src { + return false + } + return true +} type TCompactionReq struct { TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"` @@ -12505,7 +14366,6 @@ func NewTCompactionReq() *TCompactionReq { } func (p *TCompactionReq) InitDefault() { - *p = TCompactionReq{} } var TCompactionReq_TabletId_DEFAULT types.TTabletId @@ -12586,37 +14446,30 @@ func (p *TCompactionReq) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12642,29 +14495,36 @@ ReadStructEndError: } func (p *TCompactionReq) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.TabletId = _field return nil } - func (p *TCompactionReq) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = &v + _field = &v } + p.SchemaHash = _field return nil } - func (p *TCompactionReq) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Type = &v + _field = &v } + p.Type = _field return nil } @@ -12686,7 +14546,6 @@ func (p *TCompactionReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12767,6 +14626,7 @@ func (p *TCompactionReq) String() string { return "" } return fmt.Sprintf("TCompactionReq(%+v)", *p) + } func (p *TCompactionReq) DeepEqual(ano *TCompactionReq) bool { @@ -12836,7 +14696,6 @@ func NewTStorageMediumMigrateReq() *TStorageMediumMigrateReq { } func (p *TStorageMediumMigrateReq) InitDefault() { - *p = TStorageMediumMigrateReq{} } func (p *TStorageMediumMigrateReq) GetTabletId() (v types.TTabletId) { @@ -12911,10 +14770,8 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == 
thrift.I32 { @@ -12922,10 +14779,8 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -12933,27 +14788,22 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStorageMedium = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12995,38 +14845,47 @@ RequiredFieldNotSetError: } func (p *TStorageMediumMigrateReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TStorageMediumMigrateReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TStorageMediumMigrateReq) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TStorageMedium if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StorageMedium = types.TStorageMedium(v) + _field = types.TStorageMedium(v) } + p.StorageMedium = _field return nil } - func (p *TStorageMediumMigrateReq) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DataDir = &v + _field = &v } + p.DataDir = _field return nil } @@ -13052,7 +14911,6 @@ func (p *TStorageMediumMigrateReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13146,6 +15004,7 @@ func (p *TStorageMediumMigrateReq) String() string { return "" } return fmt.Sprintf("TStorageMediumMigrateReq(%+v)", *p) + } func (p *TStorageMediumMigrateReq) DeepEqual(ano *TStorageMediumMigrateReq) bool { @@ -13215,7 +15074,6 @@ func NewTCancelDeleteDataReq() *TCancelDeleteDataReq { } func (p *TCancelDeleteDataReq) InitDefault() { - *p = TCancelDeleteDataReq{} } func (p *TCancelDeleteDataReq) GetTabletId() (v types.TTabletId) { @@ -13282,10 +15140,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -13293,10 +15149,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ 
-13304,10 +15158,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -13315,17 +15167,14 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13372,38 +15221,47 @@ RequiredFieldNotSetError: } func (p *TCancelDeleteDataReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TCancelDeleteDataReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TCancelDeleteDataReq) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TCancelDeleteDataReq) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = v + _field = v } + p.VersionHash = _field return nil } @@ -13429,7 +15287,6 @@ func (p *TCancelDeleteDataReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13521,6 +15378,7 @@ func (p *TCancelDeleteDataReq) String() string { return "" } return fmt.Sprintf("TCancelDeleteDataReq(%+v)", *p) + } func (p *TCancelDeleteDataReq) DeepEqual(ano *TCancelDeleteDataReq) bool { @@ -13585,7 +15443,6 @@ func NewTCheckConsistencyReq() *TCheckConsistencyReq { } func (p *TCheckConsistencyReq) InitDefault() { - *p = TCheckConsistencyReq{} } func (p *TCheckConsistencyReq) GetTabletId() (v types.TTabletId) { @@ -13652,10 +15509,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -13663,10 +15518,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -13674,10 +15527,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -13685,17 +15536,14 @@ func (p 
*TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13742,38 +15590,47 @@ RequiredFieldNotSetError: } func (p *TCheckConsistencyReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TCheckConsistencyReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TCheckConsistencyReq) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TCheckConsistencyReq) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = v + _field = v } + p.VersionHash = _field return nil } @@ -13799,7 +15656,6 @@ func (p *TCheckConsistencyReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13891,6 +15747,7 @@ func (p *TCheckConsistencyReq) String() string { return "" } return fmt.Sprintf("TCheckConsistencyReq(%+v)", *p) + } func (p *TCheckConsistencyReq) DeepEqual(ano *TCheckConsistencyReq) bool { @@ -13960,10 +15817,7 @@ func NewTUploadReq() *TUploadReq { } func (p *TUploadReq) InitDefault() { - *p = TUploadReq{ - - StorageBackend: types.TStorageBackendType_BROKER, - } + p.StorageBackend = types.TStorageBackendType_BROKER } func (p *TUploadReq) GetJobId() (v int64) { @@ -14081,10 +15935,8 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.MAP { @@ -14092,10 +15944,8 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcDestMap = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -14103,47 +15953,38 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrokerAddr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - 
goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14185,20 +16026,22 @@ RequiredFieldNotSetError: } func (p *TUploadReq) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.JobId = v + _field = v } + p.JobId = _field return nil } - func (p *TUploadReq) ReadField2(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.SrcDestMap = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -14214,28 +16057,28 @@ func (p *TUploadReq) ReadField2(iprot thrift.TProtocol) error { _val = v } - p.SrcDestMap[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.SrcDestMap = _field return nil } - func (p *TUploadReq) ReadField3(iprot thrift.TProtocol) error { - p.BrokerAddr = types.NewTNetworkAddress() - if err := p.BrokerAddr.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerAddr = _field return nil } - func (p *TUploadReq) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.BrokerProp = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -14251,29 +16094,34 @@ func (p *TUploadReq) ReadField4(iprot thrift.TProtocol) error { _val = v } - p.BrokerProp[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.BrokerProp = _field return nil } - func (p *TUploadReq) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TStorageBackendType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StorageBackend = types.TStorageBackendType(v) + _field = types.TStorageBackendType(v) } + p.StorageBackend = _field return nil } - func (p *TUploadReq) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Location = &v + _field = &v } + p.Location = _field return nil } @@ -14307,7 +16155,6 @@ func (p *TUploadReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14351,11 +16198,9 @@ func (p *TUploadReq) writeField2(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.SrcDestMap { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -14399,11 +16244,9 @@ func (p *TUploadReq) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.BrokerProp { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -14465,6 +16308,7 @@ func (p *TUploadReq) String() string { return "" } return 
fmt.Sprintf("TUploadReq(%+v)", *p) + } func (p *TUploadReq) DeepEqual(ano *TUploadReq) bool { @@ -14569,7 +16413,6 @@ func NewTRemoteTabletSnapshot() *TRemoteTabletSnapshot { } func (p *TRemoteTabletSnapshot) InitDefault() { - *p = TRemoteTabletSnapshot{} } var TRemoteTabletSnapshot_LocalTabletId_DEFAULT int64 @@ -14718,77 +16561,62 @@ func (p *TRemoteTabletSnapshot) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14814,64 +16642,77 @@ ReadStructEndError: } func (p *TRemoteTabletSnapshot) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LocalTabletId = &v + _field = &v } + p.LocalTabletId = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LocalSnapshotPath = &v + _field = &v } + p.LocalSnapshotPath = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteTabletId = &v + _field = &v } + p.RemoteTabletId = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteBeId = &v + _field = &v } + p.RemoteBeId = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField5(iprot thrift.TProtocol) error { - p.RemoteBeAddr = 
types.NewTNetworkAddress() - if err := p.RemoteBeAddr.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.RemoteBeAddr = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RemoteSnapshotPath = &v + _field = &v } + p.RemoteSnapshotPath = _field return nil } - func (p *TRemoteTabletSnapshot) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RemoteToken = &v + _field = &v } + p.RemoteToken = _field return nil } @@ -14909,7 +16750,6 @@ func (p *TRemoteTabletSnapshot) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15066,6 +16906,7 @@ func (p *TRemoteTabletSnapshot) String() string { return "" } return fmt.Sprintf("TRemoteTabletSnapshot(%+v)", *p) + } func (p *TRemoteTabletSnapshot) DeepEqual(ano *TRemoteTabletSnapshot) bool { @@ -15196,10 +17037,7 @@ func NewTDownloadReq() *TDownloadReq { } func (p *TDownloadReq) InitDefault() { - *p = TDownloadReq{ - - StorageBackend: types.TStorageBackendType_BROKER, - } + p.StorageBackend = types.TStorageBackendType_BROKER } func (p *TDownloadReq) GetJobId() (v int64) { @@ -15334,10 +17172,8 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.MAP { @@ -15345,10 +17181,8 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcDestMap = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -15356,57 +17190,46 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrokerAddr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { 
goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15448,20 +17271,22 @@ RequiredFieldNotSetError: } func (p *TDownloadReq) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.JobId = v + _field = v } + p.JobId = _field return nil } - func (p *TDownloadReq) ReadField2(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.SrcDestMap = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -15477,28 +17302,28 @@ func (p *TDownloadReq) ReadField2(iprot thrift.TProtocol) error { _val = v } - p.SrcDestMap[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.SrcDestMap = _field return nil } - func (p *TDownloadReq) ReadField3(iprot thrift.TProtocol) error { - p.BrokerAddr = types.NewTNetworkAddress() - if err := p.BrokerAddr.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerAddr = _field return nil } - func (p *TDownloadReq) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.BrokerProp = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -15514,49 +17339,57 @@ func (p *TDownloadReq) ReadField4(iprot thrift.TProtocol) error { _val = v } - p.BrokerProp[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.BrokerProp = _field return nil } - func (p *TDownloadReq) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TStorageBackendType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StorageBackend = types.TStorageBackendType(v) + _field = types.TStorageBackendType(v) } + p.StorageBackend = _field return nil } - func (p *TDownloadReq) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Location = &v + _field = &v } + p.Location = _field return nil } - func (p *TDownloadReq) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RemoteTabletSnapshots = make([]*TRemoteTabletSnapshot, 0, size) + _field := make([]*TRemoteTabletSnapshot, 0, size) + values := make([]TRemoteTabletSnapshot, size) for i := 0; i < size; i++ { - _elem := NewTRemoteTabletSnapshot() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.RemoteTabletSnapshots = append(p.RemoteTabletSnapshots, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RemoteTabletSnapshots = _field return nil } @@ -15594,7 +17427,6 @@ func (p *TDownloadReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15638,11 +17470,9 @@ func (p *TDownloadReq) writeField2(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.SrcDestMap { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -15686,11 +17516,9 @@ func (p *TDownloadReq) writeField4(oprot thrift.TProtocol) (err error) 
{ return err } for k, v := range p.BrokerProp { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -15779,6 +17607,7 @@ func (p *TDownloadReq) String() string { return "" } return fmt.Sprintf("TDownloadReq(%+v)", *p) + } func (p *TDownloadReq) DeepEqual(ano *TDownloadReq) bool { @@ -15898,6 +17727,7 @@ type TSnapshotRequest struct { StartVersion *types.TVersion `thrift:"start_version,11,optional" frugal:"11,optional,i64" json:"start_version,omitempty"` EndVersion *types.TVersion `thrift:"end_version,12,optional" frugal:"12,optional,i64" json:"end_version,omitempty"` IsCopyBinlog *bool `thrift:"is_copy_binlog,13,optional" frugal:"13,optional,bool" json:"is_copy_binlog,omitempty"` + RefTabletId *types.TTabletId `thrift:"ref_tablet_id,14,optional" frugal:"14,optional,i64" json:"ref_tablet_id,omitempty"` } func NewTSnapshotRequest() *TSnapshotRequest { @@ -15908,10 +17738,7 @@ func NewTSnapshotRequest() *TSnapshotRequest { } func (p *TSnapshotRequest) InitDefault() { - *p = TSnapshotRequest{ - - PreferredSnapshotVersion: int32(types.TPREFER_SNAPSHOT_REQ_VERSION), - } + p.PreferredSnapshotVersion = int32(types.TPREFER_SNAPSHOT_REQ_VERSION) } func (p *TSnapshotRequest) GetTabletId() (v types.TTabletId) { @@ -16020,6 +17847,15 @@ func (p *TSnapshotRequest) GetIsCopyBinlog() (v bool) { } return *p.IsCopyBinlog } + +var TSnapshotRequest_RefTabletId_DEFAULT types.TTabletId + +func (p *TSnapshotRequest) GetRefTabletId() (v types.TTabletId) { + if !p.IsSetRefTabletId() { + return TSnapshotRequest_RefTabletId_DEFAULT + } + return *p.RefTabletId +} func (p *TSnapshotRequest) SetTabletId(val types.TTabletId) { p.TabletId = val } @@ -16059,6 +17895,9 @@ func (p *TSnapshotRequest) SetEndVersion(val *types.TVersion) { func (p *TSnapshotRequest) SetIsCopyBinlog(val *bool) { p.IsCopyBinlog = val } +func (p *TSnapshotRequest) SetRefTabletId(val *types.TTabletId) { + p.RefTabletId = val +} var fieldIDToName_TSnapshotRequest = map[int16]string{ 1: "tablet_id", @@ -16074,6 +17913,7 @@ var fieldIDToName_TSnapshotRequest = map[int16]string{ 11: "start_version", 12: "end_version", 13: "is_copy_binlog", + 14: "ref_tablet_id", } func (p *TSnapshotRequest) IsSetVersion() bool { @@ -16120,6 +17960,10 @@ func (p *TSnapshotRequest) IsSetIsCopyBinlog() bool { return p.IsCopyBinlog != nil } +func (p *TSnapshotRequest) IsSetRefTabletId() bool { + return p.RefTabletId != nil +} + func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -16147,10 +17991,8 @@ func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -16158,295 +18000,977 @@ func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != 
nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTabletId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetSchemaHash { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotRequest[fieldId])) +} + +func (p *TSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TabletId = _field + return nil +} +func (p *TSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.SchemaHash = _field + return nil +} +func (p *TSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} +func (p *TSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TVersionHash + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.VersionHash = _field + return nil +} +func (p *TSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Timeout = _field + return nil +} +func (p *TSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]types.TVersion, 0, size) + for i := 0; i < size; i++ { + + var _elem types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.MissingVersion = _field + return nil +} +func (p *TSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ListFiles = _field + return nil +} +func (p *TSnapshotRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.AllowIncrementalClone = _field + return nil +} +func (p *TSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PreferredSnapshotVersion = _field + return nil +} +func (p *TSnapshotRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsCopyTabletTask = _field + return nil +} +func (p *TSnapshotRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.StartVersion = _field + return nil +} +func (p *TSnapshotRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } 
else { + _field = &v + } + p.EndVersion = _field + return nil +} +func (p *TSnapshotRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsCopyBinlog = _field + return nil +} +func (p *TSnapshotRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RefTabletId = _field + return nil +} + +func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSnapshotRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.SchemaHash); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin 
error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetVersionHash() { + if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.VersionHash); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeout() { + if err = oprot.WriteFieldBegin("timeout", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Timeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMissingVersion() { + if err = oprot.WriteFieldBegin("missing_version", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.MissingVersion)); err != nil { + return err + } + for _, v := range p.MissingVersion { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetListFiles() { + if err = oprot.WriteFieldBegin("list_files", thrift.BOOL, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.ListFiles); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetAllowIncrementalClone() { + if err = oprot.WriteFieldBegin("allow_incremental_clone", thrift.BOOL, 8); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteBool(*p.AllowIncrementalClone); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetPreferredSnapshotVersion() { + if err = oprot.WriteFieldBegin("preferred_snapshot_version", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.PreferredSnapshotVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCopyTabletTask() { + if err = oprot.WriteFieldBegin("is_copy_tablet_task", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsCopyTabletTask); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetStartVersion() { + if err = oprot.WriteFieldBegin("start_version", thrift.I64, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.StartVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetEndVersion() { + if err = oprot.WriteFieldBegin("end_version", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.EndVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCopyBinlog() { + if err = oprot.WriteFieldBegin("is_copy_binlog", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsCopyBinlog); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TSnapshotRequest) writeField14(oprot thrift.TProtocol) (err error) { + if 
p.IsSetRefTabletId() { + if err = oprot.WriteFieldBegin("ref_tablet_id", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RefTabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TSnapshotRequest) String() string { + if p == nil { + return "" } + return fmt.Sprintf("TSnapshotRequest(%+v)", *p) - if !issetTabletId { - fieldId = 1 - goto RequiredFieldNotSetError +} + +func (p *TSnapshotRequest) DeepEqual(ano *TSnapshotRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TabletId) { + return false + } + if !p.Field2DeepEqual(ano.SchemaHash) { + return false + } + if !p.Field3DeepEqual(ano.Version) { + return false + } + if !p.Field4DeepEqual(ano.VersionHash) { + return false + } + if !p.Field5DeepEqual(ano.Timeout) { + return false + } + if !p.Field6DeepEqual(ano.MissingVersion) { + return false + } + if !p.Field7DeepEqual(ano.ListFiles) { + return false + } + if !p.Field8DeepEqual(ano.AllowIncrementalClone) { + return false + } + if !p.Field9DeepEqual(ano.PreferredSnapshotVersion) { + return false + } + if !p.Field10DeepEqual(ano.IsCopyTabletTask) { + return false + } + if !p.Field11DeepEqual(ano.StartVersion) { + return false + } + if !p.Field12DeepEqual(ano.EndVersion) { + return false + } + if !p.Field13DeepEqual(ano.IsCopyBinlog) { + return false + } + if !p.Field14DeepEqual(ano.RefTabletId) { + return false } + return true +} - if !issetSchemaHash { - fieldId = 2 - goto RequiredFieldNotSetError +func (p *TSnapshotRequest) Field1DeepEqual(src types.TTabletId) bool { + + if p.TabletId != src { + return false } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + return true +} +func (p *TSnapshotRequest) Field2DeepEqual(src types.TSchemaHash) bool { -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotRequest[fieldId])) + if p.SchemaHash != src { + return false + } + return true } +func (p *TSnapshotRequest) Field3DeepEqual(src *types.TVersion) bool { -func (p *TSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TabletId = v + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false } - return nil + if *p.Version != *src { + return false + } + return true } +func (p *TSnapshotRequest) Field4DeepEqual(src *types.TVersionHash) bool { -func (p *TSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { - 
if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.SchemaHash = v + if p.VersionHash == src { + return true + } else if p.VersionHash == nil || src == nil { + return false } - return nil + if *p.VersionHash != *src { + return false + } + return true } +func (p *TSnapshotRequest) Field5DeepEqual(src *int64) bool { -func (p *TSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Version = &v + if p.Timeout == src { + return true + } else if p.Timeout == nil || src == nil { + return false } - return nil + if *p.Timeout != *src { + return false + } + return true } +func (p *TSnapshotRequest) Field6DeepEqual(src []types.TVersion) bool { -func (p *TSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.VersionHash = &v + if len(p.MissingVersion) != len(src) { + return false } - return nil + for i, v := range p.MissingVersion { + _src := src[i] + if v != _src { + return false + } + } + return true } +func (p *TSnapshotRequest) Field7DeepEqual(src *bool) bool { -func (p *TSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Timeout = &v + if p.ListFiles == src { + return true + } else if p.ListFiles == nil || src == nil { + return false } - return nil + if *p.ListFiles != *src { + return false + } + return true } +func (p *TSnapshotRequest) Field8DeepEqual(src *bool) bool { -func (p *TSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if p.AllowIncrementalClone == src { + return true + } else if p.AllowIncrementalClone == nil || src == nil { + return false } - p.MissingVersion = make([]types.TVersion, 0, size) - for i := 0; i < size; i++ { - var _elem types.TVersion - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem = v - } + if *p.AllowIncrementalClone != *src { + return false + } + return true +} +func (p *TSnapshotRequest) Field9DeepEqual(src int32) bool { - p.MissingVersion = append(p.MissingVersion, _elem) + if p.PreferredSnapshotVersion != src { + return false } - if err := iprot.ReadListEnd(); err != nil { - return err + return true +} +func (p *TSnapshotRequest) Field10DeepEqual(src *bool) bool { + + if p.IsCopyTabletTask == src { + return true + } else if p.IsCopyTabletTask == nil || src == nil { + return false } - return nil + if *p.IsCopyTabletTask != *src { + return false + } + return true +} +func (p *TSnapshotRequest) Field11DeepEqual(src *types.TVersion) bool { + + if p.StartVersion == src { + return true + } else if p.StartVersion == nil || src == nil { + return false + } + if *p.StartVersion != *src { + return false + } + return true +} +func (p *TSnapshotRequest) Field12DeepEqual(src *types.TVersion) bool { + + if p.EndVersion == src { + return true + } else if p.EndVersion == nil || src == nil { + return false + } + if *p.EndVersion != *src { + return false + } + return true +} +func (p *TSnapshotRequest) Field13DeepEqual(src *bool) bool { + + if p.IsCopyBinlog == src { + return true + } else if p.IsCopyBinlog == nil || src == nil { + return false + } + if *p.IsCopyBinlog != *src { + return false + } + return true +} +func (p *TSnapshotRequest) Field14DeepEqual(src *types.TTabletId) bool { + + if p.RefTabletId == src { + return true + } else if p.RefTabletId == nil || src == nil { + return false + } + if *p.RefTabletId 
!= *src { + return false + } + return true +} + +type TReleaseSnapshotRequest struct { + SnapshotPath string `thrift:"snapshot_path,1,required" frugal:"1,required,string" json:"snapshot_path"` +} + +func NewTReleaseSnapshotRequest() *TReleaseSnapshotRequest { + return &TReleaseSnapshotRequest{} } -func (p *TSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.ListFiles = &v - } - return nil +func (p *TReleaseSnapshotRequest) InitDefault() { } -func (p *TSnapshotRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.AllowIncrementalClone = &v - } - return nil +func (p *TReleaseSnapshotRequest) GetSnapshotPath() (v string) { + return p.SnapshotPath +} +func (p *TReleaseSnapshotRequest) SetSnapshotPath(val string) { + p.SnapshotPath = val } -func (p *TSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.PreferredSnapshotVersion = v - } - return nil +var fieldIDToName_TReleaseSnapshotRequest = map[int16]string{ + 1: "snapshot_path", } -func (p *TSnapshotRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.IsCopyTabletTask = &v +func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetSnapshotPath bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return nil -} -func (p *TSnapshotRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.StartVersion = &v + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetSnapshotPath = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return nil -} -func (p *TSnapshotRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.EndVersion = &v + if !issetSnapshotPath { + fieldId = 1 + goto RequiredFieldNotSetError } return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReleaseSnapshotRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", 
fieldIDToName_TReleaseSnapshotRequest[fieldId])) } -func (p *TSnapshotRequest) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TReleaseSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.IsCopyBinlog = &v + _field = v } + p.SnapshotPath = _field return nil } -func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TSnapshotRequest"); err != nil { + if err = oprot.WriteStructBegin("TReleaseSnapshotRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16454,55 +18978,6 @@ func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16521,11 +18996,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { +func (p *TReleaseSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.TabletId); err != nil { + if err := oprot.WriteString(p.SnapshotPath); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16538,466 +19013,676 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil { - goto WriteFieldBeginError +func (p *TReleaseSnapshotRequest) String() string { + if p == nil { + return "" } - if err := oprot.WriteI32(p.SchemaHash); err != nil { - return err + return fmt.Sprintf("TReleaseSnapshotRequest(%+v)", *p) + +} + +func (p *TReleaseSnapshotRequest) DeepEqual(ano *TReleaseSnapshotRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if !p.Field1DeepEqual(ano.SnapshotPath) { + return false } - return nil -WriteFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return true } -func (p *TSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetVersion() { - if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Version); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TReleaseSnapshotRequest) Field1DeepEqual(src string) bool { + + if strings.Compare(p.SnapshotPath, src) != 0 { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return true } -func (p *TSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVersionHash() { - if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.VersionHash); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +type TClearRemoteFileReq struct { + RemoteFilePath string `thrift:"remote_file_path,1,required" frugal:"1,required,string" json:"remote_file_path"` + RemoteSourceProperties map[string]string `thrift:"remote_source_properties,2,required" frugal:"2,required,map" json:"remote_source_properties"` +} + +func NewTClearRemoteFileReq() *TClearRemoteFileReq { + return &TClearRemoteFileReq{} +} + +func (p *TClearRemoteFileReq) InitDefault() { +} + +func (p *TClearRemoteFileReq) GetRemoteFilePath() (v string) { + return p.RemoteFilePath +} + +func (p *TClearRemoteFileReq) GetRemoteSourceProperties() (v map[string]string) { + return p.RemoteSourceProperties +} +func (p *TClearRemoteFileReq) SetRemoteFilePath(val string) { + p.RemoteFilePath = val +} +func (p *TClearRemoteFileReq) SetRemoteSourceProperties(val map[string]string) { + p.RemoteSourceProperties = val +} + +var fieldIDToName_TClearRemoteFileReq = map[int16]string{ + 1: "remote_file_path", + 2: "remote_source_properties", } -func (p *TSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeout() { - if err = oprot.WriteFieldBegin("timeout", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Timeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetRemoteFilePath bool = false + var issetRemoteSourceProperties bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} -func (p *TSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetMissingVersion() { - if err = oprot.WriteFieldBegin("missing_version", 
thrift.LIST, 6); err != nil { - goto WriteFieldBeginError + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteListBegin(thrift.I64, len(p.MissingVersion)); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - for _, v := range p.MissingVersion { - if err := oprot.WriteI64(v); err != nil { - return err + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetRemoteFilePath = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.MAP { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetRemoteSourceProperties = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } -func (p *TSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetListFiles() { - if err = oprot.WriteFieldBegin("list_files", thrift.BOOL, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.ListFiles); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !issetRemoteFilePath { + fieldId = 1 + goto RequiredFieldNotSetError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} -func (p *TSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetAllowIncrementalClone() { - if err = oprot.WriteFieldBegin("allow_incremental_clone", thrift.BOOL, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.AllowIncrementalClone); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !issetRemoteSourceProperties { + fieldId = 2 + goto RequiredFieldNotSetError } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TClearRemoteFileReq[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) 
+RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TClearRemoteFileReq[fieldId])) } -func (p *TSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetPreferredSnapshotVersion() { - if err = oprot.WriteFieldBegin("preferred_snapshot_version", thrift.I32, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.PreferredSnapshotVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TClearRemoteFileReq) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v } + p.RemoteFilePath = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } - -func (p *TSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetIsCopyTabletTask() { - if err = oprot.WriteFieldBegin("is_copy_tablet_task", thrift.BOOL, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.IsCopyTabletTask); err != nil { +func (p *TClearRemoteFileReq) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _key = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err } + p.RemoteSourceProperties = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetStartVersion() { - if err = oprot.WriteFieldBegin("start_version", thrift.I64, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.StartVersion); err != nil { - return err +func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TClearRemoteFileReq"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return 
thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetEndVersion() { - if err = oprot.WriteFieldBegin("end_version", thrift.I64, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.EndVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TClearRemoteFileReq) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("remote_file_path", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.RemoteFilePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetIsCopyBinlog() { - if err = oprot.WriteFieldBegin("is_copy_binlog", thrift.BOOL, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.IsCopyBinlog); err != nil { +func (p *TClearRemoteFileReq) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("remote_source_properties", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.RemoteSourceProperties)); err != nil { + return err + } + for k, v := range p.RemoteSourceProperties { + if err := oprot.WriteString(k); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err := oprot.WriteString(v); err != nil { + return err } } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TSnapshotRequest) String() string { +func (p *TClearRemoteFileReq) String() string { if p == nil { return "" } - return fmt.Sprintf("TSnapshotRequest(%+v)", *p) + return fmt.Sprintf("TClearRemoteFileReq(%+v)", *p) + } -func (p *TSnapshotRequest) DeepEqual(ano *TSnapshotRequest) bool { +func (p *TClearRemoteFileReq) DeepEqual(ano *TClearRemoteFileReq) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TabletId) { - return false - } - if !p.Field2DeepEqual(ano.SchemaHash) { + if !p.Field1DeepEqual(ano.RemoteFilePath) { return false } - if !p.Field3DeepEqual(ano.Version) { + if !p.Field2DeepEqual(ano.RemoteSourceProperties) { return false } - if !p.Field4DeepEqual(ano.VersionHash) { + return true +} + +func (p *TClearRemoteFileReq) Field1DeepEqual(src string) bool { + + if strings.Compare(p.RemoteFilePath, src) 
!= 0 { return false } - if !p.Field5DeepEqual(ano.Timeout) { + return true +} +func (p *TClearRemoteFileReq) Field2DeepEqual(src map[string]string) bool { + + if len(p.RemoteSourceProperties) != len(src) { return false } - if !p.Field6DeepEqual(ano.MissingVersion) { - return false + for k, v := range p.RemoteSourceProperties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } } - if !p.Field7DeepEqual(ano.ListFiles) { - return false + return true +} + +type TPartitionVersionInfo struct { + PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` + Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"` + VersionHash types.TVersionHash `thrift:"version_hash,3,required" frugal:"3,required,i64" json:"version_hash"` +} + +func NewTPartitionVersionInfo() *TPartitionVersionInfo { + return &TPartitionVersionInfo{} +} + +func (p *TPartitionVersionInfo) InitDefault() { +} + +func (p *TPartitionVersionInfo) GetPartitionId() (v types.TPartitionId) { + return p.PartitionId +} + +func (p *TPartitionVersionInfo) GetVersion() (v types.TVersion) { + return p.Version +} + +func (p *TPartitionVersionInfo) GetVersionHash() (v types.TVersionHash) { + return p.VersionHash +} +func (p *TPartitionVersionInfo) SetPartitionId(val types.TPartitionId) { + p.PartitionId = val +} +func (p *TPartitionVersionInfo) SetVersion(val types.TVersion) { + p.Version = val +} +func (p *TPartitionVersionInfo) SetVersionHash(val types.TVersionHash) { + p.VersionHash = val +} + +var fieldIDToName_TPartitionVersionInfo = map[int16]string{ + 1: "partition_id", + 2: "version", + 3: "version_hash", +} + +func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionId bool = false + var issetVersion bool = false + var issetVersionHash bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if !p.Field8DeepEqual(ano.AllowIncrementalClone) { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetPartitionId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetVersionHash = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if !p.Field9DeepEqual(ano.PreferredSnapshotVersion) { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if !p.Field10DeepEqual(ano.IsCopyTabletTask) { - return false + + if !issetPartitionId { + fieldId = 1 + goto RequiredFieldNotSetError } - if !p.Field11DeepEqual(ano.StartVersion) { - return false + + if !issetVersion { + fieldId = 2 + goto RequiredFieldNotSetError } - if !p.Field12DeepEqual(ano.EndVersion) { - 
return false + + if !issetVersionHash { + fieldId = 3 + goto RequiredFieldNotSetError } - if !p.Field13DeepEqual(ano.IsCopyBinlog) { - return false + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionVersionInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionVersionInfo[fieldId])) +} + +func (p *TPartitionVersionInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TPartitionId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v } - return true + p.PartitionId = _field + return nil } +func (p *TPartitionVersionInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TSnapshotRequest) Field1DeepEqual(src types.TTabletId) bool { + var _field types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Version = _field + return nil +} +func (p *TPartitionVersionInfo) ReadField3(iprot thrift.TProtocol) error { - if p.TabletId != src { - return false + var _field types.TVersionHash + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v } - return true + p.VersionHash = _field + return nil } -func (p *TSnapshotRequest) Field2DeepEqual(src types.TSchemaHash) bool { - if p.SchemaHash != src { - return false +func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPartitionVersionInfo"); err != nil { + goto WriteStructBeginError } - return true + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TSnapshotRequest) Field3DeepEqual(src *types.TVersion) bool { - if p.Version == src { - return true - } else if p.Version == nil || src == nil { - return false +func (p *TPartitionVersionInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError } - if *p.Version != *src { - return false + if err := oprot.WriteI64(p.PartitionId); err != nil { + return 
err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TSnapshotRequest) Field4DeepEqual(src *types.TVersionHash) bool { - if p.VersionHash == src { - return true - } else if p.VersionHash == nil || src == nil { - return false +func (p *TPartitionVersionInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil { + goto WriteFieldBeginError } - if *p.VersionHash != *src { - return false + if err := oprot.WriteI64(p.Version); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TSnapshotRequest) Field5DeepEqual(src *int64) bool { - if p.Timeout == src { - return true - } else if p.Timeout == nil || src == nil { - return false +func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 3); err != nil { + goto WriteFieldBeginError } - if *p.Timeout != *src { - return false + if err := oprot.WriteI64(p.VersionHash); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TSnapshotRequest) Field6DeepEqual(src []types.TVersion) bool { - if len(p.MissingVersion) != len(src) { - return false - } - for i, v := range p.MissingVersion { - _src := src[i] - if v != _src { - return false - } +func (p *TPartitionVersionInfo) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TPartitionVersionInfo(%+v)", *p) + } -func (p *TSnapshotRequest) Field7DeepEqual(src *bool) bool { - if p.ListFiles == src { +func (p *TPartitionVersionInfo) DeepEqual(ano *TPartitionVersionInfo) bool { + if p == ano { return true - } else if p.ListFiles == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.ListFiles != *src { + if !p.Field1DeepEqual(ano.PartitionId) { return false } - return true -} -func (p *TSnapshotRequest) Field8DeepEqual(src *bool) bool { - - if p.AllowIncrementalClone == src { - return true - } else if p.AllowIncrementalClone == nil || src == nil { + if !p.Field2DeepEqual(ano.Version) { return false } - if *p.AllowIncrementalClone != *src { + if !p.Field3DeepEqual(ano.VersionHash) { return false } return true } -func (p *TSnapshotRequest) Field9DeepEqual(src int32) bool { - if p.PreferredSnapshotVersion != src { +func (p *TPartitionVersionInfo) Field1DeepEqual(src types.TPartitionId) bool { + + if p.PartitionId != src { return false } return true } -func (p *TSnapshotRequest) Field10DeepEqual(src *bool) bool { +func (p *TPartitionVersionInfo) Field2DeepEqual(src types.TVersion) bool { - if p.IsCopyTabletTask == src { - return true - } else if p.IsCopyTabletTask == nil || src == nil { - return false - } - if *p.IsCopyTabletTask != *src { + if 
p.Version != src { return false } return true } -func (p *TSnapshotRequest) Field11DeepEqual(src *types.TVersion) bool { +func (p *TPartitionVersionInfo) Field3DeepEqual(src types.TVersionHash) bool { - if p.StartVersion == src { - return true - } else if p.StartVersion == nil || src == nil { - return false - } - if *p.StartVersion != *src { + if p.VersionHash != src { return false } return true } -func (p *TSnapshotRequest) Field12DeepEqual(src *types.TVersion) bool { - if p.EndVersion == src { - return true - } else if p.EndVersion == nil || src == nil { - return false - } - if *p.EndVersion != *src { - return false - } - return true +type TMoveDirReq struct { + TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` + SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"` + Src string `thrift:"src,3,required" frugal:"3,required,string" json:"src"` + JobId int64 `thrift:"job_id,4,required" frugal:"4,required,i64" json:"job_id"` + Overwrite bool `thrift:"overwrite,5,required" frugal:"5,required,bool" json:"overwrite"` } -func (p *TSnapshotRequest) Field13DeepEqual(src *bool) bool { - if p.IsCopyBinlog == src { - return true - } else if p.IsCopyBinlog == nil || src == nil { - return false - } - if *p.IsCopyBinlog != *src { - return false - } - return true +func NewTMoveDirReq() *TMoveDirReq { + return &TMoveDirReq{} } -type TReleaseSnapshotRequest struct { - SnapshotPath string `thrift:"snapshot_path,1,required" frugal:"1,required,string" json:"snapshot_path"` +func (p *TMoveDirReq) InitDefault() { } -func NewTReleaseSnapshotRequest() *TReleaseSnapshotRequest { - return &TReleaseSnapshotRequest{} +func (p *TMoveDirReq) GetTabletId() (v types.TTabletId) { + return p.TabletId } -func (p *TReleaseSnapshotRequest) InitDefault() { - *p = TReleaseSnapshotRequest{} +func (p *TMoveDirReq) GetSchemaHash() (v types.TSchemaHash) { + return p.SchemaHash } -func (p *TReleaseSnapshotRequest) GetSnapshotPath() (v string) { - return p.SnapshotPath +func (p *TMoveDirReq) GetSrc() (v string) { + return p.Src } -func (p *TReleaseSnapshotRequest) SetSnapshotPath(val string) { - p.SnapshotPath = val + +func (p *TMoveDirReq) GetJobId() (v int64) { + return p.JobId } -var fieldIDToName_TReleaseSnapshotRequest = map[int16]string{ - 1: "snapshot_path", +func (p *TMoveDirReq) GetOverwrite() (v bool) { + return p.Overwrite +} +func (p *TMoveDirReq) SetTabletId(val types.TTabletId) { + p.TabletId = val +} +func (p *TMoveDirReq) SetSchemaHash(val types.TSchemaHash) { + p.SchemaHash = val +} +func (p *TMoveDirReq) SetSrc(val string) { + p.Src = val +} +func (p *TMoveDirReq) SetJobId(val int64) { + p.JobId = val +} +func (p *TMoveDirReq) SetOverwrite(val bool) { + p.Overwrite = val } -func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { +var fieldIDToName_TMoveDirReq = map[int16]string{ + 1: "tablet_id", + 2: "schema_hash", + 3: "src", + 4: "job_id", + 5: "overwrite", +} + +func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetSnapshotPath bool = false + var issetTabletId bool = false + var issetSchemaHash bool = false + var issetSrc bool = false + var issetJobId bool = false + var issetOverwrite bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -17014,22 +19699,55 @@ func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == 
thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetSnapshotPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetTabletId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetSchemaHash = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetSrc = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetJobId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + issetOverwrite = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17038,8 +19756,28 @@ func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetSnapshotPath { - fieldId = 1 + if !issetTabletId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetSchemaHash { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSrc { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetJobId { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetOverwrite { + fieldId = 5 goto RequiredFieldNotSetError } return nil @@ -17048,7 +19786,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReleaseSnapshotRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMoveDirReq[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17057,21 +19795,68 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReleaseSnapshotRequest[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMoveDirReq[fieldId])) } -func (p *TReleaseSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { +func (p *TMoveDirReq) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TabletId = _field + return nil +} +func (p *TMoveDirReq) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.SchemaHash = _field + return nil +} +func (p *TMoveDirReq) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := 
iprot.ReadString(); err != nil { return err } else { - p.SnapshotPath = v + _field = v } + p.Src = _field return nil } +func (p *TMoveDirReq) ReadField4(iprot thrift.TProtocol) error { -func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.JobId = _field + return nil +} +func (p *TMoveDirReq) ReadField5(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.Overwrite = _field + return nil +} + +func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TReleaseSnapshotRequest"); err != nil { + if err = oprot.WriteStructBegin("TMoveDirReq"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17079,7 +19864,22 @@ func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17098,11 +19898,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TReleaseSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil { +func (p *TMoveDirReq) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.SnapshotPath); err != nil { + if err := oprot.WriteI64(p.TabletId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17115,71 +19915,219 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TReleaseSnapshotRequest) String() string { +func (p *TMoveDirReq) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.SchemaHash); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TMoveDirReq) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("src", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Src); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TMoveDirReq) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("job_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } 
+ if err := oprot.WriteI64(p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TMoveDirReq) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.Overwrite); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TMoveDirReq) String() string { if p == nil { return "" } - return fmt.Sprintf("TReleaseSnapshotRequest(%+v)", *p) + return fmt.Sprintf("TMoveDirReq(%+v)", *p) + } -func (p *TReleaseSnapshotRequest) DeepEqual(ano *TReleaseSnapshotRequest) bool { +func (p *TMoveDirReq) DeepEqual(ano *TMoveDirReq) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.SnapshotPath) { + if !p.Field1DeepEqual(ano.TabletId) { + return false + } + if !p.Field2DeepEqual(ano.SchemaHash) { + return false + } + if !p.Field3DeepEqual(ano.Src) { + return false + } + if !p.Field4DeepEqual(ano.JobId) { + return false + } + if !p.Field5DeepEqual(ano.Overwrite) { return false } return true } -func (p *TReleaseSnapshotRequest) Field1DeepEqual(src string) bool { +func (p *TMoveDirReq) Field1DeepEqual(src types.TTabletId) bool { - if strings.Compare(p.SnapshotPath, src) != 0 { + if p.TabletId != src { return false } return true } +func (p *TMoveDirReq) Field2DeepEqual(src types.TSchemaHash) bool { -type TClearRemoteFileReq struct { - RemoteFilePath string `thrift:"remote_file_path,1,required" frugal:"1,required,string" json:"remote_file_path"` - RemoteSourceProperties map[string]string `thrift:"remote_source_properties,2,required" frugal:"2,required,map" json:"remote_source_properties"` + if p.SchemaHash != src { + return false + } + return true } +func (p *TMoveDirReq) Field3DeepEqual(src string) bool { -func NewTClearRemoteFileReq() *TClearRemoteFileReq { - return &TClearRemoteFileReq{} + if strings.Compare(p.Src, src) != 0 { + return false + } + return true } +func (p *TMoveDirReq) Field4DeepEqual(src int64) bool { -func (p *TClearRemoteFileReq) InitDefault() { - *p = TClearRemoteFileReq{} + if p.JobId != src { + return false + } + return true } +func (p *TMoveDirReq) Field5DeepEqual(src bool) bool { -func (p *TClearRemoteFileReq) GetRemoteFilePath() (v string) { - return p.RemoteFilePath + if p.Overwrite != src { + return false + } + return true } -func (p *TClearRemoteFileReq) GetRemoteSourceProperties() (v map[string]string) { - return p.RemoteSourceProperties +type TPublishVersionRequest struct { + TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"` + PartitionVersionInfos []*TPartitionVersionInfo `thrift:"partition_version_infos,2,required" frugal:"2,required,list" json:"partition_version_infos"` + StrictMode bool `thrift:"strict_mode,3,optional" frugal:"3,optional,bool" json:"strict_mode,omitempty"` + BaseTabletIds []types.TTabletId `thrift:"base_tablet_ids,4,optional" 
frugal:"4,optional,set" json:"base_tablet_ids,omitempty"` } -func (p *TClearRemoteFileReq) SetRemoteFilePath(val string) { - p.RemoteFilePath = val + +func NewTPublishVersionRequest() *TPublishVersionRequest { + return &TPublishVersionRequest{ + + StrictMode: false, + } } -func (p *TClearRemoteFileReq) SetRemoteSourceProperties(val map[string]string) { - p.RemoteSourceProperties = val + +func (p *TPublishVersionRequest) InitDefault() { + p.StrictMode = false } -var fieldIDToName_TClearRemoteFileReq = map[int16]string{ - 1: "remote_file_path", - 2: "remote_source_properties", +func (p *TPublishVersionRequest) GetTransactionId() (v types.TTransactionId) { + return p.TransactionId +} + +func (p *TPublishVersionRequest) GetPartitionVersionInfos() (v []*TPartitionVersionInfo) { + return p.PartitionVersionInfos +} + +var TPublishVersionRequest_StrictMode_DEFAULT bool = false + +func (p *TPublishVersionRequest) GetStrictMode() (v bool) { + if !p.IsSetStrictMode() { + return TPublishVersionRequest_StrictMode_DEFAULT + } + return p.StrictMode +} + +var TPublishVersionRequest_BaseTabletIds_DEFAULT []types.TTabletId + +func (p *TPublishVersionRequest) GetBaseTabletIds() (v []types.TTabletId) { + if !p.IsSetBaseTabletIds() { + return TPublishVersionRequest_BaseTabletIds_DEFAULT + } + return p.BaseTabletIds +} +func (p *TPublishVersionRequest) SetTransactionId(val types.TTransactionId) { + p.TransactionId = val +} +func (p *TPublishVersionRequest) SetPartitionVersionInfos(val []*TPartitionVersionInfo) { + p.PartitionVersionInfos = val +} +func (p *TPublishVersionRequest) SetStrictMode(val bool) { + p.StrictMode = val +} +func (p *TPublishVersionRequest) SetBaseTabletIds(val []types.TTabletId) { + p.BaseTabletIds = val +} + +var fieldIDToName_TPublishVersionRequest = map[int16]string{ + 1: "transaction_id", + 2: "partition_version_infos", + 3: "strict_mode", + 4: "base_tablet_ids", +} + +func (p *TPublishVersionRequest) IsSetStrictMode() bool { + return p.StrictMode != TPublishVersionRequest_StrictMode_DEFAULT +} + +func (p *TPublishVersionRequest) IsSetBaseTabletIds() bool { + return p.BaseTabletIds != nil } -func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) { +func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetRemoteFilePath bool = false - var issetRemoteSourceProperties bool = false + var issetTransactionId bool = false + var issetPartitionVersionInfos bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -17196,33 +20144,44 @@ func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetRemoteFilePath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetTransactionId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetRemoteSourceProperties = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetPartitionVersionInfos = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.SET { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17231,12 +20190,12 @@ func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetRemoteFilePath { + if !issetTransactionId { fieldId = 1 goto RequiredFieldNotSetError } - if !issetRemoteSourceProperties { + if !issetPartitionVersionInfos { fieldId = 2 goto RequiredFieldNotSetError } @@ -17246,7 +20205,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TClearRemoteFileReq[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17255,50 +20214,81 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TClearRemoteFileReq[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId])) } -func (p *TClearRemoteFileReq) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TPublishVersionRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTransactionId + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteFilePath = v + _field = v } + p.TransactionId = _field return nil } - -func (p *TClearRemoteFileReq) ReadField2(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() +func (p *TPublishVersionRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RemoteSourceProperties = make(map[string]string, size) + _field := make([]*TPartitionVersionInfo, 0, size) + values := make([]TPartitionVersionInfo, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err - } else { - _key = v } - var _val string - if v, err := iprot.ReadString(); err != nil { + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PartitionVersionInfos = _field + return nil +} +func (p *TPublishVersionRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.StrictMode = _field + return nil +} +func (p *TPublishVersionRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return err + } + _field := make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { + + var _elem types.TTabletId + if v, err := 
iprot.ReadI64(); err != nil { return err } else { - _val = v + _elem = v } - p.RemoteSourceProperties[_key] = _val + _field = append(_field, _elem) } - if err := iprot.ReadMapEnd(); err != nil { + if err := iprot.ReadSetEnd(); err != nil { return err } + p.BaseTabletIds = _field return nil } -func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) { +func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TClearRemoteFileReq"); err != nil { + if err = oprot.WriteStructBegin("TPublishVersionRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17310,7 +20300,14 @@ func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17329,11 +20326,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TClearRemoteFileReq) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("remote_file_path", thrift.STRING, 1); err != nil { +func (p *TPublishVersionRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("transaction_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.RemoteFilePath); err != nil { + if err := oprot.WriteI64(p.TransactionId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17346,24 +20343,19 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TClearRemoteFileReq) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("remote_source_properties", thrift.MAP, 2); err != nil { +func (p *TPublishVersionRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_version_infos", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.RemoteSourceProperties)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PartitionVersionInfos)); err != nil { return err } - for k, v := range p.RemoteSourceProperties { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { + for _, v := range p.PartitionVersionInfos { + if err := v.Write(oprot); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17376,97 +20368,161 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TClearRemoteFileReq) String() string { +func (p *TPublishVersionRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetStrictMode() { + if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.StrictMode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPublishVersionRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseTabletIds() { + if err = oprot.WriteFieldBegin("base_tablet_ids", thrift.SET, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteSetBegin(thrift.I64, len(p.BaseTabletIds)); err != nil { + return err + } + for i := 0; i < len(p.BaseTabletIds); i++ { + for j := i + 1; j < len(p.BaseTabletIds); j++ { + if func(tgt, src types.TTabletId) bool { + if tgt != src { + return false + } + return true + }(p.BaseTabletIds[i], p.BaseTabletIds[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i])) + } + } + } + for _, v := range p.BaseTabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteSetEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPublishVersionRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TClearRemoteFileReq(%+v)", *p) + return fmt.Sprintf("TPublishVersionRequest(%+v)", *p) + } -func (p *TClearRemoteFileReq) DeepEqual(ano *TClearRemoteFileReq) bool { +func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.RemoteFilePath) { + if !p.Field1DeepEqual(ano.TransactionId) { return false } - if !p.Field2DeepEqual(ano.RemoteSourceProperties) { + if !p.Field2DeepEqual(ano.PartitionVersionInfos) { + return false + } + if !p.Field3DeepEqual(ano.StrictMode) { + return false + } + if !p.Field4DeepEqual(ano.BaseTabletIds) { return false } return true } -func (p *TClearRemoteFileReq) Field1DeepEqual(src string) bool { +func (p *TPublishVersionRequest) Field1DeepEqual(src types.TTransactionId) bool { - if strings.Compare(p.RemoteFilePath, src) != 0 { + if p.TransactionId != src { return false } return true } -func (p *TClearRemoteFileReq) Field2DeepEqual(src map[string]string) bool { +func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) bool { - if len(p.RemoteSourceProperties) != len(src) { + if len(p.PartitionVersionInfos) != len(src) { return false } - for k, v := range p.RemoteSourceProperties { - _src := src[k] - if strings.Compare(v, _src) != 0 { + for i, v := range p.PartitionVersionInfos { + _src := src[i] + if !v.DeepEqual(_src) { return false } } return true } +func (p *TPublishVersionRequest) Field3DeepEqual(src bool) bool { -type TPartitionVersionInfo struct { - PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` - Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"` - VersionHash types.TVersionHash `thrift:"version_hash,3,required" frugal:"3,required,i64" json:"version_hash"` + if p.StrictMode != src { + return false + } + return true } +func (p *TPublishVersionRequest) Field4DeepEqual(src []types.TTabletId) bool { -func NewTPartitionVersionInfo() *TPartitionVersionInfo { - return &TPartitionVersionInfo{} + if len(p.BaseTabletIds) != len(src) { + return false + } + for i, v := range p.BaseTabletIds { + _src := src[i] 
+ if v != _src { + return false + } + } + return true } -func (p *TPartitionVersionInfo) InitDefault() { - *p = TPartitionVersionInfo{} +type TVisibleVersionReq struct { + PartitionVersion map[types.TPartitionId]types.TVersion `thrift:"partition_version,1,required" frugal:"1,required,map" json:"partition_version"` } -func (p *TPartitionVersionInfo) GetPartitionId() (v types.TPartitionId) { - return p.PartitionId +func NewTVisibleVersionReq() *TVisibleVersionReq { + return &TVisibleVersionReq{} } -func (p *TPartitionVersionInfo) GetVersion() (v types.TVersion) { - return p.Version +func (p *TVisibleVersionReq) InitDefault() { } -func (p *TPartitionVersionInfo) GetVersionHash() (v types.TVersionHash) { - return p.VersionHash -} -func (p *TPartitionVersionInfo) SetPartitionId(val types.TPartitionId) { - p.PartitionId = val -} -func (p *TPartitionVersionInfo) SetVersion(val types.TVersion) { - p.Version = val +func (p *TVisibleVersionReq) GetPartitionVersion() (v map[types.TPartitionId]types.TVersion) { + return p.PartitionVersion } -func (p *TPartitionVersionInfo) SetVersionHash(val types.TVersionHash) { - p.VersionHash = val +func (p *TVisibleVersionReq) SetPartitionVersion(val map[types.TPartitionId]types.TVersion) { + p.PartitionVersion = val } -var fieldIDToName_TPartitionVersionInfo = map[int16]string{ - 1: "partition_id", - 2: "version", - 3: "version_hash", +var fieldIDToName_TVisibleVersionReq = map[int16]string{ + 1: "partition_version", } -func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) { +func (p *TVisibleVersionReq) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetPartitionId bool = false - var issetVersion bool = false - var issetVersionHash bool = false + var issetPartitionVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -17475,52 +20531,27 @@ func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) { for { _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetPartitionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPartitionVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17529,27 +20560,17 @@ func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetPartitionId { + if !issetPartitionVersion { fieldId = 1 goto RequiredFieldNotSetError } - - if !issetVersion { - fieldId = 2 - 
goto RequiredFieldNotSetError - } - - if !issetVersionHash { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionVersionInfo[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TVisibleVersionReq[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17558,39 +20579,42 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionVersionInfo[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TVisibleVersionReq[fieldId])) } -func (p *TPartitionVersionInfo) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TVisibleVersionReq) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.PartitionId = v } - return nil -} + _field := make(map[types.TPartitionId]types.TVersion, size) + for i := 0; i < size; i++ { + var _key types.TPartitionId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } -func (p *TPartitionVersionInfo) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Version = v - } - return nil -} + var _val types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val = v + } -func (p *TPartitionVersionInfo) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.VersionHash = v } + p.PartitionVersion = _field return nil } -func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) { +func (p *TVisibleVersionReq) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TPartitionVersionInfo"); err != nil { + if err = oprot.WriteStructBegin("TVisibleVersionReq"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17598,15 +20622,6 @@ func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17625,45 +20640,22 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPartitionVersionInfo) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.PartitionId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil 
-WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TPartitionVersionInfo) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil { +func (p *TVisibleVersionReq) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_version", thrift.MAP, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.Version); err != nil { + if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.PartitionVersion)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 3); err != nil { - goto WriteFieldBeginError + for k, v := range p.PartitionVersion { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } } - if err := oprot.WriteI64(p.VersionHash); err != nil { + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17671,126 +20663,164 @@ func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPartitionVersionInfo) String() string { +func (p *TVisibleVersionReq) String() string { if p == nil { return "" } - return fmt.Sprintf("TPartitionVersionInfo(%+v)", *p) + return fmt.Sprintf("TVisibleVersionReq(%+v)", *p) + } -func (p *TPartitionVersionInfo) DeepEqual(ano *TPartitionVersionInfo) bool { +func (p *TVisibleVersionReq) DeepEqual(ano *TVisibleVersionReq) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.PartitionId) { - return false - } - if !p.Field2DeepEqual(ano.Version) { - return false - } - if !p.Field3DeepEqual(ano.VersionHash) { + if !p.Field1DeepEqual(ano.PartitionVersion) { return false } return true } -func (p *TPartitionVersionInfo) Field1DeepEqual(src types.TPartitionId) bool { +func (p *TVisibleVersionReq) Field1DeepEqual(src map[types.TPartitionId]types.TVersion) bool { - if p.PartitionId != src { + if len(p.PartitionVersion) != len(src) { return false } + for k, v := range p.PartitionVersion { + _src := src[k] + if v != _src { + return false + } + } return true } -func (p *TPartitionVersionInfo) Field2DeepEqual(src types.TVersion) bool { - if p.Version != src { - return false - } - return true +type TCalcDeleteBitmapPartitionInfo struct { + PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` + Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"` + TabletIds []types.TTabletId 
`thrift:"tablet_ids,3,required" frugal:"3,required,list" json:"tablet_ids"` + BaseCompactionCnts []int64 `thrift:"base_compaction_cnts,4,optional" frugal:"4,optional,list" json:"base_compaction_cnts,omitempty"` + CumulativeCompactionCnts []int64 `thrift:"cumulative_compaction_cnts,5,optional" frugal:"5,optional,list" json:"cumulative_compaction_cnts,omitempty"` + CumulativePoints []int64 `thrift:"cumulative_points,6,optional" frugal:"6,optional,list" json:"cumulative_points,omitempty"` + SubTxnIds []int64 `thrift:"sub_txn_ids,7,optional" frugal:"7,optional,list" json:"sub_txn_ids,omitempty"` } -func (p *TPartitionVersionInfo) Field3DeepEqual(src types.TVersionHash) bool { - if p.VersionHash != src { - return false - } - return true +func NewTCalcDeleteBitmapPartitionInfo() *TCalcDeleteBitmapPartitionInfo { + return &TCalcDeleteBitmapPartitionInfo{} } -type TMoveDirReq struct { - TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` - SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"` - Src string `thrift:"src,3,required" frugal:"3,required,string" json:"src"` - JobId int64 `thrift:"job_id,4,required" frugal:"4,required,i64" json:"job_id"` - Overwrite bool `thrift:"overwrite,5,required" frugal:"5,required,bool" json:"overwrite"` +func (p *TCalcDeleteBitmapPartitionInfo) InitDefault() { } -func NewTMoveDirReq() *TMoveDirReq { - return &TMoveDirReq{} +func (p *TCalcDeleteBitmapPartitionInfo) GetPartitionId() (v types.TPartitionId) { + return p.PartitionId } -func (p *TMoveDirReq) InitDefault() { - *p = TMoveDirReq{} +func (p *TCalcDeleteBitmapPartitionInfo) GetVersion() (v types.TVersion) { + return p.Version } -func (p *TMoveDirReq) GetTabletId() (v types.TTabletId) { - return p.TabletId +func (p *TCalcDeleteBitmapPartitionInfo) GetTabletIds() (v []types.TTabletId) { + return p.TabletIds } -func (p *TMoveDirReq) GetSchemaHash() (v types.TSchemaHash) { - return p.SchemaHash +var TCalcDeleteBitmapPartitionInfo_BaseCompactionCnts_DEFAULT []int64 + +func (p *TCalcDeleteBitmapPartitionInfo) GetBaseCompactionCnts() (v []int64) { + if !p.IsSetBaseCompactionCnts() { + return TCalcDeleteBitmapPartitionInfo_BaseCompactionCnts_DEFAULT + } + return p.BaseCompactionCnts } -func (p *TMoveDirReq) GetSrc() (v string) { - return p.Src +var TCalcDeleteBitmapPartitionInfo_CumulativeCompactionCnts_DEFAULT []int64 + +func (p *TCalcDeleteBitmapPartitionInfo) GetCumulativeCompactionCnts() (v []int64) { + if !p.IsSetCumulativeCompactionCnts() { + return TCalcDeleteBitmapPartitionInfo_CumulativeCompactionCnts_DEFAULT + } + return p.CumulativeCompactionCnts } -func (p *TMoveDirReq) GetJobId() (v int64) { - return p.JobId +var TCalcDeleteBitmapPartitionInfo_CumulativePoints_DEFAULT []int64 + +func (p *TCalcDeleteBitmapPartitionInfo) GetCumulativePoints() (v []int64) { + if !p.IsSetCumulativePoints() { + return TCalcDeleteBitmapPartitionInfo_CumulativePoints_DEFAULT + } + return p.CumulativePoints } -func (p *TMoveDirReq) GetOverwrite() (v bool) { - return p.Overwrite +var TCalcDeleteBitmapPartitionInfo_SubTxnIds_DEFAULT []int64 + +func (p *TCalcDeleteBitmapPartitionInfo) GetSubTxnIds() (v []int64) { + if !p.IsSetSubTxnIds() { + return TCalcDeleteBitmapPartitionInfo_SubTxnIds_DEFAULT + } + return p.SubTxnIds } -func (p *TMoveDirReq) SetTabletId(val types.TTabletId) { - p.TabletId = val +func (p *TCalcDeleteBitmapPartitionInfo) SetPartitionId(val types.TPartitionId) { + p.PartitionId = val } -func (p *TMoveDirReq) 
SetSchemaHash(val types.TSchemaHash) { - p.SchemaHash = val +func (p *TCalcDeleteBitmapPartitionInfo) SetVersion(val types.TVersion) { + p.Version = val } -func (p *TMoveDirReq) SetSrc(val string) { - p.Src = val +func (p *TCalcDeleteBitmapPartitionInfo) SetTabletIds(val []types.TTabletId) { + p.TabletIds = val } -func (p *TMoveDirReq) SetJobId(val int64) { - p.JobId = val +func (p *TCalcDeleteBitmapPartitionInfo) SetBaseCompactionCnts(val []int64) { + p.BaseCompactionCnts = val } -func (p *TMoveDirReq) SetOverwrite(val bool) { - p.Overwrite = val +func (p *TCalcDeleteBitmapPartitionInfo) SetCumulativeCompactionCnts(val []int64) { + p.CumulativeCompactionCnts = val +} +func (p *TCalcDeleteBitmapPartitionInfo) SetCumulativePoints(val []int64) { + p.CumulativePoints = val +} +func (p *TCalcDeleteBitmapPartitionInfo) SetSubTxnIds(val []int64) { + p.SubTxnIds = val } -var fieldIDToName_TMoveDirReq = map[int16]string{ - 1: "tablet_id", - 2: "schema_hash", - 3: "src", - 4: "job_id", - 5: "overwrite", +var fieldIDToName_TCalcDeleteBitmapPartitionInfo = map[int16]string{ + 1: "partition_id", + 2: "version", + 3: "tablet_ids", + 4: "base_compaction_cnts", + 5: "cumulative_compaction_cnts", + 6: "cumulative_points", + 7: "sub_txn_ids", } -func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) { +func (p *TCalcDeleteBitmapPartitionInfo) IsSetBaseCompactionCnts() bool { + return p.BaseCompactionCnts != nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) IsSetCumulativeCompactionCnts() bool { + return p.CumulativeCompactionCnts != nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) IsSetCumulativePoints() bool { + return p.CumulativePoints != nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) IsSetSubTxnIds() bool { + return p.SubTxnIds != nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetTabletId bool = false - var issetSchemaHash bool = false - var issetSrc bool = false - var issetJobId bool = false - var issetOverwrite bool = false + var issetPartitionId bool = false + var issetVersion bool = false + var issetTabletIds bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -17811,92 +20841,85 @@ func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPartitionId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetSrc = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetTabletIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetOverwrite = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } } if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - if !issetTabletId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetSchemaHash { - fieldId = 2 - goto RequiredFieldNotSetError + goto ReadStructEndError } - if !issetSrc { - fieldId = 3 + if !issetPartitionId { + fieldId = 1 goto RequiredFieldNotSetError } - if !issetJobId { - fieldId = 4 + if !issetVersion { + fieldId = 2 goto RequiredFieldNotSetError } - if !issetOverwrite { - fieldId = 5 + if !issetTabletIds { + fieldId = 3 goto RequiredFieldNotSetError } return nil @@ -17905,7 +20928,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMoveDirReq[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17914,57 +20937,150 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMoveDirReq[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId])) } -func (p *TMoveDirReq) ReadField1(iprot thrift.TProtocol) error { +func (p *TCalcDeleteBitmapPartitionInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TPartitionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.PartitionId = _field return nil } +func (p *TCalcDeleteBitmapPartitionInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TMoveDirReq) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field types.TVersion + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.Version = _field return nil } +func (p *TCalcDeleteBitmapPartitionInfo) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { -func (p *TMoveDirReq) ReadField3(iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _elem types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.Src = v } + p.TabletIds = _field return nil } +func (p *TCalcDeleteBitmapPartitionInfo) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { -func (p *TMoveDirReq) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.JobId = v } + p.BaseCompactionCnts = _field return nil } +func (p *TCalcDeleteBitmapPartitionInfo) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { -func (p *TMoveDirReq) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.Overwrite = v } + p.CumulativeCompactionCnts = _field return nil } +func (p *TCalcDeleteBitmapPartitionInfo) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { -func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) { + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.CumulativePoints = _field + return nil +} +func (p *TCalcDeleteBitmapPartitionInfo) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SubTxnIds = _field + return nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TMoveDirReq"); err != nil { + if err = oprot.WriteStructBegin("TCalcDeleteBitmapPartitionInfo"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17988,7 +21104,14 @@ func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18007,11 +21130,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TMoveDirReq) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { +func (p 
*TCalcDeleteBitmapPartitionInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.TabletId); err != nil { + if err := oprot.WriteI64(p.PartitionId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18024,11 +21147,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TMoveDirReq) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil { +func (p *TCalcDeleteBitmapPartitionInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(p.SchemaHash); err != nil { + if err := oprot.WriteI64(p.Version); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18041,11 +21164,19 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMoveDirReq) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("src", thrift.STRING, 3); err != nil { +func (p *TCalcDeleteBitmapPartitionInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.Src); err != nil { + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18058,15 +21189,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMoveDirReq) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("job_id", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.JobId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TCalcDeleteBitmapPartitionInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseCompactionCnts() { + if err = oprot.WriteFieldBegin("base_compaction_cnts", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.BaseCompactionCnts)); err != nil { + return err + } + for _, v := range p.BaseCompactionCnts { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -18075,15 +21216,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TMoveDirReq) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.Overwrite); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TCalcDeleteBitmapPartitionInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCumulativeCompactionCnts() { + if err = oprot.WriteFieldBegin("cumulative_compaction_cnts", thrift.LIST, 
5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.CumulativeCompactionCnts)); err != nil { + return err + } + for _, v := range p.CumulativeCompactionCnts { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -18092,135 +21243,215 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TMoveDirReq) String() string { +func (p *TCalcDeleteBitmapPartitionInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetCumulativePoints() { + if err = oprot.WriteFieldBegin("cumulative_points", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.CumulativePoints)); err != nil { + return err + } + for _, v := range p.CumulativePoints { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TCalcDeleteBitmapPartitionInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnIds() { + if err = oprot.WriteFieldBegin("sub_txn_ids", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.SubTxnIds)); err != nil { + return err + } + for _, v := range p.SubTxnIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TCalcDeleteBitmapPartitionInfo) String() string { if p == nil { return "" } - return fmt.Sprintf("TMoveDirReq(%+v)", *p) + return fmt.Sprintf("TCalcDeleteBitmapPartitionInfo(%+v)", *p) + } -func (p *TMoveDirReq) DeepEqual(ano *TMoveDirReq) bool { +func (p *TCalcDeleteBitmapPartitionInfo) DeepEqual(ano *TCalcDeleteBitmapPartitionInfo) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TabletId) { + if !p.Field1DeepEqual(ano.PartitionId) { return false } - if !p.Field2DeepEqual(ano.SchemaHash) { + if !p.Field2DeepEqual(ano.Version) { return false } - if !p.Field3DeepEqual(ano.Src) { + if !p.Field3DeepEqual(ano.TabletIds) { return false } - if !p.Field4DeepEqual(ano.JobId) { + if !p.Field4DeepEqual(ano.BaseCompactionCnts) { return false } - if !p.Field5DeepEqual(ano.Overwrite) { + if !p.Field5DeepEqual(ano.CumulativeCompactionCnts) { + return false + } + if !p.Field6DeepEqual(ano.CumulativePoints) { + return false + } + if !p.Field7DeepEqual(ano.SubTxnIds) { return false } return true } -func (p *TMoveDirReq) Field1DeepEqual(src types.TTabletId) bool { +func (p *TCalcDeleteBitmapPartitionInfo) Field1DeepEqual(src types.TPartitionId) bool { + + if p.PartitionId != src { + return false + } + return true +} +func (p 
*TCalcDeleteBitmapPartitionInfo) Field2DeepEqual(src types.TVersion) bool { + + if p.Version != src { + return false + } + return true +} +func (p *TCalcDeleteBitmapPartitionInfo) Field3DeepEqual(src []types.TTabletId) bool { - if p.TabletId != src { + if len(p.TabletIds) != len(src) { return false } + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TMoveDirReq) Field2DeepEqual(src types.TSchemaHash) bool { +func (p *TCalcDeleteBitmapPartitionInfo) Field4DeepEqual(src []int64) bool { - if p.SchemaHash != src { + if len(p.BaseCompactionCnts) != len(src) { return false } + for i, v := range p.BaseCompactionCnts { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TMoveDirReq) Field3DeepEqual(src string) bool { +func (p *TCalcDeleteBitmapPartitionInfo) Field5DeepEqual(src []int64) bool { - if strings.Compare(p.Src, src) != 0 { + if len(p.CumulativeCompactionCnts) != len(src) { return false } + for i, v := range p.CumulativeCompactionCnts { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TMoveDirReq) Field4DeepEqual(src int64) bool { +func (p *TCalcDeleteBitmapPartitionInfo) Field6DeepEqual(src []int64) bool { - if p.JobId != src { + if len(p.CumulativePoints) != len(src) { return false } + for i, v := range p.CumulativePoints { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TMoveDirReq) Field5DeepEqual(src bool) bool { +func (p *TCalcDeleteBitmapPartitionInfo) Field7DeepEqual(src []int64) bool { - if p.Overwrite != src { + if len(p.SubTxnIds) != len(src) { return false } + for i, v := range p.SubTxnIds { + _src := src[i] + if v != _src { + return false + } + } return true } -type TPublishVersionRequest struct { - TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"` - PartitionVersionInfos []*TPartitionVersionInfo `thrift:"partition_version_infos,2,required" frugal:"2,required,list" json:"partition_version_infos"` - StrictMode bool `thrift:"strict_mode,3,optional" frugal:"3,optional,bool" json:"strict_mode,omitempty"` +type TCalcDeleteBitmapRequest struct { + TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"` + Partitions []*TCalcDeleteBitmapPartitionInfo `thrift:"partitions,2,required" frugal:"2,required,list" json:"partitions"` } -func NewTPublishVersionRequest() *TPublishVersionRequest { - return &TPublishVersionRequest{ - - StrictMode: false, - } +func NewTCalcDeleteBitmapRequest() *TCalcDeleteBitmapRequest { + return &TCalcDeleteBitmapRequest{} } -func (p *TPublishVersionRequest) InitDefault() { - *p = TPublishVersionRequest{ - - StrictMode: false, - } +func (p *TCalcDeleteBitmapRequest) InitDefault() { } -func (p *TPublishVersionRequest) GetTransactionId() (v types.TTransactionId) { +func (p *TCalcDeleteBitmapRequest) GetTransactionId() (v types.TTransactionId) { return p.TransactionId } -func (p *TPublishVersionRequest) GetPartitionVersionInfos() (v []*TPartitionVersionInfo) { - return p.PartitionVersionInfos -} - -var TPublishVersionRequest_StrictMode_DEFAULT bool = false - -func (p *TPublishVersionRequest) GetStrictMode() (v bool) { - if !p.IsSetStrictMode() { - return TPublishVersionRequest_StrictMode_DEFAULT - } - return p.StrictMode +func (p *TCalcDeleteBitmapRequest) GetPartitions() (v []*TCalcDeleteBitmapPartitionInfo) { + return p.Partitions } -func (p *TPublishVersionRequest) 
SetTransactionId(val types.TTransactionId) { +func (p *TCalcDeleteBitmapRequest) SetTransactionId(val types.TTransactionId) { p.TransactionId = val } -func (p *TPublishVersionRequest) SetPartitionVersionInfos(val []*TPartitionVersionInfo) { - p.PartitionVersionInfos = val -} -func (p *TPublishVersionRequest) SetStrictMode(val bool) { - p.StrictMode = val +func (p *TCalcDeleteBitmapRequest) SetPartitions(val []*TCalcDeleteBitmapPartitionInfo) { + p.Partitions = val } -var fieldIDToName_TPublishVersionRequest = map[int16]string{ +var fieldIDToName_TCalcDeleteBitmapRequest = map[int16]string{ 1: "transaction_id", - 2: "partition_version_infos", - 3: "strict_mode", -} - -func (p *TPublishVersionRequest) IsSetStrictMode() bool { - return p.StrictMode != TPublishVersionRequest_StrictMode_DEFAULT + 2: "partitions", } -func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TCalcDeleteBitmapRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 var issetTransactionId bool = false - var issetPartitionVersionInfos bool = false + var issetPartitions bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -18242,38 +21473,23 @@ func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTransactionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetPartitionVersionInfos = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPartitions = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18287,7 +21503,7 @@ func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) { goto RequiredFieldNotSetError } - if !issetPartitionVersionInfos { + if !issetPartitions { fieldId = 2 goto RequiredFieldNotSetError } @@ -18297,7 +21513,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18306,50 +21522,47 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapRequest[fieldId])) } -func (p 
*TPublishVersionRequest) ReadField1(iprot thrift.TProtocol) error { +func (p *TCalcDeleteBitmapRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTransactionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TransactionId = v + _field = v } + p.TransactionId = _field return nil } - -func (p *TPublishVersionRequest) ReadField2(iprot thrift.TProtocol) error { +func (p *TCalcDeleteBitmapRequest) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size) + _field := make([]*TCalcDeleteBitmapPartitionInfo, 0, size) + values := make([]TCalcDeleteBitmapPartitionInfo, size) for i := 0; i < size; i++ { - _elem := NewTPartitionVersionInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Partitions = _field return nil } -func (p *TPublishVersionRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.StrictMode = v - } - return nil -} - -func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TCalcDeleteBitmapRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TPublishVersionRequest"); err != nil { + if err = oprot.WriteStructBegin("TCalcDeleteBitmapRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18361,11 +21574,6 @@ func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18384,7 +21592,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPublishVersionRequest) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TCalcDeleteBitmapRequest) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("transaction_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } @@ -18401,14 +21609,14 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPublishVersionRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("partition_version_infos", thrift.LIST, 2); err != nil { +func (p *TCalcDeleteBitmapRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PartitionVersionInfos)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { return err } - for _, v := range p.PartitionVersionInfos { + for _, v := range p.Partitions { if err := v.Write(oprot); err != nil { return err } @@ -18426,33 +21634,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPublishVersionRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetStrictMode() { - if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 3); err != nil { - goto WriteFieldBeginError - } - if err := 
oprot.WriteBool(p.StrictMode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TPublishVersionRequest) String() string { +func (p *TCalcDeleteBitmapRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TPublishVersionRequest(%+v)", *p) + return fmt.Sprintf("TCalcDeleteBitmapRequest(%+v)", *p) + } -func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool { +func (p *TCalcDeleteBitmapRequest) DeepEqual(ano *TCalcDeleteBitmapRequest) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18461,28 +21651,25 @@ func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool { if !p.Field1DeepEqual(ano.TransactionId) { return false } - if !p.Field2DeepEqual(ano.PartitionVersionInfos) { - return false - } - if !p.Field3DeepEqual(ano.StrictMode) { + if !p.Field2DeepEqual(ano.Partitions) { return false } return true } -func (p *TPublishVersionRequest) Field1DeepEqual(src types.TTransactionId) bool { +func (p *TCalcDeleteBitmapRequest) Field1DeepEqual(src types.TTransactionId) bool { if p.TransactionId != src { return false } return true } -func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) bool { +func (p *TCalcDeleteBitmapRequest) Field2DeepEqual(src []*TCalcDeleteBitmapPartitionInfo) bool { - if len(p.PartitionVersionInfos) != len(src) { + if len(p.Partitions) != len(src) { return false } - for i, v := range p.PartitionVersionInfos { + for i, v := range p.Partitions { _src := src[i] if !v.DeepEqual(_src) { return false @@ -18490,13 +21677,6 @@ func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) b } return true } -func (p *TPublishVersionRequest) Field3DeepEqual(src bool) bool { - - if p.StrictMode != src { - return false - } - return true -} type TClearAlterTaskRequest struct { TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` @@ -18508,7 +21688,6 @@ func NewTClearAlterTaskRequest() *TClearAlterTaskRequest { } func (p *TClearAlterTaskRequest) InitDefault() { - *p = TClearAlterTaskRequest{} } func (p *TClearAlterTaskRequest) GetTabletId() (v types.TTabletId) { @@ -18557,10 +21736,8 @@ func (p *TClearAlterTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -18568,17 +21745,14 @@ func (p *TClearAlterTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18615,20 +21789,25 @@ RequiredFieldNotSetError: } func (p *TClearAlterTaskRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field 
return nil } - func (p *TClearAlterTaskRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } @@ -18646,7 +21825,6 @@ func (p *TClearAlterTaskRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18704,6 +21882,7 @@ func (p *TClearAlterTaskRequest) String() string { return "" } return fmt.Sprintf("TClearAlterTaskRequest(%+v)", *p) + } func (p *TClearAlterTaskRequest) DeepEqual(ano *TClearAlterTaskRequest) bool { @@ -18746,7 +21925,6 @@ func NewTClearTransactionTaskRequest() *TClearTransactionTaskRequest { } func (p *TClearTransactionTaskRequest) InitDefault() { - *p = TClearTransactionTaskRequest{} } func (p *TClearTransactionTaskRequest) GetTransactionId() (v types.TTransactionId) { @@ -18795,10 +21973,8 @@ func (p *TClearTransactionTaskRequest) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetTransactionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -18806,17 +21982,14 @@ func (p *TClearTransactionTaskRequest) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetPartitionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18853,21 +22026,24 @@ RequiredFieldNotSetError: } func (p *TClearTransactionTaskRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTransactionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TransactionId = v + _field = v } + p.TransactionId = _field return nil } - func (p *TClearTransactionTaskRequest) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionId = make([]types.TPartitionId, 0, size) + _field := make([]types.TPartitionId, 0, size) for i := 0; i < size; i++ { + var _elem types.TPartitionId if v, err := iprot.ReadI64(); err != nil { return err @@ -18875,11 +22051,12 @@ func (p *TClearTransactionTaskRequest) ReadField2(iprot thrift.TProtocol) error _elem = v } - p.PartitionId = append(p.PartitionId, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionId = _field return nil } @@ -18897,7 +22074,6 @@ func (p *TClearTransactionTaskRequest) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18963,6 +22139,7 @@ func (p *TClearTransactionTaskRequest) String() string { return "" } return fmt.Sprintf("TClearTransactionTaskRequest(%+v)", *p) + } func (p *TClearTransactionTaskRequest) DeepEqual(ano *TClearTransactionTaskRequest) bool { @@ -19013,7 +22190,6 @@ func NewTRecoverTabletReq() *TRecoverTabletReq { } func (p *TRecoverTabletReq) InitDefault() { - *p = TRecoverTabletReq{} } var TRecoverTabletReq_TabletId_DEFAULT types.TTabletId @@ -19111,47 +22287,38 @@ func (p *TRecoverTabletReq) Read(iprot thrift.TProtocol) (err error) { if err = 
p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19177,38 +22344,47 @@ ReadStructEndError: } func (p *TRecoverTabletReq) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.TabletId = _field return nil } - func (p *TRecoverTabletReq) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = &v + _field = &v } + p.SchemaHash = _field return nil } - func (p *TRecoverTabletReq) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = &v + _field = &v } + p.Version = _field return nil } - func (p *TRecoverTabletReq) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = &v + _field = &v } + p.VersionHash = _field return nil } @@ -19234,7 +22410,6 @@ func (p *TRecoverTabletReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19334,6 +22509,7 @@ func (p *TRecoverTabletReq) String() string { return "" } return fmt.Sprintf("TRecoverTabletReq(%+v)", *p) + } func (p *TRecoverTabletReq) DeepEqual(ano *TRecoverTabletReq) bool { @@ -19407,19 +22583,22 @@ func (p *TRecoverTabletReq) Field4DeepEqual(src *types.TVersionHash) bool { } type TTabletMetaInfo struct { - TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"` - SchemaHash *types.TSchemaHash `thrift:"schema_hash,2,optional" frugal:"2,optional,i32" json:"schema_hash,omitempty"` - PartitionId *types.TPartitionId `thrift:"partition_id,3,optional" frugal:"3,optional,i64" json:"partition_id,omitempty"` - IsInMemory *bool `thrift:"is_in_memory,5,optional" frugal:"5,optional,bool" json:"is_in_memory,omitempty"` - StoragePolicyId *int64 `thrift:"storage_policy_id,7,optional" frugal:"7,optional,i64" json:"storage_policy_id,omitempty"` - ReplicaId *types.TReplicaId `thrift:"replica_id,8,optional" frugal:"8,optional,i64" json:"replica_id,omitempty"` - BinlogConfig *TBinlogConfig `thrift:"binlog_config,9,optional" frugal:"9,optional,TBinlogConfig" 
json:"binlog_config,omitempty"` - CompactionPolicy *string `thrift:"compaction_policy,10,optional" frugal:"10,optional,string" json:"compaction_policy,omitempty"` - TimeSeriesCompactionGoalSizeMbytes *int64 `thrift:"time_series_compaction_goal_size_mbytes,11,optional" frugal:"11,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"` - TimeSeriesCompactionFileCountThreshold *int64 `thrift:"time_series_compaction_file_count_threshold,12,optional" frugal:"12,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"` - TimeSeriesCompactionTimeThresholdSeconds *int64 `thrift:"time_series_compaction_time_threshold_seconds,13,optional" frugal:"13,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"` - EnableSingleReplicaCompaction *bool `thrift:"enable_single_replica_compaction,14,optional" frugal:"14,optional,bool" json:"enable_single_replica_compaction,omitempty"` - SkipWriteIndexOnLoad *bool `thrift:"skip_write_index_on_load,15,optional" frugal:"15,optional,bool" json:"skip_write_index_on_load,omitempty"` + TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"` + SchemaHash *types.TSchemaHash `thrift:"schema_hash,2,optional" frugal:"2,optional,i32" json:"schema_hash,omitempty"` + PartitionId *types.TPartitionId `thrift:"partition_id,3,optional" frugal:"3,optional,i64" json:"partition_id,omitempty"` + IsInMemory *bool `thrift:"is_in_memory,5,optional" frugal:"5,optional,bool" json:"is_in_memory,omitempty"` + StoragePolicyId *int64 `thrift:"storage_policy_id,7,optional" frugal:"7,optional,i64" json:"storage_policy_id,omitempty"` + ReplicaId *types.TReplicaId `thrift:"replica_id,8,optional" frugal:"8,optional,i64" json:"replica_id,omitempty"` + BinlogConfig *TBinlogConfig `thrift:"binlog_config,9,optional" frugal:"9,optional,TBinlogConfig" json:"binlog_config,omitempty"` + CompactionPolicy *string `thrift:"compaction_policy,10,optional" frugal:"10,optional,string" json:"compaction_policy,omitempty"` + TimeSeriesCompactionGoalSizeMbytes *int64 `thrift:"time_series_compaction_goal_size_mbytes,11,optional" frugal:"11,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"` + TimeSeriesCompactionFileCountThreshold *int64 `thrift:"time_series_compaction_file_count_threshold,12,optional" frugal:"12,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"` + TimeSeriesCompactionTimeThresholdSeconds *int64 `thrift:"time_series_compaction_time_threshold_seconds,13,optional" frugal:"13,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"` + EnableSingleReplicaCompaction *bool `thrift:"enable_single_replica_compaction,14,optional" frugal:"14,optional,bool" json:"enable_single_replica_compaction,omitempty"` + SkipWriteIndexOnLoad *bool `thrift:"skip_write_index_on_load,15,optional" frugal:"15,optional,bool" json:"skip_write_index_on_load,omitempty"` + DisableAutoCompaction *bool `thrift:"disable_auto_compaction,16,optional" frugal:"16,optional,bool" json:"disable_auto_compaction,omitempty"` + TimeSeriesCompactionEmptyRowsetsThreshold *int64 `thrift:"time_series_compaction_empty_rowsets_threshold,17,optional" frugal:"17,optional,i64" json:"time_series_compaction_empty_rowsets_threshold,omitempty"` + TimeSeriesCompactionLevelThreshold *int64 `thrift:"time_series_compaction_level_threshold,18,optional" frugal:"18,optional,i64" json:"time_series_compaction_level_threshold,omitempty"` } func NewTTabletMetaInfo() *TTabletMetaInfo { @@ -19427,7 
+22606,6 @@ func NewTTabletMetaInfo() *TTabletMetaInfo { } func (p *TTabletMetaInfo) InitDefault() { - *p = TTabletMetaInfo{} } var TTabletMetaInfo_TabletId_DEFAULT types.TTabletId @@ -19546,6 +22724,33 @@ func (p *TTabletMetaInfo) GetSkipWriteIndexOnLoad() (v bool) { } return *p.SkipWriteIndexOnLoad } + +var TTabletMetaInfo_DisableAutoCompaction_DEFAULT bool + +func (p *TTabletMetaInfo) GetDisableAutoCompaction() (v bool) { + if !p.IsSetDisableAutoCompaction() { + return TTabletMetaInfo_DisableAutoCompaction_DEFAULT + } + return *p.DisableAutoCompaction +} + +var TTabletMetaInfo_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT int64 + +func (p *TTabletMetaInfo) GetTimeSeriesCompactionEmptyRowsetsThreshold() (v int64) { + if !p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + return TTabletMetaInfo_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT + } + return *p.TimeSeriesCompactionEmptyRowsetsThreshold +} + +var TTabletMetaInfo_TimeSeriesCompactionLevelThreshold_DEFAULT int64 + +func (p *TTabletMetaInfo) GetTimeSeriesCompactionLevelThreshold() (v int64) { + if !p.IsSetTimeSeriesCompactionLevelThreshold() { + return TTabletMetaInfo_TimeSeriesCompactionLevelThreshold_DEFAULT + } + return *p.TimeSeriesCompactionLevelThreshold +} func (p *TTabletMetaInfo) SetTabletId(val *types.TTabletId) { p.TabletId = val } @@ -19585,6 +22790,15 @@ func (p *TTabletMetaInfo) SetEnableSingleReplicaCompaction(val *bool) { func (p *TTabletMetaInfo) SetSkipWriteIndexOnLoad(val *bool) { p.SkipWriteIndexOnLoad = val } +func (p *TTabletMetaInfo) SetDisableAutoCompaction(val *bool) { + p.DisableAutoCompaction = val +} +func (p *TTabletMetaInfo) SetTimeSeriesCompactionEmptyRowsetsThreshold(val *int64) { + p.TimeSeriesCompactionEmptyRowsetsThreshold = val +} +func (p *TTabletMetaInfo) SetTimeSeriesCompactionLevelThreshold(val *int64) { + p.TimeSeriesCompactionLevelThreshold = val +} var fieldIDToName_TTabletMetaInfo = map[int16]string{ 1: "tablet_id", @@ -19600,6 +22814,9 @@ var fieldIDToName_TTabletMetaInfo = map[int16]string{ 13: "time_series_compaction_time_threshold_seconds", 14: "enable_single_replica_compaction", 15: "skip_write_index_on_load", + 16: "disable_auto_compaction", + 17: "time_series_compaction_empty_rowsets_threshold", + 18: "time_series_compaction_level_threshold", } func (p *TTabletMetaInfo) IsSetTabletId() bool { @@ -19654,6 +22871,18 @@ func (p *TTabletMetaInfo) IsSetSkipWriteIndexOnLoad() bool { return p.SkipWriteIndexOnLoad != nil } +func (p *TTabletMetaInfo) IsSetDisableAutoCompaction() bool { + return p.DisableAutoCompaction != nil +} + +func (p *TTabletMetaInfo) IsSetTimeSeriesCompactionEmptyRowsetsThreshold() bool { + return p.TimeSeriesCompactionEmptyRowsetsThreshold != nil +} + +func (p *TTabletMetaInfo) IsSetTimeSeriesCompactionLevelThreshold() bool { + return p.TimeSeriesCompactionLevelThreshold != nil +} + func (p *TTabletMetaInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -19678,137 +22907,134 @@ func (p *TTabletMetaInfo) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.BOOL { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.I64 { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 15: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField15(iprot); err != nil { + case 18: + if fieldTypeId == thrift.I64 { + if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err 
= iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19834,118 +23060,176 @@ ReadStructEndError: } func (p *TTabletMetaInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.TabletId = _field return nil } - func (p *TTabletMetaInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = &v + _field = &v } + p.SchemaHash = _field return nil } - func (p *TTabletMetaInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TPartitionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = &v + _field = &v } + p.PartitionId = _field return nil } - func (p *TTabletMetaInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsInMemory = &v + _field = &v } + p.IsInMemory = _field return nil } - func (p *TTabletMetaInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.StoragePolicyId = &v + _field = &v } + p.StoragePolicyId = _field return nil } - func (p *TTabletMetaInfo) ReadField8(iprot thrift.TProtocol) error { + + var _field *types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = &v + _field = &v } + p.ReplicaId = _field return nil } - func (p *TTabletMetaInfo) ReadField9(iprot thrift.TProtocol) error { - p.BinlogConfig = NewTBinlogConfig() - if err := p.BinlogConfig.Read(iprot); err != nil { + _field := NewTBinlogConfig() + if err := _field.Read(iprot); err != nil { return err } + p.BinlogConfig = _field return nil } - func (p *TTabletMetaInfo) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.CompactionPolicy = &v + _field = &v } + p.CompactionPolicy = _field return nil } - func (p *TTabletMetaInfo) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionGoalSizeMbytes = &v + _field = &v } + p.TimeSeriesCompactionGoalSizeMbytes = _field return nil } - func (p *TTabletMetaInfo) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionFileCountThreshold = &v + _field = &v } + p.TimeSeriesCompactionFileCountThreshold = _field return nil } - func (p *TTabletMetaInfo) ReadField13(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TimeSeriesCompactionTimeThresholdSeconds = &v + _field = &v } + p.TimeSeriesCompactionTimeThresholdSeconds = _field return nil } - func (p *TTabletMetaInfo) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EnableSingleReplicaCompaction = &v + _field = &v } + p.EnableSingleReplicaCompaction = _field return nil } - func (p *TTabletMetaInfo) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); 
err != nil { + return err + } else { + _field = &v + } + p.SkipWriteIndexOnLoad = _field + return nil +} +func (p *TTabletMetaInfo) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.SkipWriteIndexOnLoad = &v + _field = &v + } + p.DisableAutoCompaction = _field + return nil +} +func (p *TTabletMetaInfo) ReadField17(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TimeSeriesCompactionEmptyRowsetsThreshold = _field + return nil +} +func (p *TTabletMetaInfo) ReadField18(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.TimeSeriesCompactionLevelThreshold = _field return nil } @@ -20007,7 +23291,18 @@ func (p *TTabletMetaInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 15 goto WriteFieldError } - + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20273,11 +23568,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } +func (p *TTabletMetaInfo) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetDisableAutoCompaction() { + if err = oprot.WriteFieldBegin("disable_auto_compaction", thrift.BOOL, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.DisableAutoCompaction); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TTabletMetaInfo) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + if err = oprot.WriteFieldBegin("time_series_compaction_empty_rowsets_threshold", thrift.I64, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TimeSeriesCompactionEmptyRowsetsThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TTabletMetaInfo) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeSeriesCompactionLevelThreshold() { + if err = oprot.WriteFieldBegin("time_series_compaction_level_threshold", thrift.I64, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TimeSeriesCompactionLevelThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + func (p *TTabletMetaInfo) String() string { if p == nil { return "" } return 
fmt.Sprintf("TTabletMetaInfo(%+v)", *p) + } func (p *TTabletMetaInfo) DeepEqual(ano *TTabletMetaInfo) bool { @@ -20325,6 +23678,15 @@ func (p *TTabletMetaInfo) DeepEqual(ano *TTabletMetaInfo) bool { if !p.Field15DeepEqual(ano.SkipWriteIndexOnLoad) { return false } + if !p.Field16DeepEqual(ano.DisableAutoCompaction) { + return false + } + if !p.Field17DeepEqual(ano.TimeSeriesCompactionEmptyRowsetsThreshold) { + return false + } + if !p.Field18DeepEqual(ano.TimeSeriesCompactionLevelThreshold) { + return false + } return true } @@ -20479,6 +23841,42 @@ func (p *TTabletMetaInfo) Field15DeepEqual(src *bool) bool { } return true } +func (p *TTabletMetaInfo) Field16DeepEqual(src *bool) bool { + + if p.DisableAutoCompaction == src { + return true + } else if p.DisableAutoCompaction == nil || src == nil { + return false + } + if *p.DisableAutoCompaction != *src { + return false + } + return true +} +func (p *TTabletMetaInfo) Field17DeepEqual(src *int64) bool { + + if p.TimeSeriesCompactionEmptyRowsetsThreshold == src { + return true + } else if p.TimeSeriesCompactionEmptyRowsetsThreshold == nil || src == nil { + return false + } + if *p.TimeSeriesCompactionEmptyRowsetsThreshold != *src { + return false + } + return true +} +func (p *TTabletMetaInfo) Field18DeepEqual(src *int64) bool { + + if p.TimeSeriesCompactionLevelThreshold == src { + return true + } else if p.TimeSeriesCompactionLevelThreshold == nil || src == nil { + return false + } + if *p.TimeSeriesCompactionLevelThreshold != *src { + return false + } + return true +} type TUpdateTabletMetaInfoReq struct { TabletMetaInfos []*TTabletMetaInfo `thrift:"tabletMetaInfos,1,optional" frugal:"1,optional,list" json:"tabletMetaInfos,omitempty"` @@ -20489,7 +23887,6 @@ func NewTUpdateTabletMetaInfoReq() *TUpdateTabletMetaInfoReq { } func (p *TUpdateTabletMetaInfoReq) InitDefault() { - *p = TUpdateTabletMetaInfoReq{} } var TUpdateTabletMetaInfoReq_TabletMetaInfos_DEFAULT []*TTabletMetaInfo @@ -20536,17 +23933,14 @@ func (p *TUpdateTabletMetaInfoReq) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20576,18 +23970,22 @@ func (p *TUpdateTabletMetaInfoReq) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TabletMetaInfos = make([]*TTabletMetaInfo, 0, size) + _field := make([]*TTabletMetaInfo, 0, size) + values := make([]TTabletMetaInfo, size) for i := 0; i < size; i++ { - _elem := NewTTabletMetaInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TabletMetaInfos = append(p.TabletMetaInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletMetaInfos = _field return nil } @@ -20601,7 +23999,6 @@ func (p *TUpdateTabletMetaInfoReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20652,6 +24049,7 @@ func (p *TUpdateTabletMetaInfoReq) String() string { return "" } return fmt.Sprintf("TUpdateTabletMetaInfoReq(%+v)", *p) + } func (p *TUpdateTabletMetaInfoReq) DeepEqual(ano *TUpdateTabletMetaInfoReq) bool { @@ -20692,7 +24090,6 @@ func 
NewTPluginMetaInfo() *TPluginMetaInfo { } func (p *TPluginMetaInfo) InitDefault() { - *p = TPluginMetaInfo{} } func (p *TPluginMetaInfo) GetName() (v string) { @@ -20775,10 +24172,8 @@ func (p *TPluginMetaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -20786,37 +24181,30 @@ func (p *TPluginMetaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20853,38 +24241,47 @@ RequiredFieldNotSetError: } func (p *TPluginMetaInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TPluginMetaInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = v + _field = v } + p.Type = _field return nil } - func (p *TPluginMetaInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SoName = &v + _field = &v } + p.SoName = _field return nil } - func (p *TPluginMetaInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Source = &v + _field = &v } + p.Source = _field return nil } @@ -20910,7 +24307,6 @@ func (p *TPluginMetaInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -21006,6 +24402,7 @@ func (p *TPluginMetaInfo) String() string { return "" } return fmt.Sprintf("TPluginMetaInfo(%+v)", *p) + } func (p *TPluginMetaInfo) DeepEqual(ano *TPluginMetaInfo) bool { @@ -21079,7 +24476,6 @@ func NewTCooldownConf() *TCooldownConf { } func (p *TCooldownConf) InitDefault() { - *p = TCooldownConf{} } func (p *TCooldownConf) GetTabletId() (v types.TTabletId) { @@ -21153,37 +24549,30 @@ func (p *TCooldownConf) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -21215,29 +24604,36 @@ RequiredFieldNotSetError: } func (p *TCooldownConf) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TCooldownConf) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownReplicaId = &v + _field = &v } + p.CooldownReplicaId = _field return nil } - func (p *TCooldownConf) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownTerm = &v + _field = &v } + p.CooldownTerm = _field return nil } @@ -21259,7 +24655,6 @@ func (p *TCooldownConf) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -21338,6 +24733,7 @@ func (p *TCooldownConf) String() string { return "" } return fmt.Sprintf("TCooldownConf(%+v)", *p) + } func (p *TCooldownConf) DeepEqual(ano *TCooldownConf) bool { @@ -21399,7 +24795,6 @@ func NewTPushCooldownConfReq() *TPushCooldownConfReq { } func (p *TPushCooldownConfReq) InitDefault() { - *p = TPushCooldownConfReq{} } func (p *TPushCooldownConfReq) GetCooldownConfs() (v []*TCooldownConf) { @@ -21439,17 +24834,14 @@ func (p *TPushCooldownConfReq) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCooldownConfs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -21485,18 +24877,22 @@ func (p *TPushCooldownConfReq) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.CooldownConfs = make([]*TCooldownConf, 0, size) + _field := make([]*TCooldownConf, 0, size) + values := make([]TCooldownConf, size) for i := 0; i < size; i++ { - _elem := NewTCooldownConf() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.CooldownConfs = append(p.CooldownConfs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.CooldownConfs = _field return nil } @@ -21510,7 +24906,6 @@ func (p *TPushCooldownConfReq) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -21559,6 +24954,7 @@ func (p *TPushCooldownConfReq) String() string { return "" } return fmt.Sprintf("TPushCooldownConfReq(%+v)", *p) + } func (p *TPushCooldownConfReq) DeepEqual(ano *TPushCooldownConfReq) bool { @@ -21620,6 +25016,10 @@ type TAgentTaskRequest struct { PushStoragePolicyReq *TPushStoragePolicyReq `thrift:"push_storage_policy_req,31,optional" 
frugal:"31,optional,TPushStoragePolicyReq" json:"push_storage_policy_req,omitempty"` AlterInvertedIndexReq *TAlterInvertedIndexReq `thrift:"alter_inverted_index_req,32,optional" frugal:"32,optional,TAlterInvertedIndexReq" json:"alter_inverted_index_req,omitempty"` GcBinlogReq *TGcBinlogReq `thrift:"gc_binlog_req,33,optional" frugal:"33,optional,TGcBinlogReq" json:"gc_binlog_req,omitempty"` + CleanTrashReq *TCleanTrashReq `thrift:"clean_trash_req,34,optional" frugal:"34,optional,TCleanTrashReq" json:"clean_trash_req,omitempty"` + VisibleVersionReq *TVisibleVersionReq `thrift:"visible_version_req,35,optional" frugal:"35,optional,TVisibleVersionReq" json:"visible_version_req,omitempty"` + CleanUdfCacheReq *TCleanUDFCacheReq `thrift:"clean_udf_cache_req,36,optional" frugal:"36,optional,TCleanUDFCacheReq" json:"clean_udf_cache_req,omitempty"` + CalcDeleteBitmapReq *TCalcDeleteBitmapRequest `thrift:"calc_delete_bitmap_req,1000,optional" frugal:"1000,optional,TCalcDeleteBitmapRequest" json:"calc_delete_bitmap_req,omitempty"` } func NewTAgentTaskRequest() *TAgentTaskRequest { @@ -21627,7 +25027,6 @@ func NewTAgentTaskRequest() *TAgentTaskRequest { } func (p *TAgentTaskRequest) InitDefault() { - *p = TAgentTaskRequest{} } func (p *TAgentTaskRequest) GetProtocolVersion() (v TAgentServiceVersion) { @@ -21902,6 +25301,42 @@ func (p *TAgentTaskRequest) GetGcBinlogReq() (v *TGcBinlogReq) { } return p.GcBinlogReq } + +var TAgentTaskRequest_CleanTrashReq_DEFAULT *TCleanTrashReq + +func (p *TAgentTaskRequest) GetCleanTrashReq() (v *TCleanTrashReq) { + if !p.IsSetCleanTrashReq() { + return TAgentTaskRequest_CleanTrashReq_DEFAULT + } + return p.CleanTrashReq +} + +var TAgentTaskRequest_VisibleVersionReq_DEFAULT *TVisibleVersionReq + +func (p *TAgentTaskRequest) GetVisibleVersionReq() (v *TVisibleVersionReq) { + if !p.IsSetVisibleVersionReq() { + return TAgentTaskRequest_VisibleVersionReq_DEFAULT + } + return p.VisibleVersionReq +} + +var TAgentTaskRequest_CleanUdfCacheReq_DEFAULT *TCleanUDFCacheReq + +func (p *TAgentTaskRequest) GetCleanUdfCacheReq() (v *TCleanUDFCacheReq) { + if !p.IsSetCleanUdfCacheReq() { + return TAgentTaskRequest_CleanUdfCacheReq_DEFAULT + } + return p.CleanUdfCacheReq +} + +var TAgentTaskRequest_CalcDeleteBitmapReq_DEFAULT *TCalcDeleteBitmapRequest + +func (p *TAgentTaskRequest) GetCalcDeleteBitmapReq() (v *TCalcDeleteBitmapRequest) { + if !p.IsSetCalcDeleteBitmapReq() { + return TAgentTaskRequest_CalcDeleteBitmapReq_DEFAULT + } + return p.CalcDeleteBitmapReq +} func (p *TAgentTaskRequest) SetProtocolVersion(val TAgentServiceVersion) { p.ProtocolVersion = val } @@ -21998,40 +25433,56 @@ func (p *TAgentTaskRequest) SetAlterInvertedIndexReq(val *TAlterInvertedIndexReq func (p *TAgentTaskRequest) SetGcBinlogReq(val *TGcBinlogReq) { p.GcBinlogReq = val } +func (p *TAgentTaskRequest) SetCleanTrashReq(val *TCleanTrashReq) { + p.CleanTrashReq = val +} +func (p *TAgentTaskRequest) SetVisibleVersionReq(val *TVisibleVersionReq) { + p.VisibleVersionReq = val +} +func (p *TAgentTaskRequest) SetCleanUdfCacheReq(val *TCleanUDFCacheReq) { + p.CleanUdfCacheReq = val +} +func (p *TAgentTaskRequest) SetCalcDeleteBitmapReq(val *TCalcDeleteBitmapRequest) { + p.CalcDeleteBitmapReq = val +} var fieldIDToName_TAgentTaskRequest = map[int16]string{ - 1: "protocol_version", - 2: "task_type", - 3: "signature", - 4: "priority", - 5: "create_tablet_req", - 6: "drop_tablet_req", - 7: "alter_tablet_req", - 8: "clone_req", - 9: "push_req", - 10: "cancel_delete_data_req", - 11: "resource_info", - 12: 
"storage_medium_migrate_req", - 13: "check_consistency_req", - 14: "upload_req", - 15: "download_req", - 16: "snapshot_req", - 17: "release_snapshot_req", - 18: "clear_remote_file_req", - 19: "publish_version_req", - 20: "clear_alter_task_req", - 21: "clear_transaction_task_req", - 22: "move_dir_req", - 23: "recover_tablet_req", - 24: "alter_tablet_req_v2", - 25: "recv_time", - 26: "update_tablet_meta_info_req", - 27: "compaction_req", - 28: "storage_migration_req_v2", - 30: "push_cooldown_conf", - 31: "push_storage_policy_req", - 32: "alter_inverted_index_req", - 33: "gc_binlog_req", + 1: "protocol_version", + 2: "task_type", + 3: "signature", + 4: "priority", + 5: "create_tablet_req", + 6: "drop_tablet_req", + 7: "alter_tablet_req", + 8: "clone_req", + 9: "push_req", + 10: "cancel_delete_data_req", + 11: "resource_info", + 12: "storage_medium_migrate_req", + 13: "check_consistency_req", + 14: "upload_req", + 15: "download_req", + 16: "snapshot_req", + 17: "release_snapshot_req", + 18: "clear_remote_file_req", + 19: "publish_version_req", + 20: "clear_alter_task_req", + 21: "clear_transaction_task_req", + 22: "move_dir_req", + 23: "recover_tablet_req", + 24: "alter_tablet_req_v2", + 25: "recv_time", + 26: "update_tablet_meta_info_req", + 27: "compaction_req", + 28: "storage_migration_req_v2", + 30: "push_cooldown_conf", + 31: "push_storage_policy_req", + 32: "alter_inverted_index_req", + 33: "gc_binlog_req", + 34: "clean_trash_req", + 35: "visible_version_req", + 36: "clean_udf_cache_req", + 1000: "calc_delete_bitmap_req", } func (p *TAgentTaskRequest) IsSetPriority() bool { @@ -22150,6 +25601,22 @@ func (p *TAgentTaskRequest) IsSetGcBinlogReq() bool { return p.GcBinlogReq != nil } +func (p *TAgentTaskRequest) IsSetCleanTrashReq() bool { + return p.CleanTrashReq != nil +} + +func (p *TAgentTaskRequest) IsSetVisibleVersionReq() bool { + return p.VisibleVersionReq != nil +} + +func (p *TAgentTaskRequest) IsSetCleanUdfCacheReq() bool { + return p.CleanUdfCacheReq != nil +} + +func (p *TAgentTaskRequest) IsSetCalcDeleteBitmapReq() bool { + return p.CalcDeleteBitmapReq != nil +} + func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -22178,10 +25645,8 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -22189,10 +25654,8 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTaskType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -22200,307 +25663,278 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSignature = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = 
p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRUCT { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.STRUCT { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.STRUCT { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.STRUCT 
{ if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRUCT { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.STRUCT { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.STRUCT { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: if fieldTypeId == thrift.STRUCT { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: if fieldTypeId == thrift.STRUCT { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: if fieldTypeId == thrift.STRUCT { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: if fieldTypeId == thrift.I64 { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 26: if fieldTypeId == thrift.STRUCT { if err = p.ReadField26(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 27: if fieldTypeId == thrift.STRUCT { if err = p.ReadField27(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 28: if fieldTypeId == thrift.STRUCT { if err = p.ReadField28(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 30: if fieldTypeId == thrift.STRUCT { if err = p.ReadField30(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 31: if fieldTypeId == thrift.STRUCT { if err = p.ReadField31(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 32: if fieldTypeId 
== thrift.STRUCT { if err = p.ReadField32(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 33: if fieldTypeId == thrift.STRUCT { if err = p.ReadField33(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 34: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField34(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 35: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField35(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 36: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField36(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -22542,264 +25976,307 @@ RequiredFieldNotSetError: } func (p *TAgentTaskRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field TAgentServiceVersion if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ProtocolVersion = TAgentServiceVersion(v) + _field = TAgentServiceVersion(v) } + p.ProtocolVersion = _field return nil } - func (p *TAgentTaskRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTaskType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TaskType = types.TTaskType(v) + _field = types.TTaskType(v) } + p.TaskType = _field return nil } - func (p *TAgentTaskRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Signature = v + _field = v } + p.Signature = _field return nil } - func (p *TAgentTaskRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TPriority if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TPriority(v) - p.Priority = &tmp + _field = &tmp } + p.Priority = _field return nil } - func (p *TAgentTaskRequest) ReadField5(iprot thrift.TProtocol) error { - p.CreateTabletReq = NewTCreateTabletReq() - if err := p.CreateTabletReq.Read(iprot); err != nil { + _field := NewTCreateTabletReq() + if err := _field.Read(iprot); err != nil { return err } + p.CreateTabletReq = _field return nil } - func (p *TAgentTaskRequest) ReadField6(iprot thrift.TProtocol) error { - p.DropTabletReq = NewTDropTabletReq() - if err := p.DropTabletReq.Read(iprot); err != nil { + _field := NewTDropTabletReq() + if err := _field.Read(iprot); err != nil { return err } + p.DropTabletReq = _field return nil } - func (p *TAgentTaskRequest) ReadField7(iprot thrift.TProtocol) error { - p.AlterTabletReq = NewTAlterTabletReq() - if err := p.AlterTabletReq.Read(iprot); err != nil { + _field := NewTAlterTabletReq() + if err := _field.Read(iprot); err != nil { return err } + p.AlterTabletReq = _field return nil } - func (p *TAgentTaskRequest) ReadField8(iprot 
thrift.TProtocol) error { - p.CloneReq = NewTCloneReq() - if err := p.CloneReq.Read(iprot); err != nil { + _field := NewTCloneReq() + if err := _field.Read(iprot); err != nil { return err } + p.CloneReq = _field return nil } - func (p *TAgentTaskRequest) ReadField9(iprot thrift.TProtocol) error { - p.PushReq = NewTPushReq() - if err := p.PushReq.Read(iprot); err != nil { + _field := NewTPushReq() + if err := _field.Read(iprot); err != nil { return err } + p.PushReq = _field return nil } - func (p *TAgentTaskRequest) ReadField10(iprot thrift.TProtocol) error { - p.CancelDeleteDataReq = NewTCancelDeleteDataReq() - if err := p.CancelDeleteDataReq.Read(iprot); err != nil { + _field := NewTCancelDeleteDataReq() + if err := _field.Read(iprot); err != nil { return err } + p.CancelDeleteDataReq = _field return nil } - func (p *TAgentTaskRequest) ReadField11(iprot thrift.TProtocol) error { - p.ResourceInfo = types.NewTResourceInfo() - if err := p.ResourceInfo.Read(iprot); err != nil { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { return err } + p.ResourceInfo = _field return nil } - func (p *TAgentTaskRequest) ReadField12(iprot thrift.TProtocol) error { - p.StorageMediumMigrateReq = NewTStorageMediumMigrateReq() - if err := p.StorageMediumMigrateReq.Read(iprot); err != nil { + _field := NewTStorageMediumMigrateReq() + if err := _field.Read(iprot); err != nil { return err } + p.StorageMediumMigrateReq = _field return nil } - func (p *TAgentTaskRequest) ReadField13(iprot thrift.TProtocol) error { - p.CheckConsistencyReq = NewTCheckConsistencyReq() - if err := p.CheckConsistencyReq.Read(iprot); err != nil { + _field := NewTCheckConsistencyReq() + if err := _field.Read(iprot); err != nil { return err } + p.CheckConsistencyReq = _field return nil } - func (p *TAgentTaskRequest) ReadField14(iprot thrift.TProtocol) error { - p.UploadReq = NewTUploadReq() - if err := p.UploadReq.Read(iprot); err != nil { + _field := NewTUploadReq() + if err := _field.Read(iprot); err != nil { return err } + p.UploadReq = _field return nil } - func (p *TAgentTaskRequest) ReadField15(iprot thrift.TProtocol) error { - p.DownloadReq = NewTDownloadReq() - if err := p.DownloadReq.Read(iprot); err != nil { + _field := NewTDownloadReq() + if err := _field.Read(iprot); err != nil { return err } + p.DownloadReq = _field return nil } - func (p *TAgentTaskRequest) ReadField16(iprot thrift.TProtocol) error { - p.SnapshotReq = NewTSnapshotRequest() - if err := p.SnapshotReq.Read(iprot); err != nil { + _field := NewTSnapshotRequest() + if err := _field.Read(iprot); err != nil { return err } + p.SnapshotReq = _field return nil } - func (p *TAgentTaskRequest) ReadField17(iprot thrift.TProtocol) error { - p.ReleaseSnapshotReq = NewTReleaseSnapshotRequest() - if err := p.ReleaseSnapshotReq.Read(iprot); err != nil { + _field := NewTReleaseSnapshotRequest() + if err := _field.Read(iprot); err != nil { return err } + p.ReleaseSnapshotReq = _field return nil } - func (p *TAgentTaskRequest) ReadField18(iprot thrift.TProtocol) error { - p.ClearRemoteFileReq = NewTClearRemoteFileReq() - if err := p.ClearRemoteFileReq.Read(iprot); err != nil { + _field := NewTClearRemoteFileReq() + if err := _field.Read(iprot); err != nil { return err } + p.ClearRemoteFileReq = _field return nil } - func (p *TAgentTaskRequest) ReadField19(iprot thrift.TProtocol) error { - p.PublishVersionReq = NewTPublishVersionRequest() - if err := p.PublishVersionReq.Read(iprot); err != nil { + _field := NewTPublishVersionRequest() + if err := 
_field.Read(iprot); err != nil { return err } + p.PublishVersionReq = _field return nil } - func (p *TAgentTaskRequest) ReadField20(iprot thrift.TProtocol) error { - p.ClearAlterTaskReq = NewTClearAlterTaskRequest() - if err := p.ClearAlterTaskReq.Read(iprot); err != nil { + _field := NewTClearAlterTaskRequest() + if err := _field.Read(iprot); err != nil { return err } + p.ClearAlterTaskReq = _field return nil } - func (p *TAgentTaskRequest) ReadField21(iprot thrift.TProtocol) error { - p.ClearTransactionTaskReq = NewTClearTransactionTaskRequest() - if err := p.ClearTransactionTaskReq.Read(iprot); err != nil { + _field := NewTClearTransactionTaskRequest() + if err := _field.Read(iprot); err != nil { return err } + p.ClearTransactionTaskReq = _field return nil } - func (p *TAgentTaskRequest) ReadField22(iprot thrift.TProtocol) error { - p.MoveDirReq = NewTMoveDirReq() - if err := p.MoveDirReq.Read(iprot); err != nil { + _field := NewTMoveDirReq() + if err := _field.Read(iprot); err != nil { return err } + p.MoveDirReq = _field return nil } - func (p *TAgentTaskRequest) ReadField23(iprot thrift.TProtocol) error { - p.RecoverTabletReq = NewTRecoverTabletReq() - if err := p.RecoverTabletReq.Read(iprot); err != nil { + _field := NewTRecoverTabletReq() + if err := _field.Read(iprot); err != nil { return err } + p.RecoverTabletReq = _field return nil } - func (p *TAgentTaskRequest) ReadField24(iprot thrift.TProtocol) error { - p.AlterTabletReqV2 = NewTAlterTabletReqV2() - if err := p.AlterTabletReqV2.Read(iprot); err != nil { + _field := NewTAlterTabletReqV2() + if err := _field.Read(iprot); err != nil { return err } + p.AlterTabletReqV2 = _field return nil } - func (p *TAgentTaskRequest) ReadField25(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RecvTime = &v + _field = &v } + p.RecvTime = _field return nil } - func (p *TAgentTaskRequest) ReadField26(iprot thrift.TProtocol) error { - p.UpdateTabletMetaInfoReq = NewTUpdateTabletMetaInfoReq() - if err := p.UpdateTabletMetaInfoReq.Read(iprot); err != nil { + _field := NewTUpdateTabletMetaInfoReq() + if err := _field.Read(iprot); err != nil { return err } + p.UpdateTabletMetaInfoReq = _field return nil } - func (p *TAgentTaskRequest) ReadField27(iprot thrift.TProtocol) error { - p.CompactionReq = NewTCompactionReq() - if err := p.CompactionReq.Read(iprot); err != nil { + _field := NewTCompactionReq() + if err := _field.Read(iprot); err != nil { return err } + p.CompactionReq = _field return nil } - func (p *TAgentTaskRequest) ReadField28(iprot thrift.TProtocol) error { - p.StorageMigrationReqV2 = NewTStorageMigrationReqV2() - if err := p.StorageMigrationReqV2.Read(iprot); err != nil { + _field := NewTStorageMigrationReqV2() + if err := _field.Read(iprot); err != nil { return err } + p.StorageMigrationReqV2 = _field return nil } - func (p *TAgentTaskRequest) ReadField30(iprot thrift.TProtocol) error { - p.PushCooldownConf = NewTPushCooldownConfReq() - if err := p.PushCooldownConf.Read(iprot); err != nil { + _field := NewTPushCooldownConfReq() + if err := _field.Read(iprot); err != nil { return err } + p.PushCooldownConf = _field return nil } - func (p *TAgentTaskRequest) ReadField31(iprot thrift.TProtocol) error { - p.PushStoragePolicyReq = NewTPushStoragePolicyReq() - if err := p.PushStoragePolicyReq.Read(iprot); err != nil { + _field := NewTPushStoragePolicyReq() + if err := _field.Read(iprot); err != nil { return err } + p.PushStoragePolicyReq = _field return nil } - func (p 
*TAgentTaskRequest) ReadField32(iprot thrift.TProtocol) error { - p.AlterInvertedIndexReq = NewTAlterInvertedIndexReq() - if err := p.AlterInvertedIndexReq.Read(iprot); err != nil { + _field := NewTAlterInvertedIndexReq() + if err := _field.Read(iprot); err != nil { return err } + p.AlterInvertedIndexReq = _field return nil } - func (p *TAgentTaskRequest) ReadField33(iprot thrift.TProtocol) error { - p.GcBinlogReq = NewTGcBinlogReq() - if err := p.GcBinlogReq.Read(iprot); err != nil { + _field := NewTGcBinlogReq() + if err := _field.Read(iprot); err != nil { + return err + } + p.GcBinlogReq = _field + return nil +} +func (p *TAgentTaskRequest) ReadField34(iprot thrift.TProtocol) error { + _field := NewTCleanTrashReq() + if err := _field.Read(iprot); err != nil { + return err + } + p.CleanTrashReq = _field + return nil +} +func (p *TAgentTaskRequest) ReadField35(iprot thrift.TProtocol) error { + _field := NewTVisibleVersionReq() + if err := _field.Read(iprot); err != nil { return err } + p.VisibleVersionReq = _field + return nil +} +func (p *TAgentTaskRequest) ReadField36(iprot thrift.TProtocol) error { + _field := NewTCleanUDFCacheReq() + if err := _field.Read(iprot); err != nil { + return err + } + p.CleanUdfCacheReq = _field + return nil +} +func (p *TAgentTaskRequest) ReadField1000(iprot thrift.TProtocol) error { + _field := NewTCalcDeleteBitmapRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.CalcDeleteBitmapReq = _field return nil } @@ -22937,7 +26414,22 @@ func (p *TAgentTaskRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 33 goto WriteFieldError } - + if err = p.writeField34(oprot); err != nil { + fieldId = 34 + goto WriteFieldError + } + if err = p.writeField35(oprot); err != nil { + fieldId = 35 + goto WriteFieldError + } + if err = p.writeField36(oprot); err != nil { + fieldId = 36 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -23558,11 +27050,88 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) } +func (p *TAgentTaskRequest) writeField34(oprot thrift.TProtocol) (err error) { + if p.IsSetCleanTrashReq() { + if err = oprot.WriteFieldBegin("clean_trash_req", thrift.STRUCT, 34); err != nil { + goto WriteFieldBeginError + } + if err := p.CleanTrashReq.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) +} + +func (p *TAgentTaskRequest) writeField35(oprot thrift.TProtocol) (err error) { + if p.IsSetVisibleVersionReq() { + if err = oprot.WriteFieldBegin("visible_version_req", thrift.STRUCT, 35); err != nil { + goto WriteFieldBeginError + } + if err := p.VisibleVersionReq.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err) +} + +func (p *TAgentTaskRequest) writeField36(oprot thrift.TProtocol) (err error) { + if p.IsSetCleanUdfCacheReq() { + if err = 
oprot.WriteFieldBegin("clean_udf_cache_req", thrift.STRUCT, 36); err != nil { + goto WriteFieldBeginError + } + if err := p.CleanUdfCacheReq.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err) +} + +func (p *TAgentTaskRequest) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetCalcDeleteBitmapReq() { + if err = oprot.WriteFieldBegin("calc_delete_bitmap_req", thrift.STRUCT, 1000); err != nil { + goto WriteFieldBeginError + } + if err := p.CalcDeleteBitmapReq.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + func (p *TAgentTaskRequest) String() string { if p == nil { return "" } return fmt.Sprintf("TAgentTaskRequest(%+v)", *p) + } func (p *TAgentTaskRequest) DeepEqual(ano *TAgentTaskRequest) bool { @@ -23667,6 +27236,18 @@ func (p *TAgentTaskRequest) DeepEqual(ano *TAgentTaskRequest) bool { if !p.Field33DeepEqual(ano.GcBinlogReq) { return false } + if !p.Field34DeepEqual(ano.CleanTrashReq) { + return false + } + if !p.Field35DeepEqual(ano.VisibleVersionReq) { + return false + } + if !p.Field36DeepEqual(ano.CleanUdfCacheReq) { + return false + } + if !p.Field1000DeepEqual(ano.CalcDeleteBitmapReq) { + return false + } return true } @@ -23904,6 +27485,34 @@ func (p *TAgentTaskRequest) Field33DeepEqual(src *TGcBinlogReq) bool { } return true } +func (p *TAgentTaskRequest) Field34DeepEqual(src *TCleanTrashReq) bool { + + if !p.CleanTrashReq.DeepEqual(src) { + return false + } + return true +} +func (p *TAgentTaskRequest) Field35DeepEqual(src *TVisibleVersionReq) bool { + + if !p.VisibleVersionReq.DeepEqual(src) { + return false + } + return true +} +func (p *TAgentTaskRequest) Field36DeepEqual(src *TCleanUDFCacheReq) bool { + + if !p.CleanUdfCacheReq.DeepEqual(src) { + return false + } + return true +} +func (p *TAgentTaskRequest) Field1000DeepEqual(src *TCalcDeleteBitmapRequest) bool { + + if !p.CalcDeleteBitmapReq.DeepEqual(src) { + return false + } + return true +} type TAgentResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` @@ -23920,10 +27529,7 @@ func NewTAgentResult_() *TAgentResult_ { } func (p *TAgentResult_) InitDefault() { - *p = TAgentResult_{ - - SnapshotVersion: 1, - } + p.SnapshotVersion = 1 } var TAgentResult__Status_DEFAULT *status.TStatus @@ -24023,47 +27629,38 @@ func (p *TAgentResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } 
- } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -24095,37 +27692,44 @@ RequiredFieldNotSetError: } func (p *TAgentResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } - func (p *TAgentResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SnapshotPath = &v + _field = &v } + p.SnapshotPath = _field return nil } - func (p *TAgentResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.AllowIncrementalClone = &v + _field = &v } + p.AllowIncrementalClone = _field return nil } - func (p *TAgentResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SnapshotVersion = v + _field = v } + p.SnapshotVersion = _field return nil } @@ -24151,7 +27755,6 @@ func (p *TAgentResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -24249,6 +27852,7 @@ func (p *TAgentResult_) String() string { return "" } return fmt.Sprintf("TAgentResult_(%+v)", *p) + } func (p *TAgentResult_) DeepEqual(ano *TAgentResult_) bool { @@ -24323,7 +27927,6 @@ func NewTTopicItem() *TTopicItem { } func (p *TTopicItem) InitDefault() { - *p = TTopicItem{} } func (p *TTopicItem) GetKey() (v string) { @@ -24414,47 +28017,38 @@ func (p *TTopicItem) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.DOUBLE { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -24486,38 +28080,47 @@ RequiredFieldNotSetError: } func (p *TTopicItem) ReadField1(iprot thrift.TProtocol) 
error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Key = v + _field = v } + p.Key = _field return nil } - func (p *TTopicItem) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IntValue = &v + _field = &v } + p.IntValue = _field return nil } - func (p *TTopicItem) ReadField3(iprot thrift.TProtocol) error { + + var _field *float64 if v, err := iprot.ReadDouble(); err != nil { return err } else { - p.DoubleValue = &v + _field = &v } + p.DoubleValue = _field return nil } - func (p *TTopicItem) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.StringValue = &v + _field = &v } + p.StringValue = _field return nil } @@ -24543,7 +28146,6 @@ func (p *TTopicItem) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -24641,6 +28243,7 @@ func (p *TTopicItem) String() string { return "" } return fmt.Sprintf("TTopicItem(%+v)", *p) + } func (p *TTopicItem) DeepEqual(ano *TTopicItem) bool { @@ -24719,7 +28322,6 @@ func NewTTopicUpdate() *TTopicUpdate { } func (p *TTopicUpdate) InitDefault() { - *p = TTopicUpdate{} } func (p *TTopicUpdate) GetType() (v TTopicType) { @@ -24793,37 +28395,30 @@ func (p *TTopicUpdate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -24855,41 +28450,47 @@ RequiredFieldNotSetError: } func (p *TTopicUpdate) ReadField1(iprot thrift.TProtocol) error { + + var _field TTopicType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TTopicType(v) + _field = TTopicType(v) } + p.Type = _field return nil } - func (p *TTopicUpdate) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Updates = make([]*TTopicItem, 0, size) + _field := make([]*TTopicItem, 0, size) + values := make([]TTopicItem, size) for i := 0; i < size; i++ { - _elem := NewTTopicItem() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Updates = append(p.Updates, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Updates = _field return nil } - func (p *TTopicUpdate) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Deletes = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return 
err @@ -24897,11 +28498,12 @@ func (p *TTopicUpdate) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.Deletes = append(p.Deletes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Deletes = _field return nil } @@ -24923,7 +28525,6 @@ func (p *TTopicUpdate) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -25018,6 +28619,7 @@ func (p *TTopicUpdate) String() string { return "" } return fmt.Sprintf("TTopicUpdate(%+v)", *p) + } func (p *TTopicUpdate) DeepEqual(ano *TTopicUpdate) bool { @@ -25082,7 +28684,6 @@ func NewTAgentPublishRequest() *TAgentPublishRequest { } func (p *TAgentPublishRequest) InitDefault() { - *p = TAgentPublishRequest{} } func (p *TAgentPublishRequest) GetProtocolVersion() (v TAgentServiceVersion) { @@ -25131,10 +28732,8 @@ func (p *TAgentPublishRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -25142,17 +28741,14 @@ func (p *TAgentPublishRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUpdates = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -25189,31 +28785,37 @@ RequiredFieldNotSetError: } func (p *TAgentPublishRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field TAgentServiceVersion if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ProtocolVersion = TAgentServiceVersion(v) + _field = TAgentServiceVersion(v) } + p.ProtocolVersion = _field return nil } - func (p *TAgentPublishRequest) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Updates = make([]*TTopicUpdate, 0, size) + _field := make([]*TTopicUpdate, 0, size) + values := make([]TTopicUpdate, size) for i := 0; i < size; i++ { - _elem := NewTTopicUpdate() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Updates = append(p.Updates, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Updates = _field return nil } @@ -25231,7 +28833,6 @@ func (p *TAgentPublishRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -25297,6 +28898,7 @@ func (p *TAgentPublishRequest) String() string { return "" } return fmt.Sprintf("TAgentPublishRequest(%+v)", *p) + } func (p *TAgentPublishRequest) DeepEqual(ano *TAgentPublishRequest) bool { diff --git a/pkg/rpc/kitex_gen/agentservice/k-AgentService.go b/pkg/rpc/kitex_gen/agentservice/k-AgentService.go index 6f4cbac9..667cffd1 100644 --- a/pkg/rpc/kitex_gen/agentservice/k-AgentService.go +++ b/pkg/rpc/kitex_gen/agentservice/k-AgentService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package agentservice @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice" @@ -319,6 +320,76 @@ func (p *TTabletSchema) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 19: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 21: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 22: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 23: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -655,6 +726,108 @@ func (p *TTabletSchema) FastReadField18(buf []byte) (int, error) { return offset, nil } +func (p *TTabletSchema) FastReadField19(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ClusterKeyIdxes = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ClusterKeyIdxes = append(p.ClusterKeyIdxes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TTabletSchema) FastReadField20(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.RowStoreColCids = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.RowStoreColCids = append(p.RowStoreColCids, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TTabletSchema) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RowStorePageSize = v + + } + return offset, nil +} + +func (p 
*TTabletSchema) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.VariantEnableFlattenNested = v + + } + return offset, nil +} + +func (p *TTabletSchema) FastReadField23(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.StoragePageSize = v + + } + return offset, nil +} + // for compatibility func (p *TTabletSchema) FastWrite(buf []byte) int { return 0 @@ -677,11 +850,16 @@ func (p *TTabletSchema) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField17(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -710,6 +888,11 @@ func (p *TTabletSchema) BLength() int { l += p.field16Length() l += p.field17Length() l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -918,6 +1101,77 @@ func (p *TTabletSchema) fastWriteField18(buf []byte, binaryWriter bthrift.Binary return offset } +func (p *TTabletSchema) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetClusterKeyIdxes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_key_idxes", thrift.LIST, 19) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.ClusterKeyIdxes { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletSchema) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRowStoreColCids() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_store_col_cids", thrift.LIST, 20) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.RowStoreColCids { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletSchema) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRowStorePageSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_store_page_size", 
thrift.I64, 21) + offset += bthrift.Binary.WriteI64(buf[offset:], p.RowStorePageSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletSchema) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVariantEnableFlattenNested() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variant_enable_flatten_nested", thrift.BOOL, 22) + offset += bthrift.Binary.WriteBool(buf[offset:], p.VariantEnableFlattenNested) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletSchema) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStoragePageSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_page_size", thrift.I64, 23) + offset += bthrift.Binary.WriteI64(buf[offset:], p.StoragePageSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTabletSchema) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("short_key_column_count", thrift.I16, 1) @@ -1112,6 +1366,65 @@ func (p *TTabletSchema) field18Length() int { return l } +func (p *TTabletSchema) field19Length() int { + l := 0 + if p.IsSetClusterKeyIdxes() { + l += bthrift.Binary.FieldBeginLength("cluster_key_idxes", thrift.LIST, 19) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.ClusterKeyIdxes)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.ClusterKeyIdxes) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletSchema) field20Length() int { + l := 0 + if p.IsSetRowStoreColCids() { + l += bthrift.Binary.FieldBeginLength("row_store_col_cids", thrift.LIST, 20) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.RowStoreColCids)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.RowStoreColCids) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletSchema) field21Length() int { + l := 0 + if p.IsSetRowStorePageSize() { + l += bthrift.Binary.FieldBeginLength("row_store_page_size", thrift.I64, 21) + l += bthrift.Binary.I64Length(p.RowStorePageSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletSchema) field22Length() int { + l := 0 + if p.IsSetVariantEnableFlattenNested() { + l += bthrift.Binary.FieldBeginLength("variant_enable_flatten_nested", thrift.BOOL, 22) + l += bthrift.Binary.BoolLength(p.VariantEnableFlattenNested) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletSchema) field23Length() int { + l := 0 + if p.IsSetStoragePageSize() { + l += bthrift.Binary.FieldBeginLength("storage_page_size", thrift.I64, 23) + l += bthrift.Binary.I64Length(p.StoragePageSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TS3StorageParam) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1274,15 +1587,43 @@ func (p *TS3StorageParam) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l if err != nil { goto ReadFieldEndError @@ -1443,6 +1784,34 @@ func (p *TS3StorageParam) FastReadField10(buf []byte) (int, error) { return offset, nil } +func (p *TS3StorageParam) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TS3StorageParam) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TObjStorageType(v) + p.Provider = &tmp + + } + return offset, nil +} + // for compatibility func (p *TS3StorageParam) FastWrite(buf []byte) int { return 0 @@ -1462,6 +1831,8 @@ func (p *TS3StorageParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1482,6 +1853,8 @@ func (p *TS3StorageParam) BLength() int { l += p.field8Length() l += p.field9Length() l += p.field10Length() + l += p.field11Length() + l += p.field12Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1598,6 +1971,28 @@ func (p *TS3StorageParam) fastWriteField10(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TS3StorageParam) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TS3StorageParam) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProvider() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "provider", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Provider)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TS3StorageParam) field1Length() int { l := 0 if p.IsSetEndpoint() { @@ -1708,6 +2103,28 @@ func (p *TS3StorageParam) field10Length() int { return l } +func (p *TS3StorageParam) field11Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TS3StorageParam) field12Length() int { + l := 0 + if p.IsSetProvider() { + l += bthrift.Binary.FieldBeginLength("provider", thrift.I32, 12) + l += bthrift.Binary.I32Length(int32(*p.Provider)) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TStoragePolicy) FastRead(buf []byte) (int, error) { var err error var offset int @@ -2174,6 +2591,20 @@ func (p *TStorageResource) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2261,6 +2692,19 @@ func (p *TStorageResource) FastReadField4(buf []byte) (int, error) { return offset, nil } +func (p *TStorageResource) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTHdfsParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.HdfsStorageParam = tmp + return offset, nil +} + // for compatibility func (p *TStorageResource) FastWrite(buf []byte) int { return 0 @@ -2274,6 +2718,7 @@ func (p *TStorageResource) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -2288,6 +2733,7 @@ func (p *TStorageResource) BLength() int { l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -2337,6 +2783,16 @@ func (p *TStorageResource) fastWriteField4(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TStorageResource) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHdfsStorageParam() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hdfs_storage_param", thrift.STRUCT, 5) + offset += p.HdfsStorageParam.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TStorageResource) field1Length() int { l := 0 if p.IsSetId() { @@ -2380,6 +2836,16 @@ func (p *TStorageResource) field4Length() int { return l } +func (p *TStorageResource) field5Length() int { + l := 0 + if p.IsSetHdfsStorageParam() { + l += bthrift.Binary.FieldBeginLength("hdfs_storage_param", thrift.STRUCT, 5) + l += p.HdfsStorageParam.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TPushStoragePolicyReq) FastRead(buf []byte) (int, error) { var err error var offset int @@ -2690,7 +3156,84 @@ func (p *TPushStoragePolicyReq) field3Length() int { return l } -func (p *TBinlogConfig) FastRead(buf []byte) (int, error) { +func (p *TCleanTrashReq) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = 
bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +// for compatibility +func (p *TCleanTrashReq) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCleanTrashReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCleanTrashReq") + if p != nil { + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCleanTrashReq) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCleanTrashReq") + if p != nil { + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCleanUDFCacheReq) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -2713,7 +3256,7 @@ func (p *TBinlogConfig) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -2726,48 +3269,6 @@ func (p *TBinlogConfig) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2794,7 +3295,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCleanUDFCacheReq[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -2803,24 +3304,199 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T 
read struct end error: ", p), err) } -func (p *TBinlogConfig) FastReadField1(buf []byte) (int, error) { +func (p *TCleanUDFCacheReq) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Enable = &v + p.FunctionSignature = &v } return offset, nil } -func (p *TBinlogConfig) FastReadField2(buf []byte) (int, error) { - offset := 0 +// for compatibility +func (p *TCleanUDFCacheReq) FastWrite(buf []byte) int { + return 0 +} - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err +func (p *TCleanUDFCacheReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCleanUDFCacheReq") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCleanUDFCacheReq) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCleanUDFCacheReq") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCleanUDFCacheReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFunctionSignature() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "function_signature", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FunctionSignature) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCleanUDFCacheReq) field1Length() int { + l := 0 + if p.IsSetFunctionSignature() { + l += bthrift.Binary.FieldBeginLength("function_signature", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.FunctionSignature) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlogConfig) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBinlogConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Enable = &v + + } + return offset, nil +} + +func (p *TBinlogConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err } else { offset += l p.TtlSeconds = &v @@ -3324,6 +4000,90 @@ func (p *TCreateTabletReq) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 26: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField26(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 27: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField27(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 28: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField28(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 29: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField29(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1001: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l 
@@ -3683,14 +4443,98 @@ func (p *TCreateTabletReq) FastReadField25(buf []byte) (int, error) { return offset, nil } -// for compatibility -func (p *TCreateTabletReq) FastWrite(buf []byte) int { - return 0 +func (p *TCreateTabletReq) FastReadField26(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TimeSeriesCompactionEmptyRowsetsThreshold = v + + } + return offset, nil } -func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreateTabletReq) FastReadField27(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreateTabletReq") + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TimeSeriesCompactionLevelThreshold = v + + } + return offset, nil +} + +func (p *TCreateTabletReq) FastReadField28(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexStorageFormat = TInvertedIndexStorageFormat(v) + + } + return offset, nil +} + +func (p *TCreateTabletReq) FastReadField29(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat(v) + + } + return offset, nil +} + +func (p *TCreateTabletReq) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsInMemory = v + + } + return offset, nil +} + +func (p *TCreateTabletReq) FastReadField1001(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsPersistent = v + + } + return offset, nil +} + +// for compatibility +func (p *TCreateTabletReq) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreateTabletReq") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -3708,6 +4552,10 @@ func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField23(buf[offset:], binaryWriter) offset += p.fastWriteField24(buf[offset:], binaryWriter) offset += p.fastWriteField25(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + offset += p.fastWriteField1001(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) @@ -3715,6 +4563,8 @@ func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField21(buf[offset:], binaryWriter) offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], 
binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -3748,6 +4598,12 @@ func (p *TCreateTabletReq) BLength() int { l += p.field23Length() l += p.field24Length() l += p.field25Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field1000Length() + l += p.field1001Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4001,6 +4857,72 @@ func (p *TCreateTabletReq) fastWriteField25(buf []byte, binaryWriter bthrift.Bin return offset } +func (p *TCreateTabletReq) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_empty_rowsets_threshold", thrift.I64, 26) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TimeSeriesCompactionEmptyRowsetsThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreateTabletReq) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeSeriesCompactionLevelThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_level_threshold", thrift.I64, 27) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TimeSeriesCompactionLevelThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreateTabletReq) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInvertedIndexStorageFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_storage_format", thrift.I32, 28) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.InvertedIndexStorageFormat)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreateTabletReq) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInvertedIndexFileStorageFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_file_storage_format", thrift.I32, 29) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.InvertedIndexFileStorageFormat)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreateTabletReq) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsInMemory() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_in_memory", thrift.BOOL, 1000) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsInMemory) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreateTabletReq) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsPersistent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_persistent", thrift.BOOL, 1001) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsPersistent) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TCreateTabletReq) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ -4248,6 +5170,72 @@ func (p *TCreateTabletReq) field25Length() int { return l } +func (p *TCreateTabletReq) field26Length() int { + l := 0 + if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + l += 
bthrift.Binary.FieldBeginLength("time_series_compaction_empty_rowsets_threshold", thrift.I64, 26) + l += bthrift.Binary.I64Length(p.TimeSeriesCompactionEmptyRowsetsThreshold) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreateTabletReq) field27Length() int { + l := 0 + if p.IsSetTimeSeriesCompactionLevelThreshold() { + l += bthrift.Binary.FieldBeginLength("time_series_compaction_level_threshold", thrift.I64, 27) + l += bthrift.Binary.I64Length(p.TimeSeriesCompactionLevelThreshold) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreateTabletReq) field28Length() int { + l := 0 + if p.IsSetInvertedIndexStorageFormat() { + l += bthrift.Binary.FieldBeginLength("inverted_index_storage_format", thrift.I32, 28) + l += bthrift.Binary.I32Length(int32(p.InvertedIndexStorageFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreateTabletReq) field29Length() int { + l := 0 + if p.IsSetInvertedIndexFileStorageFormat() { + l += bthrift.Binary.FieldBeginLength("inverted_index_file_storage_format", thrift.I32, 29) + l += bthrift.Binary.I32Length(int32(p.InvertedIndexFileStorageFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreateTabletReq) field1000Length() int { + l := 0 + if p.IsSetIsInMemory() { + l += bthrift.Binary.FieldBeginLength("is_in_memory", thrift.BOOL, 1000) + l += bthrift.Binary.BoolLength(p.IsInMemory) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreateTabletReq) field1001Length() int { + l := 0 + if p.IsSetIsPersistent() { + l += bthrift.Binary.FieldBeginLength("is_persistent", thrift.BOOL, 1001) + l += bthrift.Binary.BoolLength(p.IsPersistent) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TDropTabletReq) FastRead(buf []byte) (int, error) { var err error var offset int @@ -5208,6 +6196,48 @@ func (p *TAlterTabletReqV2) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 1000: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1001: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1002: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1002(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -5441,6 +6471,45 @@ func (p *TAlterTabletReqV2) FastReadField11(buf []byte) (int, error) { return offset, nil } +func (p *TAlterTabletReqV2) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.JobId = &v + + } + return offset, nil +} + +func (p *TAlterTabletReqV2) FastReadField1001(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Expiration = &v + + } + return offset, nil +} + +func (p *TAlterTabletReqV2) 
FastReadField1002(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StorageVaultId = &v + + } + return offset, nil +} + // for compatibility func (p *TAlterTabletReqV2) FastWrite(buf []byte) int { return 0 @@ -5457,10 +6526,13 @@ func (p *TAlterTabletReqV2) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bin offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + offset += p.fastWriteField1001(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1002(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -5482,6 +6554,9 @@ func (p *TAlterTabletReqV2) BLength() int { l += p.field9Length() l += p.field10Length() l += p.field11Length() + l += p.field1000Length() + l += p.field1001Length() + l += p.field1002Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5614,6 +6689,39 @@ func (p *TAlterTabletReqV2) fastWriteField11(buf []byte, binaryWriter bthrift.Bi return offset } +func (p *TAlterTabletReqV2) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1000) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAlterTabletReqV2) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExpiration() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expiration", thrift.I64, 1001) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Expiration) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAlterTabletReqV2) fastWriteField1002(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStorageVaultId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_vault_id", thrift.STRING, 1002) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StorageVaultId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TAlterTabletReqV2) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("base_tablet_id", thrift.I64, 1) @@ -5732,7 +6840,40 @@ func (p *TAlterTabletReqV2) field11Length() int { return l } -func (p *TAlterInvertedIndexReq) FastRead(buf []byte) (int, error) { +func (p *TAlterTabletReqV2) field1000Length() int { + l := 0 + if p.IsSetJobId() { + l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1000) + l += bthrift.Binary.I64Length(*p.JobId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAlterTabletReqV2) field1001Length() int { + l := 0 + if p.IsSetExpiration() { + l += bthrift.Binary.FieldBeginLength("expiration", thrift.I64, 1001) + l += bthrift.Binary.I64Length(*p.Expiration) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAlterTabletReqV2) field1002Length() int { + l := 0 + if 
p.IsSetStorageVaultId() { + l += bthrift.Binary.FieldBeginLength("storage_vault_id", thrift.STRING, 1002) + l += bthrift.Binary.StringLengthNocopy(*p.StorageVaultId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAlterInvertedIndexReq) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7536,6 +8677,34 @@ func (p *TPushReq) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 17: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -7844,6 +9013,32 @@ func (p *TPushReq) FastReadField16(buf []byte) (int, error) { return offset, nil } +func (p *TPushReq) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StorageVaultId = &v + + } + return offset, nil +} + +func (p *TPushReq) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SchemaVersion = &v + + } + return offset, nil +} + // for compatibility func (p *TPushReq) FastWrite(buf []byte) int { return 0 @@ -7863,12 +9058,14 @@ func (p *TPushReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -7895,6 +9092,8 @@ func (p *TPushReq) BLength() int { l += p.field14Length() l += p.field15Length() l += p.field16Length() + l += p.field17Length() + l += p.field18Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -8077,6 +9276,28 @@ func (p *TPushReq) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWrite return offset } +func (p *TPushReq) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStorageVaultId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_vault_id", thrift.STRING, 17) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StorageVaultId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPushReq) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSchemaVersion() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_version", thrift.I32, 18) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SchemaVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPushReq) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ -8245,6 +9466,28 @@ func (p *TPushReq) field16Length() int { return l } +func (p *TPushReq) field17Length() int { + l := 0 + if p.IsSetStorageVaultId() { + l += bthrift.Binary.FieldBeginLength("storage_vault_id", thrift.STRING, 17) + l += bthrift.Binary.StringLengthNocopy(*p.StorageVaultId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPushReq) field18Length() int { + l := 0 + if p.IsSetSchemaVersion() { + l += bthrift.Binary.FieldBeginLength("schema_version", thrift.I32, 18) + l += bthrift.Binary.I32Length(*p.SchemaVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TCloneReq) FastRead(buf []byte) (int, error) { var err error var offset int @@ -8441,6 +9684,20 @@ func (p *TCloneReq) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -8569,7 +9826,7 @@ func (p *TCloneReq) FastReadField5(buf []byte) (int, error) { return offset, err } else { offset += l - p.CommittedVersion = &v + p.Version = &v } return offset, nil @@ -8667,6 +9924,20 @@ func (p *TCloneReq) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TCloneReq) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TableId = v + + } + return offset, nil +} + // for compatibility func (p *TCloneReq) FastWrite(buf []byte) int { return 0 @@ -8686,6 +9957,7 @@ func (p *TCloneReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) } @@ -8710,6 +9982,7 @@ func (p *TCloneReq) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -8763,9 +10036,9 @@ func (p *TCloneReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWrite func (p *TCloneReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCommittedVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "committed_version", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.CommittedVersion) + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } @@ -8849,6 +10122,17 @@ func (p *TCloneReq) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p 
*TCloneReq) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 13) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TCloneReq) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ -8892,9 +10176,9 @@ func (p *TCloneReq) field4Length() int { func (p *TCloneReq) field5Length() int { l := 0 - if p.IsSetCommittedVersion() { - l += bthrift.Binary.FieldBeginLength("committed_version", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.CommittedVersion) + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.Version) l += bthrift.Binary.FieldEndLength() } @@ -8978,6 +10262,17 @@ func (p *TCloneReq) field12Length() int { return l } +func (p *TCloneReq) field13Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 13) + l += bthrift.Binary.I64Length(p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TCompactionReq) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11814,6 +13109,20 @@ func (p *TSnapshotRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -12049,6 +13358,19 @@ func (p *TSnapshotRequest) FastReadField13(buf []byte) (int, error) { return offset, nil } +func (p *TSnapshotRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RefTabletId = &v + + } + return offset, nil +} + // for compatibility func (p *TSnapshotRequest) FastWrite(buf []byte) int { return 0 @@ -12070,6 +13392,7 @@ func (p *TSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -12094,6 +13417,7 @@ func (p *TSnapshotRequest) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -12247,6 +13571,17 @@ func (p *TSnapshotRequest) fastWriteField13(buf []byte, binaryWriter bthrift.Bin return offset } +func (p *TSnapshotRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRefTabletId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ref_tablet_id", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RefTabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TSnapshotRequest) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ 
-12388,6 +13723,17 @@ func (p *TSnapshotRequest) field13Length() int { return l } +func (p *TSnapshotRequest) field14Length() int { + l := 0 + if p.IsSetRefTabletId() { + l += bthrift.Binary.FieldBeginLength("ref_tablet_id", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.RefTabletId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TReleaseSnapshotRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13233,150 +14579,1286 @@ func (p *TMoveDirReq) FastReadField4(buf []byte) (int, error) { func (p *TMoveDirReq) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Overwrite = v + + } + return offset, nil +} + +// for compatibility +func (p *TMoveDirReq) FastWrite(buf []byte) int { + return 0 +} + +func (p *TMoveDirReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMoveDirReq") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMoveDirReq") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TMoveDirReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_hash", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], p.SchemaHash) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Src) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], p.Overwrite) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMoveDirReq) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.TabletId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMoveDirReq) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("schema_hash", thrift.I32, 2) + l += bthrift.Binary.I32Length(p.SchemaHash) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMoveDirReq) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("src", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Src) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMoveDirReq) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 4) + l += bthrift.Binary.I64Length(p.JobId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMoveDirReq) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(p.Overwrite) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTransactionId bool = false + var issetPartitionVersionInfos bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTransactionId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionVersionInfos = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.SET { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetTransactionId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetPartitionVersionInfos { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read 
struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId])) +} + +func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TransactionId = v + + } + return offset, nil +} + +func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPartitionVersionInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPublishVersionRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.StrictMode = v + + } + return offset, nil +} + +func (p *TPublishVersionRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadSetBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.BaseTabletIds = make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { + var _elem types.TTabletId + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.BaseTabletIds = append(p.BaseTabletIds, _elem) + } + if l, err := bthrift.Binary.ReadSetEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TPublishVersionRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishVersionRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPublishVersionRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPublishVersionRequest") + if p != nil { + l 
+= p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transaction_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TransactionId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version_infos", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.PartitionVersionInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPublishVersionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStrictMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], p.StrictMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPublishVersionRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBaseTabletIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_tablet_ids", thrift.SET, 4) + setBeginOffset := offset + offset += bthrift.Binary.SetBeginLength(thrift.I64, 0) + + for i := 0; i < len(p.BaseTabletIds); i++ { + for j := i + 1; j < len(p.BaseTabletIds); j++ { + if func(tgt, src types.TTabletId) bool { + if tgt != src { + return false + } + return true + }(p.BaseTabletIds[i], p.BaseTabletIds[j]) { + panic(fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i])) + } + } + } + var length int + for _, v := range p.BaseTabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteSetBegin(buf[setBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteSetEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPublishVersionRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("transaction_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.TransactionId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPublishVersionRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("partition_version_infos", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionVersionInfos)) + for _, v := range p.PartitionVersionInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPublishVersionRequest) field3Length() int { + l := 0 + if p.IsSetStrictMode() { + l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(p.StrictMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPublishVersionRequest) field4Length() int { + l := 0 + if 
p.IsSetBaseTabletIds() { + l += bthrift.Binary.FieldBeginLength("base_tablet_ids", thrift.SET, 4) + l += bthrift.Binary.SetBeginLength(thrift.I64, len(p.BaseTabletIds)) + + for i := 0; i < len(p.BaseTabletIds); i++ { + for j := i + 1; j < len(p.BaseTabletIds); j++ { + if func(tgt, src types.TTabletId) bool { + if tgt != src { + return false + } + return true + }(p.BaseTabletIds[i], p.BaseTabletIds[j]) { + panic(fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i])) + } + } + } + var tmpV types.TTabletId + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.BaseTabletIds) + l += bthrift.Binary.SetEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TVisibleVersionReq) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionVersion bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionVersion = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetPartitionVersion { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TVisibleVersionReq[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TVisibleVersionReq[fieldId])) +} + +func (p *TVisibleVersionReq) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionVersion = make(map[types.TPartitionId]types.TVersion, size) + for i := 0; i < size; i++ { + var _key types.TPartitionId + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val types.TVersion + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { 
+ offset += l + + _val = v + + } + + p.PartitionVersion[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TVisibleVersionReq) FastWrite(buf []byte) int { + return 0 +} + +func (p *TVisibleVersionReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TVisibleVersionReq") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TVisibleVersionReq) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TVisibleVersionReq") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TVisibleVersionReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version", thrift.MAP, 1) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) + var length int + for k, v := range p.PartitionVersion { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TVisibleVersionReq) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("partition_version", thrift.MAP, 1) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.PartitionVersion)) + var tmpK types.TPartitionId + var tmpV types.TVersion + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.PartitionVersion) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionId bool = false + var issetVersion bool = false + var issetTabletIds bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetVersion = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTabletIds = true + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetPartitionId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetVersion { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTabletIds { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId])) +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionId = v + + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Version = v + + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletIds = 
make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { + var _elem types.TTabletId + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TabletIds = append(p.TabletIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.BaseCompactionCnts = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.BaseCompactionCnts = append(p.BaseCompactionCnts, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.CumulativeCompactionCnts = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.CumulativeCompactionCnts = append(p.CumulativeCompactionCnts, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.CumulativePoints = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.CumulativePoints = append(p.CumulativePoints, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCalcDeleteBitmapPartitionInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SubTxnIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.SubTxnIds = append(p.SubTxnIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Overwrite = v - } return offset, nil } // for compatibility -func (p *TMoveDirReq) FastWrite(buf []byte) int { +func (p *TCalcDeleteBitmapPartitionInfo) FastWrite(buf []byte) int { return 0 } -func (p *TMoveDirReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMoveDirReq") + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TCalcDeleteBitmapPartitionInfo") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMoveDirReq) BLength() int { +func (p *TCalcDeleteBitmapPartitionInfo) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMoveDirReq") + l += bthrift.Binary.StructBeginLength("TCalcDeleteBitmapPartitionInfo") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMoveDirReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], p.TabletId) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.PartitionId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TMoveDirReq) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_hash", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], p.SchemaHash) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Version) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TMoveDirReq) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Src) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TMoveDirReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], 
p.JobId) + if p.IsSetBaseCompactionCnts() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_compaction_cnts", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.BaseCompactionCnts { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TMoveDirReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 5) - offset += bthrift.Binary.WriteBool(buf[offset:], p.Overwrite) + if p.IsSetCumulativeCompactionCnts() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cumulative_compaction_cnts", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.CumulativeCompactionCnts { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TMoveDirReq) field1Length() int { +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCumulativePoints() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cumulative_points", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.CumulativePoints { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_ids", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.SubTxnIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCalcDeleteBitmapPartitionInfo) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(p.TabletId) + l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.PartitionId) l += bthrift.Binary.FieldEndLength() return l } -func (p *TMoveDirReq) field2Length() int { +func (p *TCalcDeleteBitmapPartitionInfo) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("schema_hash", thrift.I32, 2) - l += bthrift.Binary.I32Length(p.SchemaHash) + l 
+= bthrift.Binary.FieldBeginLength("version", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.Version) l += bthrift.Binary.FieldEndLength() return l } -func (p *TMoveDirReq) field3Length() int { +func (p *TCalcDeleteBitmapPartitionInfo) field3Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("src", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Src) - + l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) + var tmpV types.TTabletId + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TMoveDirReq) field4Length() int { +func (p *TCalcDeleteBitmapPartitionInfo) field4Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 4) - l += bthrift.Binary.I64Length(p.JobId) + if p.IsSetBaseCompactionCnts() { + l += bthrift.Binary.FieldBeginLength("base_compaction_cnts", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.BaseCompactionCnts)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.BaseCompactionCnts) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.FieldEndLength() +func (p *TCalcDeleteBitmapPartitionInfo) field5Length() int { + l := 0 + if p.IsSetCumulativeCompactionCnts() { + l += bthrift.Binary.FieldBeginLength("cumulative_compaction_cnts", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.CumulativeCompactionCnts)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.CumulativeCompactionCnts) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TMoveDirReq) field5Length() int { +func (p *TCalcDeleteBitmapPartitionInfo) field6Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 5) - l += bthrift.Binary.BoolLength(p.Overwrite) + if p.IsSetCumulativePoints() { + l += bthrift.Binary.FieldBeginLength("cumulative_points", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.CumulativePoints)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.CumulativePoints) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.FieldEndLength() +func (p *TCalcDeleteBitmapPartitionInfo) field7Length() int { + l := 0 + if p.IsSetSubTxnIds() { + l += bthrift.Binary.FieldBeginLength("sub_txn_ids", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.SubTxnIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.SubTxnIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) { +func (p *TCalcDeleteBitmapRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 var issetTransactionId bool = false - var issetPartitionVersionInfos bool = false + var issetPartitions bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13415,21 +15897,7 @@ func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetPartitionVersionInfos = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - 
} - } - case 3: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } + issetPartitions = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13462,7 +15930,7 @@ func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) { goto RequiredFieldNotSetError } - if !issetPartitionVersionInfos { + if !issetPartitions { fieldId = 2 goto RequiredFieldNotSetError } @@ -13472,7 +15940,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -13480,10 +15948,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapRequest[fieldId])) } -func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) { +func (p *TCalcDeleteBitmapRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { @@ -13497,7 +15965,7 @@ func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) { +func (p *TCalcDeleteBitmapRequest) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -13505,16 +15973,16 @@ func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size) + p.Partitions = make([]*TCalcDeleteBitmapPartitionInfo, 0, size) for i := 0; i < size; i++ { - _elem := NewTPartitionVersionInfo() + _elem := NewTCalcDeleteBitmapPartitionInfo() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem) + p.Partitions = append(p.Partitions, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -13524,31 +15992,16 @@ func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TPublishVersionRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.StrictMode = v - - } - return offset, nil -} - // for compatibility -func (p *TPublishVersionRequest) FastWrite(buf []byte) int { +func (p *TCalcDeleteBitmapRequest) FastWrite(buf []byte) int { return 0 } -func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p 
*TCalcDeleteBitmapRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishVersionRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCalcDeleteBitmapRequest") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -13556,20 +16009,19 @@ func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrif return offset } -func (p *TPublishVersionRequest) BLength() int { +func (p *TCalcDeleteBitmapRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPublishVersionRequest") + l += bthrift.Binary.StructBeginLength("TCalcDeleteBitmapRequest") if p != nil { l += p.field1Length() l += p.field2Length() - l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transaction_id", thrift.I64, 1) offset += bthrift.Binary.WriteI64(buf[offset:], p.TransactionId) @@ -13578,13 +16030,13 @@ func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrif return offset } -func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCalcDeleteBitmapRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version_infos", thrift.LIST, 2) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 2) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.PartitionVersionInfos { + for _, v := range p.Partitions { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -13594,18 +16046,7 @@ func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrif return offset } -func (p *TPublishVersionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStrictMode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 3) - offset += bthrift.Binary.WriteBool(buf[offset:], p.StrictMode) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPublishVersionRequest) field1Length() int { +func (p *TCalcDeleteBitmapRequest) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("transaction_id", thrift.I64, 1) l += bthrift.Binary.I64Length(p.TransactionId) @@ -13614,11 +16055,11 @@ func (p *TPublishVersionRequest) field1Length() int { return l } -func (p *TPublishVersionRequest) field2Length() int { +func (p *TCalcDeleteBitmapRequest) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("partition_version_infos", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionVersionInfos)) - for _, v := range p.PartitionVersionInfos { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { l += 
v.BLength() } l += bthrift.Binary.ListEndLength() @@ -13626,17 +16067,6 @@ func (p *TPublishVersionRequest) field2Length() int { return l } -func (p *TPublishVersionRequest) field3Length() int { - l := 0 - if p.IsSetStrictMode() { - l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 3) - l += bthrift.Binary.BoolLength(p.StrictMode) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - func (p *TClearAlterTaskRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -14469,9 +16899,51 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField11(buf[offset:]) + case 11: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField14(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14483,9 +16955,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 12: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField12(buf[offset:]) + case 15: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField15(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14497,9 +16969,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 13: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField13(buf[offset:]) + case 16: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField16(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14511,9 +16983,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 14: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField14(buf[offset:]) + case 17: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField17(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14525,9 +16997,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 15: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField15(buf[offset:]) + case 18: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField18(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14743,6 +17215,45 @@ func (p *TTabletMetaInfo) FastReadField15(buf []byte) (int, error) { return offset, nil } +func (p *TTabletMetaInfo) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DisableAutoCompaction = &v + + } + return offset, nil +} + +func (p *TTabletMetaInfo) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, 
err + } else { + offset += l + p.TimeSeriesCompactionEmptyRowsetsThreshold = &v + + } + return offset, nil +} + +func (p *TTabletMetaInfo) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TimeSeriesCompactionLevelThreshold = &v + + } + return offset, nil +} + // for compatibility func (p *TTabletMetaInfo) FastWrite(buf []byte) int { return 0 @@ -14763,6 +17274,9 @@ func (p *TTabletMetaInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) } @@ -14788,6 +17302,9 @@ func (p *TTabletMetaInfo) BLength() int { l += p.field13Length() l += p.field14Length() l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -14936,6 +17453,39 @@ func (p *TTabletMetaInfo) fastWriteField15(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TTabletMetaInfo) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDisableAutoCompaction() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_auto_compaction", thrift.BOOL, 16) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.DisableAutoCompaction) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletMetaInfo) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_empty_rowsets_threshold", thrift.I64, 17) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TimeSeriesCompactionEmptyRowsetsThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletMetaInfo) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeSeriesCompactionLevelThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_level_threshold", thrift.I64, 18) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TimeSeriesCompactionLevelThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTabletMetaInfo) field1Length() int { l := 0 if p.IsSetTabletId() { @@ -15078,6 +17628,39 @@ func (p *TTabletMetaInfo) field15Length() int { return l } +func (p *TTabletMetaInfo) field16Length() int { + l := 0 + if p.IsSetDisableAutoCompaction() { + l += bthrift.Binary.FieldBeginLength("disable_auto_compaction", thrift.BOOL, 16) + l += bthrift.Binary.BoolLength(*p.DisableAutoCompaction) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletMetaInfo) field17Length() int { + l := 0 + if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() { + l += bthrift.Binary.FieldBeginLength("time_series_compaction_empty_rowsets_threshold", thrift.I64, 17) + l += bthrift.Binary.I64Length(*p.TimeSeriesCompactionEmptyRowsetsThreshold) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletMetaInfo) field18Length() int { + l := 0 + if p.IsSetTimeSeriesCompactionLevelThreshold() { + l += bthrift.Binary.FieldBeginLength("time_series_compaction_level_threshold", thrift.I64, 18) + l += bthrift.Binary.I64Length(*p.TimeSeriesCompactionLevelThreshold) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TUpdateTabletMetaInfoReq) FastRead(buf []byte) (int, error) { var err error var offset int @@ -16407,6 +18990,62 @@ func (p *TAgentTaskRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 34: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField34(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 35: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField35(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 36: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField36(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16879,6 +19518,58 @@ func (p *TAgentTaskRequest) FastReadField33(buf []byte) (int, error) { return offset, nil } +func (p *TAgentTaskRequest) FastReadField34(buf []byte) (int, error) { + offset := 0 + + tmp := NewTCleanTrashReq() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CleanTrashReq = tmp + return offset, nil +} + +func (p *TAgentTaskRequest) FastReadField35(buf []byte) (int, error) { + offset := 0 + + tmp := NewTVisibleVersionReq() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.VisibleVersionReq = tmp + return offset, nil +} + +func (p *TAgentTaskRequest) FastReadField36(buf []byte) (int, error) { + offset := 0 + + tmp := NewTCleanUDFCacheReq() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CleanUdfCacheReq = tmp + return offset, nil +} + +func (p *TAgentTaskRequest) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + tmp := NewTCalcDeleteBitmapRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CalcDeleteBitmapReq = tmp + return offset, nil +} + // for compatibility func (p *TAgentTaskRequest) FastWrite(buf []byte) int { return 0 @@ -16920,6 +19611,10 @@ func (p *TAgentTaskRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bin offset += p.fastWriteField31(buf[offset:], binaryWriter) offset += p.fastWriteField32(buf[offset:], binaryWriter) offset += p.fastWriteField33(buf[offset:], binaryWriter) + offset += p.fastWriteField34(buf[offset:], binaryWriter) + offset += p.fastWriteField35(buf[offset:], binaryWriter) + offset += 
p.fastWriteField36(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -16962,6 +19657,10 @@ func (p *TAgentTaskRequest) BLength() int { l += p.field31Length() l += p.field32Length() l += p.field33Length() + l += p.field34Length() + l += p.field35Length() + l += p.field36Length() + l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -17287,6 +19986,46 @@ func (p *TAgentTaskRequest) fastWriteField33(buf []byte, binaryWriter bthrift.Bi return offset } +func (p *TAgentTaskRequest) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCleanTrashReq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_trash_req", thrift.STRUCT, 34) + offset += p.CleanTrashReq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAgentTaskRequest) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVisibleVersionReq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_req", thrift.STRUCT, 35) + offset += p.VisibleVersionReq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAgentTaskRequest) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCleanUdfCacheReq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_udf_cache_req", thrift.STRUCT, 36) + offset += p.CleanUdfCacheReq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAgentTaskRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCalcDeleteBitmapReq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "calc_delete_bitmap_req", thrift.STRUCT, 1000) + offset += p.CalcDeleteBitmapReq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TAgentTaskRequest) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) @@ -17606,6 +20345,46 @@ func (p *TAgentTaskRequest) field33Length() int { return l } +func (p *TAgentTaskRequest) field34Length() int { + l := 0 + if p.IsSetCleanTrashReq() { + l += bthrift.Binary.FieldBeginLength("clean_trash_req", thrift.STRUCT, 34) + l += p.CleanTrashReq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAgentTaskRequest) field35Length() int { + l := 0 + if p.IsSetVisibleVersionReq() { + l += bthrift.Binary.FieldBeginLength("visible_version_req", thrift.STRUCT, 35) + l += p.VisibleVersionReq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAgentTaskRequest) field36Length() int { + l := 0 + if p.IsSetCleanUdfCacheReq() { + l += bthrift.Binary.FieldBeginLength("clean_udf_cache_req", thrift.STRUCT, 36) + l += p.CleanUdfCacheReq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAgentTaskRequest) field1000Length() int { + l := 0 + if p.IsSetCalcDeleteBitmapReq() { + l += bthrift.Binary.FieldBeginLength("calc_delete_bitmap_req", thrift.STRUCT, 1000) + l += p.CalcDeleteBitmapReq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l 
+} + func (p *TAgentResult_) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/backendservice/BackendService.go b/pkg/rpc/kitex_gen/backendservice/BackendService.go index b4657654..3b1477c5 100644 --- a/pkg/rpc/kitex_gen/backendservice/BackendService.go +++ b/pkg/rpc/kitex_gen/backendservice/BackendService.go @@ -1,13 +1,16 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package backendservice import ( "context" + "database/sql" + "database/sql/driver" "fmt" "github.com/apache/thrift/lib/go/thrift" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" @@ -15,6 +18,397 @@ import ( "strings" ) +type TDownloadType int64 + +const ( + TDownloadType_BE TDownloadType = 0 + TDownloadType_S3 TDownloadType = 1 +) + +func (p TDownloadType) String() string { + switch p { + case TDownloadType_BE: + return "BE" + case TDownloadType_S3: + return "S3" + } + return "" +} + +func TDownloadTypeFromString(s string) (TDownloadType, error) { + switch s { + case "BE": + return TDownloadType_BE, nil + case "S3": + return TDownloadType_S3, nil + } + return TDownloadType(0), fmt.Errorf("not a valid TDownloadType string") +} + +func TDownloadTypePtr(v TDownloadType) *TDownloadType { return &v } +func (p *TDownloadType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TDownloadType(result.Int64) + return +} + +func (p *TDownloadType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TWarmUpTabletsRequestType int64 + +const ( + TWarmUpTabletsRequestType_SET_JOB TWarmUpTabletsRequestType = 0 + TWarmUpTabletsRequestType_SET_BATCH TWarmUpTabletsRequestType = 1 + TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE TWarmUpTabletsRequestType = 2 + TWarmUpTabletsRequestType_CLEAR_JOB TWarmUpTabletsRequestType = 3 +) + +func (p TWarmUpTabletsRequestType) String() string { + switch p { + case TWarmUpTabletsRequestType_SET_JOB: + return "SET_JOB" + case TWarmUpTabletsRequestType_SET_BATCH: + return "SET_BATCH" + case TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE: + return "GET_CURRENT_JOB_STATE_AND_LEASE" + case TWarmUpTabletsRequestType_CLEAR_JOB: + return "CLEAR_JOB" + } + return "" +} + +func TWarmUpTabletsRequestTypeFromString(s string) (TWarmUpTabletsRequestType, error) { + switch s { + case "SET_JOB": + return TWarmUpTabletsRequestType_SET_JOB, nil + case "SET_BATCH": + return TWarmUpTabletsRequestType_SET_BATCH, nil + case "GET_CURRENT_JOB_STATE_AND_LEASE": + return TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE, nil + case "CLEAR_JOB": + return TWarmUpTabletsRequestType_CLEAR_JOB, nil + } + return TWarmUpTabletsRequestType(0), fmt.Errorf("not a valid TWarmUpTabletsRequestType string") +} + +func TWarmUpTabletsRequestTypePtr(v TWarmUpTabletsRequestType) *TWarmUpTabletsRequestType { return &v } +func (p *TWarmUpTabletsRequestType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TWarmUpTabletsRequestType(result.Int64) + return +} + +func (p *TWarmUpTabletsRequestType) Value() (driver.Value, error) { + 
if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TIngestBinlogStatus int64 + +const ( + TIngestBinlogStatus_ANALYSIS_ERROR TIngestBinlogStatus = 0 + TIngestBinlogStatus_UNKNOWN TIngestBinlogStatus = 1 + TIngestBinlogStatus_NOT_FOUND TIngestBinlogStatus = 2 + TIngestBinlogStatus_OK TIngestBinlogStatus = 3 + TIngestBinlogStatus_FAILED TIngestBinlogStatus = 4 + TIngestBinlogStatus_DOING TIngestBinlogStatus = 5 +) + +func (p TIngestBinlogStatus) String() string { + switch p { + case TIngestBinlogStatus_ANALYSIS_ERROR: + return "ANALYSIS_ERROR" + case TIngestBinlogStatus_UNKNOWN: + return "UNKNOWN" + case TIngestBinlogStatus_NOT_FOUND: + return "NOT_FOUND" + case TIngestBinlogStatus_OK: + return "OK" + case TIngestBinlogStatus_FAILED: + return "FAILED" + case TIngestBinlogStatus_DOING: + return "DOING" + } + return "" +} + +func TIngestBinlogStatusFromString(s string) (TIngestBinlogStatus, error) { + switch s { + case "ANALYSIS_ERROR": + return TIngestBinlogStatus_ANALYSIS_ERROR, nil + case "UNKNOWN": + return TIngestBinlogStatus_UNKNOWN, nil + case "NOT_FOUND": + return TIngestBinlogStatus_NOT_FOUND, nil + case "OK": + return TIngestBinlogStatus_OK, nil + case "FAILED": + return TIngestBinlogStatus_FAILED, nil + case "DOING": + return TIngestBinlogStatus_DOING, nil + } + return TIngestBinlogStatus(0), fmt.Errorf("not a valid TIngestBinlogStatus string") +} + +func TIngestBinlogStatusPtr(v TIngestBinlogStatus) *TIngestBinlogStatus { return &v } +func (p *TIngestBinlogStatus) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TIngestBinlogStatus(result.Int64) + return +} + +func (p *TIngestBinlogStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TTopicInfoType int64 + +const ( + TTopicInfoType_WORKLOAD_GROUP TTopicInfoType = 0 + TTopicInfoType_MOVE_QUERY_TO_GROUP TTopicInfoType = 1 + TTopicInfoType_WORKLOAD_SCHED_POLICY TTopicInfoType = 2 +) + +func (p TTopicInfoType) String() string { + switch p { + case TTopicInfoType_WORKLOAD_GROUP: + return "WORKLOAD_GROUP" + case TTopicInfoType_MOVE_QUERY_TO_GROUP: + return "MOVE_QUERY_TO_GROUP" + case TTopicInfoType_WORKLOAD_SCHED_POLICY: + return "WORKLOAD_SCHED_POLICY" + } + return "" +} + +func TTopicInfoTypeFromString(s string) (TTopicInfoType, error) { + switch s { + case "WORKLOAD_GROUP": + return TTopicInfoType_WORKLOAD_GROUP, nil + case "MOVE_QUERY_TO_GROUP": + return TTopicInfoType_MOVE_QUERY_TO_GROUP, nil + case "WORKLOAD_SCHED_POLICY": + return TTopicInfoType_WORKLOAD_SCHED_POLICY, nil + } + return TTopicInfoType(0), fmt.Errorf("not a valid TTopicInfoType string") +} + +func TTopicInfoTypePtr(v TTopicInfoType) *TTopicInfoType { return &v } +func (p *TTopicInfoType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TTopicInfoType(result.Int64) + return +} + +func (p *TTopicInfoType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TWorkloadMetricType int64 + +const ( + TWorkloadMetricType_QUERY_TIME TWorkloadMetricType = 0 + TWorkloadMetricType_BE_SCAN_ROWS TWorkloadMetricType = 1 + TWorkloadMetricType_BE_SCAN_BYTES TWorkloadMetricType = 2 + TWorkloadMetricType_QUERY_BE_MEMORY_BYTES TWorkloadMetricType = 3 +) + +func (p TWorkloadMetricType) String() string { + switch p { + case TWorkloadMetricType_QUERY_TIME: + return "QUERY_TIME" + case TWorkloadMetricType_BE_SCAN_ROWS: + return "BE_SCAN_ROWS" + case 
TWorkloadMetricType_BE_SCAN_BYTES: + return "BE_SCAN_BYTES" + case TWorkloadMetricType_QUERY_BE_MEMORY_BYTES: + return "QUERY_BE_MEMORY_BYTES" + } + return "" +} + +func TWorkloadMetricTypeFromString(s string) (TWorkloadMetricType, error) { + switch s { + case "QUERY_TIME": + return TWorkloadMetricType_QUERY_TIME, nil + case "BE_SCAN_ROWS": + return TWorkloadMetricType_BE_SCAN_ROWS, nil + case "BE_SCAN_BYTES": + return TWorkloadMetricType_BE_SCAN_BYTES, nil + case "QUERY_BE_MEMORY_BYTES": + return TWorkloadMetricType_QUERY_BE_MEMORY_BYTES, nil + } + return TWorkloadMetricType(0), fmt.Errorf("not a valid TWorkloadMetricType string") +} + +func TWorkloadMetricTypePtr(v TWorkloadMetricType) *TWorkloadMetricType { return &v } +func (p *TWorkloadMetricType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TWorkloadMetricType(result.Int64) + return +} + +func (p *TWorkloadMetricType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TCompareOperator int64 + +const ( + TCompareOperator_EQUAL TCompareOperator = 0 + TCompareOperator_GREATER TCompareOperator = 1 + TCompareOperator_GREATER_EQUAL TCompareOperator = 2 + TCompareOperator_LESS TCompareOperator = 3 + TCompareOperator_LESS_EQUAL TCompareOperator = 4 +) + +func (p TCompareOperator) String() string { + switch p { + case TCompareOperator_EQUAL: + return "EQUAL" + case TCompareOperator_GREATER: + return "GREATER" + case TCompareOperator_GREATER_EQUAL: + return "GREATER_EQUAL" + case TCompareOperator_LESS: + return "LESS" + case TCompareOperator_LESS_EQUAL: + return "LESS_EQUAL" + } + return "" +} + +func TCompareOperatorFromString(s string) (TCompareOperator, error) { + switch s { + case "EQUAL": + return TCompareOperator_EQUAL, nil + case "GREATER": + return TCompareOperator_GREATER, nil + case "GREATER_EQUAL": + return TCompareOperator_GREATER_EQUAL, nil + case "LESS": + return TCompareOperator_LESS, nil + case "LESS_EQUAL": + return TCompareOperator_LESS_EQUAL, nil + } + return TCompareOperator(0), fmt.Errorf("not a valid TCompareOperator string") +} + +func TCompareOperatorPtr(v TCompareOperator) *TCompareOperator { return &v } +func (p *TCompareOperator) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TCompareOperator(result.Int64) + return +} + +func (p *TCompareOperator) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TWorkloadActionType int64 + +const ( + TWorkloadActionType_MOVE_QUERY_TO_GROUP TWorkloadActionType = 0 + TWorkloadActionType_CANCEL_QUERY TWorkloadActionType = 1 +) + +func (p TWorkloadActionType) String() string { + switch p { + case TWorkloadActionType_MOVE_QUERY_TO_GROUP: + return "MOVE_QUERY_TO_GROUP" + case TWorkloadActionType_CANCEL_QUERY: + return "CANCEL_QUERY" + } + return "" +} + +func TWorkloadActionTypeFromString(s string) (TWorkloadActionType, error) { + switch s { + case "MOVE_QUERY_TO_GROUP": + return TWorkloadActionType_MOVE_QUERY_TO_GROUP, nil + case "CANCEL_QUERY": + return TWorkloadActionType_CANCEL_QUERY, nil + } + return TWorkloadActionType(0), fmt.Errorf("not a valid TWorkloadActionType string") +} + +func TWorkloadActionTypePtr(v TWorkloadActionType) *TWorkloadActionType { return &v } +func (p *TWorkloadActionType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TWorkloadActionType(result.Int64) + return +} + +func (p *TWorkloadActionType) 
Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TWorkloadType int64 + +const ( + TWorkloadType_INTERNAL TWorkloadType = 2 +) + +func (p TWorkloadType) String() string { + switch p { + case TWorkloadType_INTERNAL: + return "INTERNAL" + } + return "" +} + +func TWorkloadTypeFromString(s string) (TWorkloadType, error) { + switch s { + case "INTERNAL": + return TWorkloadType_INTERNAL, nil + } + return TWorkloadType(0), fmt.Errorf("not a valid TWorkloadType string") +} + +func TWorkloadTypePtr(v TWorkloadType) *TWorkloadType { return &v } +func (p *TWorkloadType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TWorkloadType(result.Int64) + return +} + +func (p *TWorkloadType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TExportTaskRequest struct { Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1,required" frugal:"1,required,palointernalservice.TExecPlanFragmentParams" json:"params"` } @@ -24,7 +418,6 @@ func NewTExportTaskRequest() *TExportTaskRequest { } func (p *TExportTaskRequest) InitDefault() { - *p = TExportTaskRequest{} } var TExportTaskRequest_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams @@ -73,17 +466,14 @@ func (p *TExportTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetParams = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -115,10 +505,11 @@ RequiredFieldNotSetError: } func (p *TExportTaskRequest) ReadField1(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTExecPlanFragmentParams() - if err := p.Params.Read(iprot); err != nil { + _field := palointernalservice.NewTExecPlanFragmentParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } @@ -132,7 +523,6 @@ func (p *TExportTaskRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -173,6 +563,7 @@ func (p *TExportTaskRequest) String() string { return "" } return fmt.Sprintf("TExportTaskRequest(%+v)", *p) + } func (p *TExportTaskRequest) DeepEqual(ano *TExportTaskRequest) bool { @@ -196,11 +587,13 @@ func (p *TExportTaskRequest) Field1DeepEqual(src *palointernalservice.TExecPlanF } type TTabletStat struct { - TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` - DataSize *int64 `thrift:"data_size,2,optional" frugal:"2,optional,i64" json:"data_size,omitempty"` - RowNum *int64 `thrift:"row_num,3,optional" frugal:"3,optional,i64" json:"row_num,omitempty"` - VersionCount *int64 `thrift:"version_count,4,optional" frugal:"4,optional,i64" json:"version_count,omitempty"` - RemoteDataSize *int64 `thrift:"remote_data_size,5,optional" frugal:"5,optional,i64" json:"remote_data_size,omitempty"` + TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` + DataSize *int64 `thrift:"data_size,2,optional" frugal:"2,optional,i64" json:"data_size,omitempty"` + RowCount *int64 `thrift:"row_count,3,optional" frugal:"3,optional,i64" json:"row_count,omitempty"` + TotalVersionCount *int64 
`thrift:"total_version_count,4,optional" frugal:"4,optional,i64" json:"total_version_count,omitempty"` + RemoteDataSize *int64 `thrift:"remote_data_size,5,optional" frugal:"5,optional,i64" json:"remote_data_size,omitempty"` + VisibleVersionCount *int64 `thrift:"visible_version_count,6,optional" frugal:"6,optional,i64" json:"visible_version_count,omitempty"` + VisibleVersion *int64 `thrift:"visible_version,7,optional" frugal:"7,optional,i64" json:"visible_version,omitempty"` } func NewTTabletStat() *TTabletStat { @@ -208,7 +601,6 @@ func NewTTabletStat() *TTabletStat { } func (p *TTabletStat) InitDefault() { - *p = TTabletStat{} } func (p *TTabletStat) GetTabletId() (v int64) { @@ -224,22 +616,22 @@ func (p *TTabletStat) GetDataSize() (v int64) { return *p.DataSize } -var TTabletStat_RowNum_DEFAULT int64 +var TTabletStat_RowCount_DEFAULT int64 -func (p *TTabletStat) GetRowNum() (v int64) { - if !p.IsSetRowNum() { - return TTabletStat_RowNum_DEFAULT +func (p *TTabletStat) GetRowCount() (v int64) { + if !p.IsSetRowCount() { + return TTabletStat_RowCount_DEFAULT } - return *p.RowNum + return *p.RowCount } -var TTabletStat_VersionCount_DEFAULT int64 +var TTabletStat_TotalVersionCount_DEFAULT int64 -func (p *TTabletStat) GetVersionCount() (v int64) { - if !p.IsSetVersionCount() { - return TTabletStat_VersionCount_DEFAULT +func (p *TTabletStat) GetTotalVersionCount() (v int64) { + if !p.IsSetTotalVersionCount() { + return TTabletStat_TotalVersionCount_DEFAULT } - return *p.VersionCount + return *p.TotalVersionCount } var TTabletStat_RemoteDataSize_DEFAULT int64 @@ -250,46 +642,80 @@ func (p *TTabletStat) GetRemoteDataSize() (v int64) { } return *p.RemoteDataSize } + +var TTabletStat_VisibleVersionCount_DEFAULT int64 + +func (p *TTabletStat) GetVisibleVersionCount() (v int64) { + if !p.IsSetVisibleVersionCount() { + return TTabletStat_VisibleVersionCount_DEFAULT + } + return *p.VisibleVersionCount +} + +var TTabletStat_VisibleVersion_DEFAULT int64 + +func (p *TTabletStat) GetVisibleVersion() (v int64) { + if !p.IsSetVisibleVersion() { + return TTabletStat_VisibleVersion_DEFAULT + } + return *p.VisibleVersion +} func (p *TTabletStat) SetTabletId(val int64) { p.TabletId = val } func (p *TTabletStat) SetDataSize(val *int64) { p.DataSize = val } -func (p *TTabletStat) SetRowNum(val *int64) { - p.RowNum = val +func (p *TTabletStat) SetRowCount(val *int64) { + p.RowCount = val } -func (p *TTabletStat) SetVersionCount(val *int64) { - p.VersionCount = val +func (p *TTabletStat) SetTotalVersionCount(val *int64) { + p.TotalVersionCount = val } func (p *TTabletStat) SetRemoteDataSize(val *int64) { p.RemoteDataSize = val } +func (p *TTabletStat) SetVisibleVersionCount(val *int64) { + p.VisibleVersionCount = val +} +func (p *TTabletStat) SetVisibleVersion(val *int64) { + p.VisibleVersion = val +} var fieldIDToName_TTabletStat = map[int16]string{ 1: "tablet_id", 2: "data_size", - 3: "row_num", - 4: "version_count", + 3: "row_count", + 4: "total_version_count", 5: "remote_data_size", + 6: "visible_version_count", + 7: "visible_version", } func (p *TTabletStat) IsSetDataSize() bool { return p.DataSize != nil } -func (p *TTabletStat) IsSetRowNum() bool { - return p.RowNum != nil +func (p *TTabletStat) IsSetRowCount() bool { + return p.RowCount != nil } -func (p *TTabletStat) IsSetVersionCount() bool { - return p.VersionCount != nil +func (p *TTabletStat) IsSetTotalVersionCount() bool { + return p.TotalVersionCount != nil } func (p *TTabletStat) IsSetRemoteDataSize() bool { return p.RemoteDataSize != nil } +func (p 
*TTabletStat) IsSetVisibleVersionCount() bool { + return p.VisibleVersionCount != nil +} + +func (p *TTabletStat) IsSetVisibleVersion() bool { + return p.VisibleVersion != nil +} + func (p *TTabletStat) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -316,57 +742,62 @@ func (p *TTabletStat) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -398,47 +829,80 @@ RequiredFieldNotSetError: } func (p *TTabletStat) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TTabletStat) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DataSize = &v + _field = &v } + p.DataSize = _field return nil } - func (p *TTabletStat) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RowNum = &v + _field = &v } + p.RowCount = _field return nil } - func (p *TTabletStat) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionCount = &v + _field = &v } + p.TotalVersionCount = _field return nil } - func (p *TTabletStat) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RemoteDataSize = _field + return nil +} +func (p *TTabletStat) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.VisibleVersionCount = 
_field + return nil +} +func (p *TTabletStat) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteDataSize = &v + _field = &v } + p.VisibleVersion = _field return nil } @@ -468,7 +932,14 @@ func (p *TTabletStat) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -524,11 +995,11 @@ WriteFieldEndError: } func (p *TTabletStat) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetRowNum() { - if err = oprot.WriteFieldBegin("row_num", thrift.I64, 3); err != nil { + if p.IsSetRowCount() { + if err = oprot.WriteFieldBegin("row_count", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.RowNum); err != nil { + if err := oprot.WriteI64(*p.RowCount); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -543,11 +1014,11 @@ WriteFieldEndError: } func (p *TTabletStat) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVersionCount() { - if err = oprot.WriteFieldBegin("version_count", thrift.I64, 4); err != nil { + if p.IsSetTotalVersionCount() { + if err = oprot.WriteFieldBegin("total_version_count", thrift.I64, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.VersionCount); err != nil { + if err := oprot.WriteI64(*p.TotalVersionCount); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -580,11 +1051,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *TTabletStat) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetVisibleVersionCount() { + if err = oprot.WriteFieldBegin("visible_version_count", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.VisibleVersionCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TTabletStat) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetVisibleVersion() { + if err = oprot.WriteFieldBegin("visible_version", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.VisibleVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + func (p *TTabletStat) String() string { if p == nil { return "" } return fmt.Sprintf("TTabletStat(%+v)", *p) + } func (p *TTabletStat) DeepEqual(ano *TTabletStat) bool { @@ -599,15 +1109,21 @@ func (p *TTabletStat) DeepEqual(ano *TTabletStat) bool { if !p.Field2DeepEqual(ano.DataSize) { return false } - if !p.Field3DeepEqual(ano.RowNum) { + if !p.Field3DeepEqual(ano.RowCount) { return false } - if !p.Field4DeepEqual(ano.VersionCount) { + if !p.Field4DeepEqual(ano.TotalVersionCount) { return false } if 
!p.Field5DeepEqual(ano.RemoteDataSize) { return false } + if !p.Field6DeepEqual(ano.VisibleVersionCount) { + return false + } + if !p.Field7DeepEqual(ano.VisibleVersion) { + return false + } return true } @@ -632,24 +1148,24 @@ func (p *TTabletStat) Field2DeepEqual(src *int64) bool { } func (p *TTabletStat) Field3DeepEqual(src *int64) bool { - if p.RowNum == src { + if p.RowCount == src { return true - } else if p.RowNum == nil || src == nil { + } else if p.RowCount == nil || src == nil { return false } - if *p.RowNum != *src { + if *p.RowCount != *src { return false } return true } func (p *TTabletStat) Field4DeepEqual(src *int64) bool { - if p.VersionCount == src { + if p.TotalVersionCount == src { return true - } else if p.VersionCount == nil || src == nil { + } else if p.TotalVersionCount == nil || src == nil { return false } - if *p.VersionCount != *src { + if *p.TotalVersionCount != *src { return false } return true @@ -666,6 +1182,30 @@ func (p *TTabletStat) Field5DeepEqual(src *int64) bool { } return true } +func (p *TTabletStat) Field6DeepEqual(src *int64) bool { + + if p.VisibleVersionCount == src { + return true + } else if p.VisibleVersionCount == nil || src == nil { + return false + } + if *p.VisibleVersionCount != *src { + return false + } + return true +} +func (p *TTabletStat) Field7DeepEqual(src *int64) bool { + + if p.VisibleVersion == src { + return true + } else if p.VisibleVersion == nil || src == nil { + return false + } + if *p.VisibleVersion != *src { + return false + } + return true +} type TTabletStatResult_ struct { TabletsStats map[int64]*TTabletStat `thrift:"tablets_stats,1,required" frugal:"1,required,map" json:"tablets_stats"` @@ -677,7 +1217,6 @@ func NewTTabletStatResult_() *TTabletStatResult_ { } func (p *TTabletStatResult_) InitDefault() { - *p = TTabletStatResult_{} } func (p *TTabletStatResult_) GetTabletsStats() (v map[int64]*TTabletStat) { @@ -734,27 +1273,22 @@ func (p *TTabletStatResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletsStats = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -790,7 +1324,8 @@ func (p *TTabletStatResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TabletsStats = make(map[int64]*TTabletStat, size) + _field := make(map[int64]*TTabletStat, size) + values := make([]TTabletStat, size) for i := 0; i < size; i++ { var _key int64 if v, err := iprot.ReadI64(); err != nil { @@ -798,36 +1333,42 @@ func (p *TTabletStatResult_) ReadField1(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTTabletStat() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.TabletsStats[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.TabletsStats = _field return nil } - func (p *TTabletStatResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TabletStatList = 
make([]*TTabletStat, 0, size) + _field := make([]*TTabletStat, 0, size) + values := make([]TTabletStat, size) for i := 0; i < size; i++ { - _elem := NewTTabletStat() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TabletStatList = append(p.TabletStatList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletStatList = _field return nil } @@ -845,7 +1386,6 @@ func (p *TTabletStatResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -872,11 +1412,9 @@ func (p *TTabletStatResult_) writeField1(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.TabletsStats { - if err := oprot.WriteI64(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -926,6 +1464,7 @@ func (p *TTabletStatResult_) String() string { return "" } return fmt.Sprintf("TTabletStatResult_(%+v)", *p) + } func (p *TTabletStatResult_) DeepEqual(ano *TTabletStatResult_) bool { @@ -982,7 +1521,6 @@ func NewTKafkaLoadInfo() *TKafkaLoadInfo { } func (p *TKafkaLoadInfo) InitDefault() { - *p = TKafkaLoadInfo{} } func (p *TKafkaLoadInfo) GetBrokers() (v string) { @@ -1057,10 +1595,8 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrokers = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -1068,10 +1604,8 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTopic = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { @@ -1079,27 +1613,22 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartitionBeginOffset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1141,29 +1670,33 @@ RequiredFieldNotSetError: } func (p *TKafkaLoadInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Brokers = v + _field = v } + p.Brokers = _field return nil } - func (p *TKafkaLoadInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Topic = v + _field = v } + p.Topic = _field return nil } - func (p *TKafkaLoadInfo) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.PartitionBeginOffset = make(map[int32]int64, size) + _field := make(map[int32]int64, size) for i := 0; i < size; i++ { var _key int32 if v, err := iprot.ReadI32(); err != nil { @@ -1179,20 +1712,20 @@ 
func (p *TKafkaLoadInfo) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.PartitionBeginOffset[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.PartitionBeginOffset = _field return nil } - func (p *TKafkaLoadInfo) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -1208,11 +1741,12 @@ func (p *TKafkaLoadInfo) ReadField4(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } @@ -1238,7 +1772,6 @@ func (p *TKafkaLoadInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1299,11 +1832,9 @@ func (p *TKafkaLoadInfo) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.PartitionBeginOffset { - if err := oprot.WriteI32(k); err != nil { return err } - if err := oprot.WriteI64(v); err != nil { return err } @@ -1330,11 +1861,9 @@ func (p *TKafkaLoadInfo) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -1358,6 +1887,7 @@ func (p *TKafkaLoadInfo) String() string { return "" } return fmt.Sprintf("TKafkaLoadInfo(%+v)", *p) + } func (p *TKafkaLoadInfo) DeepEqual(ano *TKafkaLoadInfo) bool { @@ -1423,22 +1953,25 @@ func (p *TKafkaLoadInfo) Field4DeepEqual(src map[string]string) bool { } type TRoutineLoadTask struct { - Type types.TLoadSourceType `thrift:"type,1,required" frugal:"1,required,TLoadSourceType" json:"type"` - JobId int64 `thrift:"job_id,2,required" frugal:"2,required,i64" json:"job_id"` - Id *types.TUniqueId `thrift:"id,3,required" frugal:"3,required,types.TUniqueId" json:"id"` - TxnId int64 `thrift:"txn_id,4,required" frugal:"4,required,i64" json:"txn_id"` - AuthCode int64 `thrift:"auth_code,5,required" frugal:"5,required,i64" json:"auth_code"` - Db *string `thrift:"db,6,optional" frugal:"6,optional,string" json:"db,omitempty"` - Tbl *string `thrift:"tbl,7,optional" frugal:"7,optional,string" json:"tbl,omitempty"` - Label *string `thrift:"label,8,optional" frugal:"8,optional,string" json:"label,omitempty"` - MaxIntervalS *int64 `thrift:"max_interval_s,9,optional" frugal:"9,optional,i64" json:"max_interval_s,omitempty"` - MaxBatchRows *int64 `thrift:"max_batch_rows,10,optional" frugal:"10,optional,i64" json:"max_batch_rows,omitempty"` - MaxBatchSize *int64 `thrift:"max_batch_size,11,optional" frugal:"11,optional,i64" json:"max_batch_size,omitempty"` - KafkaLoadInfo *TKafkaLoadInfo `thrift:"kafka_load_info,12,optional" frugal:"12,optional,TKafkaLoadInfo" json:"kafka_load_info,omitempty"` - Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,13,optional" frugal:"13,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"` - Format *plannodes.TFileFormatType `thrift:"format,14,optional" frugal:"14,optional,TFileFormatType" json:"format,omitempty"` - PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,15,optional" frugal:"15,optional,palointernalservice.TPipelineFragmentParams" 
json:"pipeline_params,omitempty"` - IsMultiTable *bool `thrift:"is_multi_table,16,optional" frugal:"16,optional,bool" json:"is_multi_table,omitempty"` + Type types.TLoadSourceType `thrift:"type,1,required" frugal:"1,required,TLoadSourceType" json:"type"` + JobId int64 `thrift:"job_id,2,required" frugal:"2,required,i64" json:"job_id"` + Id *types.TUniqueId `thrift:"id,3,required" frugal:"3,required,types.TUniqueId" json:"id"` + TxnId int64 `thrift:"txn_id,4,required" frugal:"4,required,i64" json:"txn_id"` + AuthCode int64 `thrift:"auth_code,5,required" frugal:"5,required,i64" json:"auth_code"` + Db *string `thrift:"db,6,optional" frugal:"6,optional,string" json:"db,omitempty"` + Tbl *string `thrift:"tbl,7,optional" frugal:"7,optional,string" json:"tbl,omitempty"` + Label *string `thrift:"label,8,optional" frugal:"8,optional,string" json:"label,omitempty"` + MaxIntervalS *int64 `thrift:"max_interval_s,9,optional" frugal:"9,optional,i64" json:"max_interval_s,omitempty"` + MaxBatchRows *int64 `thrift:"max_batch_rows,10,optional" frugal:"10,optional,i64" json:"max_batch_rows,omitempty"` + MaxBatchSize *int64 `thrift:"max_batch_size,11,optional" frugal:"11,optional,i64" json:"max_batch_size,omitempty"` + KafkaLoadInfo *TKafkaLoadInfo `thrift:"kafka_load_info,12,optional" frugal:"12,optional,TKafkaLoadInfo" json:"kafka_load_info,omitempty"` + Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,13,optional" frugal:"13,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"` + Format *plannodes.TFileFormatType `thrift:"format,14,optional" frugal:"14,optional,TFileFormatType" json:"format,omitempty"` + PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,15,optional" frugal:"15,optional,palointernalservice.TPipelineFragmentParams" json:"pipeline_params,omitempty"` + IsMultiTable *bool `thrift:"is_multi_table,16,optional" frugal:"16,optional,bool" json:"is_multi_table,omitempty"` + MemtableOnSinkNode *bool `thrift:"memtable_on_sink_node,17,optional" frugal:"17,optional,bool" json:"memtable_on_sink_node,omitempty"` + QualifiedUser *string `thrift:"qualified_user,18,optional" frugal:"18,optional,string" json:"qualified_user,omitempty"` + CloudCluster *string `thrift:"cloud_cluster,19,optional" frugal:"19,optional,string" json:"cloud_cluster,omitempty"` } func NewTRoutineLoadTask() *TRoutineLoadTask { @@ -1446,7 +1979,6 @@ func NewTRoutineLoadTask() *TRoutineLoadTask { } func (p *TRoutineLoadTask) InitDefault() { - *p = TRoutineLoadTask{} } func (p *TRoutineLoadTask) GetType() (v types.TLoadSourceType) { @@ -1572,6 +2104,33 @@ func (p *TRoutineLoadTask) GetIsMultiTable() (v bool) { } return *p.IsMultiTable } + +var TRoutineLoadTask_MemtableOnSinkNode_DEFAULT bool + +func (p *TRoutineLoadTask) GetMemtableOnSinkNode() (v bool) { + if !p.IsSetMemtableOnSinkNode() { + return TRoutineLoadTask_MemtableOnSinkNode_DEFAULT + } + return *p.MemtableOnSinkNode +} + +var TRoutineLoadTask_QualifiedUser_DEFAULT string + +func (p *TRoutineLoadTask) GetQualifiedUser() (v string) { + if !p.IsSetQualifiedUser() { + return TRoutineLoadTask_QualifiedUser_DEFAULT + } + return *p.QualifiedUser +} + +var TRoutineLoadTask_CloudCluster_DEFAULT string + +func (p *TRoutineLoadTask) GetCloudCluster() (v string) { + if !p.IsSetCloudCluster() { + return TRoutineLoadTask_CloudCluster_DEFAULT + } + return *p.CloudCluster +} func (p *TRoutineLoadTask) SetType(val types.TLoadSourceType) { p.Type = val } @@ -1620,6 +2179,15 @@ func (p *TRoutineLoadTask) 
SetPipelineParams(val *palointernalservice.TPipelineF func (p *TRoutineLoadTask) SetIsMultiTable(val *bool) { p.IsMultiTable = val } +func (p *TRoutineLoadTask) SetMemtableOnSinkNode(val *bool) { + p.MemtableOnSinkNode = val +} +func (p *TRoutineLoadTask) SetQualifiedUser(val *string) { + p.QualifiedUser = val +} +func (p *TRoutineLoadTask) SetCloudCluster(val *string) { + p.CloudCluster = val +} var fieldIDToName_TRoutineLoadTask = map[int16]string{ 1: "type", @@ -1638,6 +2206,9 @@ var fieldIDToName_TRoutineLoadTask = map[int16]string{ 14: "format", 15: "pipeline_params", 16: "is_multi_table", + 17: "memtable_on_sink_node", + 18: "qualified_user", + 19: "cloud_cluster", } func (p *TRoutineLoadTask) IsSetId() bool { @@ -1688,6 +2259,18 @@ func (p *TRoutineLoadTask) IsSetIsMultiTable() bool { return p.IsMultiTable != nil } +func (p *TRoutineLoadTask) IsSetMemtableOnSinkNode() bool { + return p.MemtableOnSinkNode != nil +} + +func (p *TRoutineLoadTask) IsSetQualifiedUser() bool { + return p.QualifiedUser != nil +} + +func (p *TRoutineLoadTask) IsSetCloudCluster() bool { + return p.CloudCluster != nil +} + func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -1718,10 +2301,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -1729,10 +2310,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -1740,10 +2319,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -1751,10 +2328,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -1762,127 +2337,126 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAuthCode = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRUCT { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.BOOL { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.STRING { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.STRING { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1934,143 +2508,201 @@ RequiredFieldNotSetError: } func (p *TRoutineLoadTask) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TLoadSourceType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = types.TLoadSourceType(v) + _field = types.TLoadSourceType(v) } + p.Type = _field return nil } - func (p *TRoutineLoadTask) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } 
else { - p.JobId = v + _field = v } + p.JobId = _field return nil } - func (p *TRoutineLoadTask) ReadField3(iprot thrift.TProtocol) error { - p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.Id = _field return nil } - func (p *TRoutineLoadTask) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnId = v + _field = v } + p.TxnId = _field return nil } - func (p *TRoutineLoadTask) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AuthCode = v + _field = v } + p.AuthCode = _field return nil } - func (p *TRoutineLoadTask) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TRoutineLoadTask) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Tbl = &v + _field = &v } + p.Tbl = _field return nil } - func (p *TRoutineLoadTask) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Label = &v + _field = &v } + p.Label = _field return nil } - func (p *TRoutineLoadTask) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxIntervalS = &v + _field = &v } + p.MaxIntervalS = _field return nil } - func (p *TRoutineLoadTask) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxBatchRows = &v + _field = &v } + p.MaxBatchRows = _field return nil } - func (p *TRoutineLoadTask) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxBatchSize = &v + _field = &v } + p.MaxBatchSize = _field return nil } - func (p *TRoutineLoadTask) ReadField12(iprot thrift.TProtocol) error { - p.KafkaLoadInfo = NewTKafkaLoadInfo() - if err := p.KafkaLoadInfo.Read(iprot); err != nil { + _field := NewTKafkaLoadInfo() + if err := _field.Read(iprot); err != nil { return err } + p.KafkaLoadInfo = _field return nil } - func (p *TRoutineLoadTask) ReadField13(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTExecPlanFragmentParams() - if err := p.Params.Read(iprot); err != nil { + _field := palointernalservice.NewTExecPlanFragmentParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } - func (p *TRoutineLoadTask) ReadField14(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileFormatType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := plannodes.TFileFormatType(v) - p.Format = &tmp + _field = &tmp } + p.Format = _field return nil } - func (p *TRoutineLoadTask) ReadField15(iprot thrift.TProtocol) error { - p.PipelineParams = palointernalservice.NewTPipelineFragmentParams() - if err := p.PipelineParams.Read(iprot); err != nil { + _field := palointernalservice.NewTPipelineFragmentParams() + if err := _field.Read(iprot); err != nil { return err } + p.PipelineParams = _field return nil } - func (p *TRoutineLoadTask) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + 
return err + } else { + _field = &v + } + p.IsMultiTable = _field + return nil +} +func (p *TRoutineLoadTask) ReadField17(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMultiTable = &v + _field = &v } + p.MemtableOnSinkNode = _field + return nil +} +func (p *TRoutineLoadTask) ReadField18(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.QualifiedUser = _field + return nil +} +func (p *TRoutineLoadTask) ReadField19(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CloudCluster = _field return nil } @@ -2144,7 +2776,18 @@ func (p *TRoutineLoadTask) Write(oprot thrift.TProtocol) (err error) { fieldId = 16 goto WriteFieldError } - + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2457,11 +3100,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } +func (p *TRoutineLoadTask) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetMemtableOnSinkNode() { + if err = oprot.WriteFieldBegin("memtable_on_sink_node", thrift.BOOL, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.MemtableOnSinkNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TRoutineLoadTask) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetQualifiedUser() { + if err = oprot.WriteFieldBegin("qualified_user", thrift.STRING, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.QualifiedUser); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TRoutineLoadTask) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudCluster() { + if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CloudCluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + func (p *TRoutineLoadTask) String() string { if p == nil { return "" } return fmt.Sprintf("TRoutineLoadTask(%+v)", *p) + } func (p *TRoutineLoadTask) DeepEqual(ano *TRoutineLoadTask) bool { @@ -2518,6 +3219,15 @@ func (p *TRoutineLoadTask) DeepEqual(ano *TRoutineLoadTask) bool { if !p.Field16DeepEqual(ano.IsMultiTable) { 
return false } + if !p.Field17DeepEqual(ano.MemtableOnSinkNode) { + return false + } + if !p.Field18DeepEqual(ano.QualifiedUser) { + return false + } + if !p.Field19DeepEqual(ano.CloudCluster) { + return false + } return true } @@ -2673,6 +3383,42 @@ func (p *TRoutineLoadTask) Field16DeepEqual(src *bool) bool { } return true } +func (p *TRoutineLoadTask) Field17DeepEqual(src *bool) bool { + + if p.MemtableOnSinkNode == src { + return true + } else if p.MemtableOnSinkNode == nil || src == nil { + return false + } + if *p.MemtableOnSinkNode != *src { + return false + } + return true +} +func (p *TRoutineLoadTask) Field18DeepEqual(src *string) bool { + + if p.QualifiedUser == src { + return true + } else if p.QualifiedUser == nil || src == nil { + return false + } + if strings.Compare(*p.QualifiedUser, *src) != 0 { + return false + } + return true +} +func (p *TRoutineLoadTask) Field19DeepEqual(src *string) bool { + + if p.CloudCluster == src { + return true + } else if p.CloudCluster == nil || src == nil { + return false + } + if strings.Compare(*p.CloudCluster, *src) != 0 { + return false + } + return true +} type TKafkaMetaProxyRequest struct { KafkaInfo *TKafkaLoadInfo `thrift:"kafka_info,1,optional" frugal:"1,optional,TKafkaLoadInfo" json:"kafka_info,omitempty"` @@ -2683,7 +3429,6 @@ func NewTKafkaMetaProxyRequest() *TKafkaMetaProxyRequest { } func (p *TKafkaMetaProxyRequest) InitDefault() { - *p = TKafkaMetaProxyRequest{} } var TKafkaMetaProxyRequest_KafkaInfo_DEFAULT *TKafkaLoadInfo @@ -2730,17 +3475,14 @@ func (p *TKafkaMetaProxyRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2766,10 +3508,11 @@ ReadStructEndError: } func (p *TKafkaMetaProxyRequest) ReadField1(iprot thrift.TProtocol) error { - p.KafkaInfo = NewTKafkaLoadInfo() - if err := p.KafkaInfo.Read(iprot); err != nil { + _field := NewTKafkaLoadInfo() + if err := _field.Read(iprot); err != nil { return err } + p.KafkaInfo = _field return nil } @@ -2783,7 +3526,6 @@ func (p *TKafkaMetaProxyRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2826,6 +3568,7 @@ func (p *TKafkaMetaProxyRequest) String() string { return "" } return fmt.Sprintf("TKafkaMetaProxyRequest(%+v)", *p) + } func (p *TKafkaMetaProxyRequest) DeepEqual(ano *TKafkaMetaProxyRequest) bool { @@ -2857,7 +3600,6 @@ func NewTKafkaMetaProxyResult_() *TKafkaMetaProxyResult_ { } func (p *TKafkaMetaProxyResult_) InitDefault() { - *p = TKafkaMetaProxyResult_{} } var TKafkaMetaProxyResult__PartitionIds_DEFAULT []int32 @@ -2904,17 +3646,14 @@ func (p *TKafkaMetaProxyResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2944,8 +3683,9 @@ func (p *TKafkaMetaProxyResult_) ReadField1(iprot thrift.TProtocol) 
error { if err != nil { return err } - p.PartitionIds = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -2953,11 +3693,12 @@ func (p *TKafkaMetaProxyResult_) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.PartitionIds = append(p.PartitionIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionIds = _field return nil } @@ -2971,7 +3712,6 @@ func (p *TKafkaMetaProxyResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3022,6 +3762,7 @@ func (p *TKafkaMetaProxyResult_) String() string { return "" } return fmt.Sprintf("TKafkaMetaProxyResult_(%+v)", *p) + } func (p *TKafkaMetaProxyResult_) DeepEqual(ano *TKafkaMetaProxyResult_) bool { @@ -3059,7 +3800,6 @@ func NewTProxyRequest() *TProxyRequest { } func (p *TProxyRequest) InitDefault() { - *p = TProxyRequest{} } var TProxyRequest_KafkaMetaRequest_DEFAULT *TKafkaMetaProxyRequest @@ -3106,17 +3846,14 @@ func (p *TProxyRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3142,10 +3879,11 @@ ReadStructEndError: } func (p *TProxyRequest) ReadField1(iprot thrift.TProtocol) error { - p.KafkaMetaRequest = NewTKafkaMetaProxyRequest() - if err := p.KafkaMetaRequest.Read(iprot); err != nil { + _field := NewTKafkaMetaProxyRequest() + if err := _field.Read(iprot); err != nil { return err } + p.KafkaMetaRequest = _field return nil } @@ -3159,7 +3897,6 @@ func (p *TProxyRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3202,6 +3939,7 @@ func (p *TProxyRequest) String() string { return "" } return fmt.Sprintf("TProxyRequest(%+v)", *p) + } func (p *TProxyRequest) DeepEqual(ano *TProxyRequest) bool { @@ -3234,7 +3972,6 @@ func NewTProxyResult_() *TProxyResult_ { } func (p *TProxyResult_) InitDefault() { - *p = TProxyResult_{} } var TProxyResult__Status_DEFAULT *status.TStatus @@ -3300,27 +4037,22 @@ func (p *TProxyResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3352,18 +4084,19 @@ RequiredFieldNotSetError: } func (p *TProxyResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + 
p.Status = _field return nil } - func (p *TProxyResult_) ReadField2(iprot thrift.TProtocol) error { - p.KafkaMetaResult_ = NewTKafkaMetaProxyResult_() - if err := p.KafkaMetaResult_.Read(iprot); err != nil { + _field := NewTKafkaMetaProxyResult_() + if err := _field.Read(iprot); err != nil { return err } + p.KafkaMetaResult_ = _field return nil } @@ -3381,7 +4114,6 @@ func (p *TProxyResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3441,6 +4173,7 @@ func (p *TProxyResult_) String() string { return "" } return fmt.Sprintf("TProxyResult_(%+v)", *p) + } func (p *TProxyResult_) DeepEqual(ano *TProxyResult_) bool { @@ -3500,7 +4233,6 @@ func NewTStreamLoadRecord() *TStreamLoadRecord { } func (p *TStreamLoadRecord) InitDefault() { - *p = TStreamLoadRecord{} } var TStreamLoadRecord_Cluster_DEFAULT string @@ -3741,10 +4473,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -3752,10 +4482,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -3763,10 +4491,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -3774,10 +4500,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { @@ -3785,20 +4509,16 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTbl = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { @@ -3806,10 +4526,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLabel = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { @@ -3817,10 +4535,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { @@ -3828,30 +4544,24 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetMessage = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { @@ -3859,10 +4569,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTotalRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { @@ -3870,10 +4578,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLoadedRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I64 { @@ -3881,10 +4587,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFilteredRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I64 { @@ -3892,10 +4596,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUnselectedRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.I64 { @@ -3903,10 +4605,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLoadBytes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.I64 { @@ -3914,10 +4614,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStartTime = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.I64 { @@ -3925,27 +4623,22 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFinishTime = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRING { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4042,173 +4735,212 @@ RequiredFieldNotSetError: } func (p *TStreamLoadRecord) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Cluster = &v + _field = &v } + p.Cluster = _field return nil } - func (p *TStreamLoadRecord) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TStreamLoadRecord) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Passwd = _field return nil } - func (p *TStreamLoadRecord) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Db = _field return nil } - func (p *TStreamLoadRecord) ReadField5(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Tbl = v + _field = v } + p.Tbl = _field return nil } - func (p *TStreamLoadRecord) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TStreamLoadRecord) ReadField7(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Label = v + _field = v } + p.Label = _field return nil } - func (p *TStreamLoadRecord) ReadField8(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Status = v + _field = v } + p.Status = _field return nil } - func (p *TStreamLoadRecord) ReadField9(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Message = v + _field = v } + p.Message = _field return nil } - func (p *TStreamLoadRecord) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Url = &v + _field = &v } + p.Url = _field return nil } - func (p *TStreamLoadRecord) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AuthCode = &v + _field = &v } + p.AuthCode = _field return nil } - func (p *TStreamLoadRecord) ReadField12(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TotalRows = v + _field = v } + p.TotalRows = _field return nil } - func (p *TStreamLoadRecord) ReadField13(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadedRows = v + _field = v } + p.LoadedRows = _field return nil } - func (p *TStreamLoadRecord) ReadField14(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FilteredRows = v + _field = v } + p.FilteredRows = _field return nil } - func (p *TStreamLoadRecord) ReadField15(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return 
err } else { - p.UnselectedRows = v + _field = v } + p.UnselectedRows = _field return nil } - func (p *TStreamLoadRecord) ReadField16(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadBytes = v + _field = v } + p.LoadBytes = _field return nil } - func (p *TStreamLoadRecord) ReadField17(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.StartTime = v + _field = v } + p.StartTime = _field return nil } - func (p *TStreamLoadRecord) ReadField18(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FinishTime = v + _field = v } + p.FinishTime = _field return nil } - func (p *TStreamLoadRecord) ReadField19(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = &v + _field = &v } + p.Comment = _field return nil } @@ -4294,7 +5026,6 @@ func (p *TStreamLoadRecord) Write(oprot thrift.TProtocol) (err error) { fieldId = 19 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4651,6 +5382,7 @@ func (p *TStreamLoadRecord) String() string { return "" } return fmt.Sprintf("TStreamLoadRecord(%+v)", *p) + } func (p *TStreamLoadRecord) DeepEqual(ano *TStreamLoadRecord) bool { @@ -4887,7 +5619,6 @@ func NewTStreamLoadRecordResult_() *TStreamLoadRecordResult_ { } func (p *TStreamLoadRecordResult_) InitDefault() { - *p = TStreamLoadRecordResult_{} } func (p *TStreamLoadRecordResult_) GetStreamLoadRecord() (v map[string]*TStreamLoadRecord) { @@ -4927,17 +5658,14 @@ func (p *TStreamLoadRecordResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStreamLoadRecord = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4973,7 +5701,8 @@ func (p *TStreamLoadRecordResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.StreamLoadRecord = make(map[string]*TStreamLoadRecord, size) + _field := make(map[string]*TStreamLoadRecord, size) + values := make([]TStreamLoadRecord, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -4981,16 +5710,19 @@ func (p *TStreamLoadRecordResult_) ReadField1(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTStreamLoadRecord() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.StreamLoadRecord[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.StreamLoadRecord = _field return nil } @@ -5004,7 +5736,6 @@ func (p *TStreamLoadRecordResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5031,11 +5762,9 @@ func (p *TStreamLoadRecordResult_) writeField1(oprot thrift.TProtocol) (err erro return err } for k, v := range p.StreamLoadRecord { - if err := oprot.WriteString(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -5058,6 +5787,7 @@ func (p *TStreamLoadRecordResult_) String() string { return "" } return 
fmt.Sprintf("TStreamLoadRecordResult_(%+v)", *p) + } func (p *TStreamLoadRecordResult_) DeepEqual(ano *TStreamLoadRecordResult_) bool { @@ -5097,7 +5827,6 @@ func NewTDiskTrashInfo() *TDiskTrashInfo { } func (p *TDiskTrashInfo) InitDefault() { - *p = TDiskTrashInfo{} } func (p *TDiskTrashInfo) GetRootPath() (v string) { @@ -5155,10 +5884,8 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRootPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -5166,10 +5893,8 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetState = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -5177,17 +5902,14 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTrashUsedCapacity = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5229,29 +5951,36 @@ RequiredFieldNotSetError: } func (p *TDiskTrashInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RootPath = v + _field = v } + p.RootPath = _field return nil } - func (p *TDiskTrashInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.State = v + _field = v } + p.State = _field return nil } - func (p *TDiskTrashInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TrashUsedCapacity = v + _field = v } + p.TrashUsedCapacity = _field return nil } @@ -5273,7 +6002,6 @@ func (p *TDiskTrashInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5348,6 +6076,7 @@ func (p *TDiskTrashInfo) String() string { return "" } return fmt.Sprintf("TDiskTrashInfo(%+v)", *p) + } func (p *TDiskTrashInfo) DeepEqual(ano *TDiskTrashInfo) bool { @@ -5400,7 +6129,6 @@ func NewTCheckStorageFormatResult_() *TCheckStorageFormatResult_ { } func (p *TCheckStorageFormatResult_) InitDefault() { - *p = TCheckStorageFormatResult_{} } var TCheckStorageFormatResult__V1Tablets_DEFAULT []int64 @@ -5464,27 +6192,22 @@ func (p *TCheckStorageFormatResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil 
{ goto ReadFieldEndError } @@ -5514,8 +6237,9 @@ func (p *TCheckStorageFormatResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.V1Tablets = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -5523,21 +6247,22 @@ func (p *TCheckStorageFormatResult_) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.V1Tablets = append(p.V1Tablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.V1Tablets = _field return nil } - func (p *TCheckStorageFormatResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.V2Tablets = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -5545,11 +6270,12 @@ func (p *TCheckStorageFormatResult_) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.V2Tablets = append(p.V2Tablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.V2Tablets = _field return nil } @@ -5567,7 +6293,6 @@ func (p *TCheckStorageFormatResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5645,6 +6370,7 @@ func (p *TCheckStorageFormatResult_) String() string { return "" } return fmt.Sprintf("TCheckStorageFormatResult_(%+v)", *p) + } func (p *TCheckStorageFormatResult_) DeepEqual(ano *TCheckStorageFormatResult_) bool { @@ -5689,168 +6415,53 @@ func (p *TCheckStorageFormatResult_) Field2DeepEqual(src []int64) bool { return true } -type TIngestBinlogRequest struct { - TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"` - RemoteTabletId *int64 `thrift:"remote_tablet_id,2,optional" frugal:"2,optional,i64" json:"remote_tablet_id,omitempty"` - BinlogVersion *int64 `thrift:"binlog_version,3,optional" frugal:"3,optional,i64" json:"binlog_version,omitempty"` - RemoteHost *string `thrift:"remote_host,4,optional" frugal:"4,optional,string" json:"remote_host,omitempty"` - RemotePort *string `thrift:"remote_port,5,optional" frugal:"5,optional,string" json:"remote_port,omitempty"` - PartitionId *int64 `thrift:"partition_id,6,optional" frugal:"6,optional,i64" json:"partition_id,omitempty"` - LocalTabletId *int64 `thrift:"local_tablet_id,7,optional" frugal:"7,optional,i64" json:"local_tablet_id,omitempty"` - LoadId *types.TUniqueId `thrift:"load_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"load_id,omitempty"` -} - -func NewTIngestBinlogRequest() *TIngestBinlogRequest { - return &TIngestBinlogRequest{} -} - -func (p *TIngestBinlogRequest) InitDefault() { - *p = TIngestBinlogRequest{} -} - -var TIngestBinlogRequest_TxnId_DEFAULT int64 - -func (p *TIngestBinlogRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TIngestBinlogRequest_TxnId_DEFAULT - } - return *p.TxnId -} - -var TIngestBinlogRequest_RemoteTabletId_DEFAULT int64 - -func (p *TIngestBinlogRequest) GetRemoteTabletId() (v int64) { - if !p.IsSetRemoteTabletId() { - return TIngestBinlogRequest_RemoteTabletId_DEFAULT - } - return *p.RemoteTabletId -} - -var TIngestBinlogRequest_BinlogVersion_DEFAULT int64 - -func (p *TIngestBinlogRequest) GetBinlogVersion() (v int64) { - if !p.IsSetBinlogVersion() { - return TIngestBinlogRequest_BinlogVersion_DEFAULT - } - 
return *p.BinlogVersion -} - -var TIngestBinlogRequest_RemoteHost_DEFAULT string - -func (p *TIngestBinlogRequest) GetRemoteHost() (v string) { - if !p.IsSetRemoteHost() { - return TIngestBinlogRequest_RemoteHost_DEFAULT - } - return *p.RemoteHost -} - -var TIngestBinlogRequest_RemotePort_DEFAULT string - -func (p *TIngestBinlogRequest) GetRemotePort() (v string) { - if !p.IsSetRemotePort() { - return TIngestBinlogRequest_RemotePort_DEFAULT - } - return *p.RemotePort -} - -var TIngestBinlogRequest_PartitionId_DEFAULT int64 - -func (p *TIngestBinlogRequest) GetPartitionId() (v int64) { - if !p.IsSetPartitionId() { - return TIngestBinlogRequest_PartitionId_DEFAULT - } - return *p.PartitionId -} - -var TIngestBinlogRequest_LocalTabletId_DEFAULT int64 - -func (p *TIngestBinlogRequest) GetLocalTabletId() (v int64) { - if !p.IsSetLocalTabletId() { - return TIngestBinlogRequest_LocalTabletId_DEFAULT - } - return *p.LocalTabletId -} - -var TIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId - -func (p *TIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) { - if !p.IsSetLoadId() { - return TIngestBinlogRequest_LoadId_DEFAULT - } - return p.LoadId -} -func (p *TIngestBinlogRequest) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TIngestBinlogRequest) SetRemoteTabletId(val *int64) { - p.RemoteTabletId = val -} -func (p *TIngestBinlogRequest) SetBinlogVersion(val *int64) { - p.BinlogVersion = val -} -func (p *TIngestBinlogRequest) SetRemoteHost(val *string) { - p.RemoteHost = val -} -func (p *TIngestBinlogRequest) SetRemotePort(val *string) { - p.RemotePort = val -} -func (p *TIngestBinlogRequest) SetPartitionId(val *int64) { - p.PartitionId = val -} -func (p *TIngestBinlogRequest) SetLocalTabletId(val *int64) { - p.LocalTabletId = val -} -func (p *TIngestBinlogRequest) SetLoadId(val *types.TUniqueId) { - p.LoadId = val +type TWarmUpCacheAsyncRequest struct { + Host string `thrift:"host,1,required" frugal:"1,required,string" json:"host"` + BrpcPort int32 `thrift:"brpc_port,2,required" frugal:"2,required,i32" json:"brpc_port"` + TabletIds []int64 `thrift:"tablet_ids,3,required" frugal:"3,required,list" json:"tablet_ids"` } -var fieldIDToName_TIngestBinlogRequest = map[int16]string{ - 1: "txn_id", - 2: "remote_tablet_id", - 3: "binlog_version", - 4: "remote_host", - 5: "remote_port", - 6: "partition_id", - 7: "local_tablet_id", - 8: "load_id", +func NewTWarmUpCacheAsyncRequest() *TWarmUpCacheAsyncRequest { + return &TWarmUpCacheAsyncRequest{} } -func (p *TIngestBinlogRequest) IsSetTxnId() bool { - return p.TxnId != nil +func (p *TWarmUpCacheAsyncRequest) InitDefault() { } -func (p *TIngestBinlogRequest) IsSetRemoteTabletId() bool { - return p.RemoteTabletId != nil +func (p *TWarmUpCacheAsyncRequest) GetHost() (v string) { + return p.Host } -func (p *TIngestBinlogRequest) IsSetBinlogVersion() bool { - return p.BinlogVersion != nil +func (p *TWarmUpCacheAsyncRequest) GetBrpcPort() (v int32) { + return p.BrpcPort } -func (p *TIngestBinlogRequest) IsSetRemoteHost() bool { - return p.RemoteHost != nil +func (p *TWarmUpCacheAsyncRequest) GetTabletIds() (v []int64) { + return p.TabletIds } - -func (p *TIngestBinlogRequest) IsSetRemotePort() bool { - return p.RemotePort != nil +func (p *TWarmUpCacheAsyncRequest) SetHost(val string) { + p.Host = val } - -func (p *TIngestBinlogRequest) IsSetPartitionId() bool { - return p.PartitionId != nil +func (p *TWarmUpCacheAsyncRequest) SetBrpcPort(val int32) { + p.BrpcPort = val } - -func (p *TIngestBinlogRequest) IsSetLocalTabletId() bool { - return p.LocalTabletId 
!= nil +func (p *TWarmUpCacheAsyncRequest) SetTabletIds(val []int64) { + p.TabletIds = val } -func (p *TIngestBinlogRequest) IsSetLoadId() bool { - return p.LoadId != nil +var fieldIDToName_TWarmUpCacheAsyncRequest = map[int16]string{ + 1: "host", + 2: "brpc_port", + 3: "tablet_ids", } -func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TWarmUpCacheAsyncRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetHost bool = false + var issetBrpcPort bool = false + var issetTabletIds bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -5867,91 +6478,37 @@ func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetHost = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetBrpcPort = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetTabletIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5960,13 +6517,27 @@ func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetHost { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBrpcPort { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTabletIds { + fieldId = 3 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read 
field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -5974,82 +6545,59 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncRequest[fieldId])) } -func (p *TIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v - } - return nil -} - -func (p *TIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.RemoteTabletId = &v - } - return nil -} - -func (p *TIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.BinlogVersion = &v - } - return nil -} +func (p *TWarmUpCacheAsyncRequest) ReadField1(iprot thrift.TProtocol) error { -func (p *TIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error { + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RemoteHost = &v + _field = v } + p.Host = _field return nil } +func (p *TWarmUpCacheAsyncRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TIngestBinlogRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.RemotePort = &v + _field = v } + p.BrpcPort = _field return nil } - -func (p *TIngestBinlogRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TWarmUpCacheAsyncRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.PartitionId = &v } - return nil -} + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { -func (p *TIngestBinlogRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LocalTabletId = &v - } - return nil -} + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } -func (p *TIngestBinlogRequest) ReadField8(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletIds = _field return nil } -func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TWarmUpCacheAsyncRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TIngestBinlogRequest"); err != nil { + if err = oprot.WriteStructBegin("TWarmUpCacheAsyncRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -6065,27 +6613,6 @@ func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err 
= p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6104,17 +6631,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("host", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Host); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -6123,17 +6648,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetRemoteTabletId() { - if err = oprot.WriteFieldBegin("remote_tablet_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.RemoteTabletId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.BrpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -6142,17 +6665,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetBinlogVersion() { - if err = oprot.WriteFieldBegin("binlog_version", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BinlogVersion); err != nil { +func (p *TWarmUpCacheAsyncRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -6161,269 +6690,96 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetRemoteHost() { - if err = oprot.WriteFieldBegin("remote_host", 
thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.RemoteHost); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncRequest) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} + return fmt.Sprintf("TWarmUpCacheAsyncRequest(%+v)", *p) -func (p *TIngestBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetRemotePort() { - if err = oprot.WriteFieldBegin("remote_port", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.RemotePort); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TIngestBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetPartitionId() { - if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.PartitionId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncRequest) DeepEqual(ano *TWarmUpCacheAsyncRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + if !p.Field1DeepEqual(ano.Host) { + return false + } + if !p.Field2DeepEqual(ano.BrpcPort) { + return false + } + if !p.Field3DeepEqual(ano.TabletIds) { + return false + } + return true } -func (p *TIngestBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetLocalTabletId() { - if err = oprot.WriteFieldBegin("local_tablet_id", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LocalTabletId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncRequest) Field1DeepEqual(src string) bool { + + if strings.Compare(p.Host, src) != 0 { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return true } +func (p *TWarmUpCacheAsyncRequest) Field2DeepEqual(src int32) bool { -func (p *TIngestBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadId() { - if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 8); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p 
*TIngestBinlogRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TIngestBinlogRequest(%+v)", *p) -} - -func (p *TIngestBinlogRequest) DeepEqual(ano *TIngestBinlogRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.TxnId) { - return false - } - if !p.Field2DeepEqual(ano.RemoteTabletId) { - return false - } - if !p.Field3DeepEqual(ano.BinlogVersion) { - return false - } - if !p.Field4DeepEqual(ano.RemoteHost) { - return false - } - if !p.Field5DeepEqual(ano.RemotePort) { - return false - } - if !p.Field6DeepEqual(ano.PartitionId) { - return false - } - if !p.Field7DeepEqual(ano.LocalTabletId) { - return false - } - if !p.Field8DeepEqual(ano.LoadId) { - return false - } - return true -} - -func (p *TIngestBinlogRequest) Field1DeepEqual(src *int64) bool { - - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false - } - return true -} -func (p *TIngestBinlogRequest) Field2DeepEqual(src *int64) bool { - - if p.RemoteTabletId == src { - return true - } else if p.RemoteTabletId == nil || src == nil { - return false - } - if *p.RemoteTabletId != *src { - return false - } - return true -} -func (p *TIngestBinlogRequest) Field3DeepEqual(src *int64) bool { - - if p.BinlogVersion == src { - return true - } else if p.BinlogVersion == nil || src == nil { - return false - } - if *p.BinlogVersion != *src { - return false - } - return true -} -func (p *TIngestBinlogRequest) Field4DeepEqual(src *string) bool { - - if p.RemoteHost == src { - return true - } else if p.RemoteHost == nil || src == nil { - return false - } - if strings.Compare(*p.RemoteHost, *src) != 0 { - return false - } - return true -} -func (p *TIngestBinlogRequest) Field5DeepEqual(src *string) bool { - - if p.RemotePort == src { - return true - } else if p.RemotePort == nil || src == nil { - return false - } - if strings.Compare(*p.RemotePort, *src) != 0 { - return false - } - return true -} -func (p *TIngestBinlogRequest) Field6DeepEqual(src *int64) bool { - - if p.PartitionId == src { - return true - } else if p.PartitionId == nil || src == nil { - return false - } - if *p.PartitionId != *src { + if p.BrpcPort != src { return false } return true } -func (p *TIngestBinlogRequest) Field7DeepEqual(src *int64) bool { +func (p *TWarmUpCacheAsyncRequest) Field3DeepEqual(src []int64) bool { - if p.LocalTabletId == src { - return true - } else if p.LocalTabletId == nil || src == nil { - return false - } - if *p.LocalTabletId != *src { + if len(p.TabletIds) != len(src) { return false } - return true -} -func (p *TIngestBinlogRequest) Field8DeepEqual(src *types.TUniqueId) bool { - - if !p.LoadId.DeepEqual(src) { - return false + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } } return true } -type TIngestBinlogResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +type TWarmUpCacheAsyncResponse struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` } -func NewTIngestBinlogResult_() *TIngestBinlogResult_ { - return &TIngestBinlogResult_{} +func NewTWarmUpCacheAsyncResponse() *TWarmUpCacheAsyncResponse { + return &TWarmUpCacheAsyncResponse{} } -func (p *TIngestBinlogResult_) InitDefault() { - *p = TIngestBinlogResult_{} +func (p *TWarmUpCacheAsyncResponse) InitDefault() { } -var 
TIngestBinlogResult__Status_DEFAULT *status.TStatus +var TWarmUpCacheAsyncResponse_Status_DEFAULT *status.TStatus -func (p *TIngestBinlogResult_) GetStatus() (v *status.TStatus) { +func (p *TWarmUpCacheAsyncResponse) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TIngestBinlogResult__Status_DEFAULT + return TWarmUpCacheAsyncResponse_Status_DEFAULT } return p.Status } -func (p *TIngestBinlogResult_) SetStatus(val *status.TStatus) { +func (p *TWarmUpCacheAsyncResponse) SetStatus(val *status.TStatus) { p.Status = val } -var fieldIDToName_TIngestBinlogResult_ = map[int16]string{ +var fieldIDToName_TWarmUpCacheAsyncResponse = map[int16]string{ 1: "status", } -func (p *TIngestBinlogResult_) IsSetStatus() bool { +func (p *TWarmUpCacheAsyncResponse) IsSetStatus() bool { return p.Status != nil } -func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TWarmUpCacheAsyncResponse) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -6444,17 +6800,15 @@ func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6463,13 +6817,17 @@ func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -6477,19 +6835,22 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncResponse[fieldId])) } -func (p *TIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TWarmUpCacheAsyncResponse) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } -func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TWarmUpCacheAsyncResponse) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TIngestBinlogResult"); err != nil { + if err = oprot.WriteStructBegin("TWarmUpCacheAsyncResponse"); err != nil { goto 
WriteStructBeginError } if p != nil { @@ -6497,7 +6858,6 @@ func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6516,17 +6876,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TWarmUpCacheAsyncResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -6535,14 +6893,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TIngestBinlogResult_) String() string { +func (p *TWarmUpCacheAsyncResponse) String() string { if p == nil { return "" } - return fmt.Sprintf("TIngestBinlogResult_(%+v)", *p) + return fmt.Sprintf("TWarmUpCacheAsyncResponse(%+v)", *p) + } -func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool { +func (p *TWarmUpCacheAsyncResponse) DeepEqual(ano *TWarmUpCacheAsyncResponse) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -6554,7 +6913,7 @@ func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool { return true } -func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TWarmUpCacheAsyncResponse) Field1DeepEqual(src *status.TStatus) bool { if !p.Status.DeepEqual(src) { return false @@ -6562,1333 +6921,12966 @@ func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool { return true } -type BackendService interface { - ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) - - CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) - - TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) - - SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) - - MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) - - ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) - - PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) - - SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) - - GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) - - EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) - - GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) - - GetTrashUsedCapacity(ctx context.Context) (r int64, err 
error) - - GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) - - SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) - - OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) - - GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) - - CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) - - GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) - - CleanTrash(ctx context.Context) (err error) - - CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) - - IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) +type TCheckWarmUpCacheAsyncRequest struct { + Tablets []int64 `thrift:"tablets,1,optional" frugal:"1,optional,list" json:"tablets,omitempty"` } -type BackendServiceClient struct { - c thrift.TClient +func NewTCheckWarmUpCacheAsyncRequest() *TCheckWarmUpCacheAsyncRequest { + return &TCheckWarmUpCacheAsyncRequest{} } -func NewBackendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BackendServiceClient { - return &BackendServiceClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } +func (p *TCheckWarmUpCacheAsyncRequest) InitDefault() { } -func NewBackendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BackendServiceClient { - return &BackendServiceClient{ - c: thrift.NewTStandardClient(iprot, oprot), +var TCheckWarmUpCacheAsyncRequest_Tablets_DEFAULT []int64 + +func (p *TCheckWarmUpCacheAsyncRequest) GetTablets() (v []int64) { + if !p.IsSetTablets() { + return TCheckWarmUpCacheAsyncRequest_Tablets_DEFAULT } + return p.Tablets +} +func (p *TCheckWarmUpCacheAsyncRequest) SetTablets(val []int64) { + p.Tablets = val } -func NewBackendServiceClient(c thrift.TClient) *BackendServiceClient { - return &BackendServiceClient{ - c: c, - } +var fieldIDToName_TCheckWarmUpCacheAsyncRequest = map[int16]string{ + 1: "tablets", } -func (p *BackendServiceClient) Client_() thrift.TClient { - return p.c +func (p *TCheckWarmUpCacheAsyncRequest) IsSetTablets() bool { + return p.Tablets != nil } -func (p *BackendServiceClient) ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) { - var _args BackendServiceExecPlanFragmentArgs - _args.Params = params - var _result BackendServiceExecPlanFragmentResult - if err = p.Client_().Call(ctx, "exec_plan_fragment", &_args, &_result); err != nil { - return +func (p *TCheckWarmUpCacheAsyncRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) { - var _args BackendServiceCancelPlanFragmentArgs - _args.Params = params - var _result BackendServiceCancelPlanFragmentResult - if err = p.Client_().Call(ctx, "cancel_plan_fragment", &_args, &_result); err != nil { - return + + for { + 
_, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) { - var _args BackendServiceTransmitDataArgs - _args.Params = params - var _result BackendServiceTransmitDataResult - if err = p.Client_().Call(ctx, "transmit_data", &_args, &_result); err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return _result.GetSuccess(), nil + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceClient) SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) { - var _args BackendServiceSubmitTasksArgs - _args.Tasks = tasks - var _result BackendServiceSubmitTasksResult - if err = p.Client_().Call(ctx, "submit_tasks", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) { - var _args BackendServiceMakeSnapshotArgs - _args.SnapshotRequest = snapshotRequest - var _result BackendServiceMakeSnapshotResult - if err = p.Client_().Call(ctx, "make_snapshot", &_args, &_result); err != nil { - return + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) { - var _args BackendServiceReleaseSnapshotArgs - _args.SnapshotPath = snapshotPath - var _result BackendServiceReleaseSnapshotResult - if err = p.Client_().Call(ctx, "release_snapshot", &_args, &_result); err != nil { - return + if err := iprot.ReadListEnd(); err != nil { + return err } - return _result.GetSuccess(), nil + p.Tablets = _field + return nil } -func (p *BackendServiceClient) PublishClusterState(ctx context.Context, request 
*agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) { - var _args BackendServicePublishClusterStateArgs - _args.Request = request - var _result BackendServicePublishClusterStateResult - if err = p.Client_().Call(ctx, "publish_cluster_state", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCheckWarmUpCacheAsyncRequest"); err != nil { + goto WriteStructBeginError } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) { - var _args BackendServiceSubmitExportTaskArgs - _args.Request = request - var _result BackendServiceSubmitExportTaskResult - if err = p.Client_().Call(ctx, "submit_export_task", &_args, &_result); err != nil { - return + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) { - var _args BackendServiceGetExportStatusArgs - _args.TaskId = taskId - var _result BackendServiceGetExportStatusResult - if err = p.Client_().Call(ctx, "get_export_status", &_args, &_result); err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) { - var _args BackendServiceEraseExportTaskArgs - _args.TaskId = taskId - var _result BackendServiceEraseExportTaskResult - if err = p.Client_().Call(ctx, "erase_export_task", &_args, &_result); err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return _result.GetSuccess(), nil + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceClient) GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) { - var _args BackendServiceGetTabletStatArgs - var _result BackendServiceGetTabletStatResult - if err = p.Client_().Call(ctx, "get_tablet_stat", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTablets() { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.Tablets)); err != nil { + return err + } + for _, v := range p.Tablets { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return _result.GetSuccess(), nil + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p 
*BackendServiceClient) GetTrashUsedCapacity(ctx context.Context) (r int64, err error) { - var _args BackendServiceGetTrashUsedCapacityArgs - var _result BackendServiceGetTrashUsedCapacityResult - if err = p.Client_().Call(ctx, "get_trash_used_capacity", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) String() string { + if p == nil { + return "" } - return _result.GetSuccess(), nil + return fmt.Sprintf("TCheckWarmUpCacheAsyncRequest(%+v)", *p) + } -func (p *BackendServiceClient) GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) { - var _args BackendServiceGetDiskTrashUsedCapacityArgs - var _result BackendServiceGetDiskTrashUsedCapacityResult - if err = p.Client_().Call(ctx, "get_disk_trash_used_capacity", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) DeepEqual(ano *TCheckWarmUpCacheAsyncRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) { - var _args BackendServiceSubmitRoutineLoadTaskArgs - _args.Tasks = tasks - var _result BackendServiceSubmitRoutineLoadTaskResult - if err = p.Client_().Call(ctx, "submit_routine_load_task", &_args, &_result); err != nil { - return + if !p.Field1DeepEqual(ano.Tablets) { + return false } - return _result.GetSuccess(), nil + return true } -func (p *BackendServiceClient) OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) { - var _args BackendServiceOpenScannerArgs - _args.Params = params - var _result BackendServiceOpenScannerResult - if err = p.Client_().Call(ctx, "open_scanner", &_args, &_result); err != nil { - return + +func (p *TCheckWarmUpCacheAsyncRequest) Field1DeepEqual(src []int64) bool { + + if len(p.Tablets) != len(src) { + return false } - return _result.GetSuccess(), nil -} -func (p *BackendServiceClient) GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) { - var _args BackendServiceGetNextArgs - _args.Params = params - var _result BackendServiceGetNextResult - if err = p.Client_().Call(ctx, "get_next", &_args, &_result); err != nil { - return + for i, v := range p.Tablets { + _src := src[i] + if v != _src { + return false + } } - return _result.GetSuccess(), nil + return true } -func (p *BackendServiceClient) CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) { - var _args BackendServiceCloseScannerArgs - _args.Params = params - var _result BackendServiceCloseScannerResult - if err = p.Client_().Call(ctx, "close_scanner", &_args, &_result); err != nil { - return + +type TCheckWarmUpCacheAsyncResponse struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + TaskDone map[int64]bool `thrift:"task_done,2,optional" frugal:"2,optional,map" json:"task_done,omitempty"` +} + +func NewTCheckWarmUpCacheAsyncResponse() *TCheckWarmUpCacheAsyncResponse { + return &TCheckWarmUpCacheAsyncResponse{} +} + +func (p *TCheckWarmUpCacheAsyncResponse) InitDefault() { +} + +var TCheckWarmUpCacheAsyncResponse_Status_DEFAULT *status.TStatus + +func (p *TCheckWarmUpCacheAsyncResponse) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return 
TCheckWarmUpCacheAsyncResponse_Status_DEFAULT } - return _result.GetSuccess(), nil + return p.Status } -func (p *BackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) { + +var TCheckWarmUpCacheAsyncResponse_TaskDone_DEFAULT map[int64]bool + +func (p *TCheckWarmUpCacheAsyncResponse) GetTaskDone() (v map[int64]bool) { + if !p.IsSetTaskDone() { + return TCheckWarmUpCacheAsyncResponse_TaskDone_DEFAULT + } + return p.TaskDone +} +func (p *TCheckWarmUpCacheAsyncResponse) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TCheckWarmUpCacheAsyncResponse) SetTaskDone(val map[int64]bool) { + p.TaskDone = val +} + +var fieldIDToName_TCheckWarmUpCacheAsyncResponse = map[int16]string{ + 1: "status", + 2: "task_done", +} + +func (p *TCheckWarmUpCacheAsyncResponse) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCheckWarmUpCacheAsyncResponse) IsSetTaskDone() bool { + return p.TaskDone != nil +} + +func (p *TCheckWarmUpCacheAsyncResponse) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.MAP { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId])) +} + +func (p *TCheckWarmUpCacheAsyncResponse) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TCheckWarmUpCacheAsyncResponse) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int64]bool, size) + for i := 0; i < size; i++ { + var _key int64 + 
if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + + var _val bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TaskDone = _field + return nil +} + +func (p *TCheckWarmUpCacheAsyncResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCheckWarmUpCacheAsyncResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCheckWarmUpCacheAsyncResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TCheckWarmUpCacheAsyncResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskDone() { + if err = oprot.WriteFieldBegin("task_done", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.BOOL, len(p.TaskDone)); err != nil { + return err + } + for k, v := range p.TaskDone { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteBool(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TCheckWarmUpCacheAsyncResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCheckWarmUpCacheAsyncResponse(%+v)", *p) + +} + +func (p *TCheckWarmUpCacheAsyncResponse) DeepEqual(ano *TCheckWarmUpCacheAsyncResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.TaskDone) { + return false + } + return true +} + +func (p *TCheckWarmUpCacheAsyncResponse) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TCheckWarmUpCacheAsyncResponse) Field2DeepEqual(src map[int64]bool) bool { + + if len(p.TaskDone) != len(src) { + return false + } + for k, v := range 
p.TaskDone { + _src := src[k] + if v != _src { + return false + } + } + return true +} + +type TSyncLoadForTabletsRequest struct { + TabletIds []int64 `thrift:"tablet_ids,1,required" frugal:"1,required,list" json:"tablet_ids"` +} + +func NewTSyncLoadForTabletsRequest() *TSyncLoadForTabletsRequest { + return &TSyncLoadForTabletsRequest{} +} + +func (p *TSyncLoadForTabletsRequest) InitDefault() { +} + +func (p *TSyncLoadForTabletsRequest) GetTabletIds() (v []int64) { + return p.TabletIds +} +func (p *TSyncLoadForTabletsRequest) SetTabletIds(val []int64) { + p.TabletIds = val +} + +var fieldIDToName_TSyncLoadForTabletsRequest = map[int16]string{ + 1: "tablet_ids", +} + +func (p *TSyncLoadForTabletsRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTabletIds bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTabletIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTabletIds { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncLoadForTabletsRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSyncLoadForTabletsRequest[fieldId])) +} + +func (p *TSyncLoadForTabletsRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TabletIds = _field + return nil +} + +func (p *TSyncLoadForTabletsRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSyncLoadForTabletsRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + 
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSyncLoadForTabletsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSyncLoadForTabletsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSyncLoadForTabletsRequest(%+v)", *p) + +} + +func (p *TSyncLoadForTabletsRequest) DeepEqual(ano *TSyncLoadForTabletsRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TabletIds) { + return false + } + return true +} + +func (p *TSyncLoadForTabletsRequest) Field1DeepEqual(src []int64) bool { + + if len(p.TabletIds) != len(src) { + return false + } + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TSyncLoadForTabletsResponse struct { +} + +func NewTSyncLoadForTabletsResponse() *TSyncLoadForTabletsResponse { + return &TSyncLoadForTabletsResponse{} +} + +func (p *TSyncLoadForTabletsResponse) InitDefault() { +} + +var fieldIDToName_TSyncLoadForTabletsResponse = map[int16]string{} + +func (p *TSyncLoadForTabletsResponse) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSyncLoadForTabletsResponse) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TSyncLoadForTabletsResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + } + if err = oprot.WriteFieldStop(); err != nil { + goto 
WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSyncLoadForTabletsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSyncLoadForTabletsResponse(%+v)", *p) + +} + +func (p *TSyncLoadForTabletsResponse) DeepEqual(ano *TSyncLoadForTabletsResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true +} + +type THotPartition struct { + PartitionId int64 `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` + LastAccessTime int64 `thrift:"last_access_time,2,required" frugal:"2,required,i64" json:"last_access_time"` + QueryPerDay *int64 `thrift:"query_per_day,3,optional" frugal:"3,optional,i64" json:"query_per_day,omitempty"` + QueryPerWeek *int64 `thrift:"query_per_week,4,optional" frugal:"4,optional,i64" json:"query_per_week,omitempty"` +} + +func NewTHotPartition() *THotPartition { + return &THotPartition{} +} + +func (p *THotPartition) InitDefault() { +} + +func (p *THotPartition) GetPartitionId() (v int64) { + return p.PartitionId +} + +func (p *THotPartition) GetLastAccessTime() (v int64) { + return p.LastAccessTime +} + +var THotPartition_QueryPerDay_DEFAULT int64 + +func (p *THotPartition) GetQueryPerDay() (v int64) { + if !p.IsSetQueryPerDay() { + return THotPartition_QueryPerDay_DEFAULT + } + return *p.QueryPerDay +} + +var THotPartition_QueryPerWeek_DEFAULT int64 + +func (p *THotPartition) GetQueryPerWeek() (v int64) { + if !p.IsSetQueryPerWeek() { + return THotPartition_QueryPerWeek_DEFAULT + } + return *p.QueryPerWeek +} +func (p *THotPartition) SetPartitionId(val int64) { + p.PartitionId = val +} +func (p *THotPartition) SetLastAccessTime(val int64) { + p.LastAccessTime = val +} +func (p *THotPartition) SetQueryPerDay(val *int64) { + p.QueryPerDay = val +} +func (p *THotPartition) SetQueryPerWeek(val *int64) { + p.QueryPerWeek = val +} + +var fieldIDToName_THotPartition = map[int16]string{ + 1: "partition_id", + 2: "last_access_time", + 3: "query_per_day", + 4: "query_per_week", +} + +func (p *THotPartition) IsSetQueryPerDay() bool { + return p.QueryPerDay != nil +} + +func (p *THotPartition) IsSetQueryPerWeek() bool { + return p.QueryPerWeek != nil +} + +func (p *THotPartition) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionId bool = false + var issetLastAccessTime bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetPartitionId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetLastAccessTime = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == 
thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetPartitionId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetLastAccessTime { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotPartition[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotPartition[fieldId])) +} + +func (p *THotPartition) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.PartitionId = _field + return nil +} +func (p *THotPartition) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.LastAccessTime = _field + return nil +} +func (p *THotPartition) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.QueryPerDay = _field + return nil +} +func (p *THotPartition) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.QueryPerWeek = _field + return nil +} + +func (p *THotPartition) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THotPartition"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: 
", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THotPartition) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.PartitionId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THotPartition) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("last_access_time", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.LastAccessTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THotPartition) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryPerDay() { + if err = oprot.WriteFieldBegin("query_per_day", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.QueryPerDay); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THotPartition) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryPerWeek() { + if err = oprot.WriteFieldBegin("query_per_week", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.QueryPerWeek); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THotPartition) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THotPartition(%+v)", *p) + +} + +func (p *THotPartition) DeepEqual(ano *THotPartition) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PartitionId) { + return false + } + if !p.Field2DeepEqual(ano.LastAccessTime) { + return false + } + if !p.Field3DeepEqual(ano.QueryPerDay) { + return false + } + if !p.Field4DeepEqual(ano.QueryPerWeek) { + return false + } + return true +} + +func (p *THotPartition) Field1DeepEqual(src int64) bool { + + if p.PartitionId != src { + return false + } + return true +} +func (p *THotPartition) Field2DeepEqual(src int64) bool { + + if p.LastAccessTime != src { + return false + } + return true +} +func (p *THotPartition) Field3DeepEqual(src *int64) bool { + + if p.QueryPerDay == src { + return true + } else if p.QueryPerDay == nil || src == nil { + return false + } + if *p.QueryPerDay != *src { + return false + } + return true +} +func (p *THotPartition) Field4DeepEqual(src *int64) bool { + + 
if p.QueryPerWeek == src { + return true + } else if p.QueryPerWeek == nil || src == nil { + return false + } + if *p.QueryPerWeek != *src { + return false + } + return true +} + +type THotTableMessage struct { + TableId int64 `thrift:"table_id,1,required" frugal:"1,required,i64" json:"table_id"` + IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` + HotPartitions []*THotPartition `thrift:"hot_partitions,3,optional" frugal:"3,optional,list" json:"hot_partitions,omitempty"` +} + +func NewTHotTableMessage() *THotTableMessage { + return &THotTableMessage{} +} + +func (p *THotTableMessage) InitDefault() { +} + +func (p *THotTableMessage) GetTableId() (v int64) { + return p.TableId +} + +func (p *THotTableMessage) GetIndexId() (v int64) { + return p.IndexId +} + +var THotTableMessage_HotPartitions_DEFAULT []*THotPartition + +func (p *THotTableMessage) GetHotPartitions() (v []*THotPartition) { + if !p.IsSetHotPartitions() { + return THotTableMessage_HotPartitions_DEFAULT + } + return p.HotPartitions +} +func (p *THotTableMessage) SetTableId(val int64) { + p.TableId = val +} +func (p *THotTableMessage) SetIndexId(val int64) { + p.IndexId = val +} +func (p *THotTableMessage) SetHotPartitions(val []*THotPartition) { + p.HotPartitions = val +} + +var fieldIDToName_THotTableMessage = map[int16]string{ + 1: "table_id", + 2: "index_id", + 3: "hot_partitions", +} + +func (p *THotTableMessage) IsSetHotPartitions() bool { + return p.HotPartitions != nil +} + +func (p *THotTableMessage) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTableId bool = false + var issetIndexId bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTableId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetIndexId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTableId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetIndexId { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotTableMessage[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotTableMessage[fieldId])) +} + +func (p *THotTableMessage) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TableId = _field + return nil +} +func (p *THotTableMessage) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.IndexId = _field + return nil +} +func (p *THotTableMessage) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*THotPartition, 0, size) + values := make([]THotPartition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.HotPartitions = _field + return nil +} + +func (p *THotTableMessage) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THotTableMessage"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THotTableMessage) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THotTableMessage) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.IndexId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THotTableMessage) writeField3(oprot thrift.TProtocol) (err 
error) { + if p.IsSetHotPartitions() { + if err = oprot.WriteFieldBegin("hot_partitions", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HotPartitions)); err != nil { + return err + } + for _, v := range p.HotPartitions { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THotTableMessage) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THotTableMessage(%+v)", *p) + +} + +func (p *THotTableMessage) DeepEqual(ano *THotTableMessage) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TableId) { + return false + } + if !p.Field2DeepEqual(ano.IndexId) { + return false + } + if !p.Field3DeepEqual(ano.HotPartitions) { + return false + } + return true +} + +func (p *THotTableMessage) Field1DeepEqual(src int64) bool { + + if p.TableId != src { + return false + } + return true +} +func (p *THotTableMessage) Field2DeepEqual(src int64) bool { + + if p.IndexId != src { + return false + } + return true +} +func (p *THotTableMessage) Field3DeepEqual(src []*THotPartition) bool { + + if len(p.HotPartitions) != len(src) { + return false + } + for i, v := range p.HotPartitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TGetTopNHotPartitionsRequest struct { +} + +func NewTGetTopNHotPartitionsRequest() *TGetTopNHotPartitionsRequest { + return &TGetTopNHotPartitionsRequest{} +} + +func (p *TGetTopNHotPartitionsRequest) InitDefault() { +} + +var fieldIDToName_TGetTopNHotPartitionsRequest = map[int16]string{} + +func (p *TGetTopNHotPartitionsRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetTopNHotPartitionsRequest) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TGetTopNHotPartitionsRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto 
WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetTopNHotPartitionsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetTopNHotPartitionsRequest(%+v)", *p) + +} + +func (p *TGetTopNHotPartitionsRequest) DeepEqual(ano *TGetTopNHotPartitionsRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true +} + +type TGetTopNHotPartitionsResponse struct { + FileCacheSize int64 `thrift:"file_cache_size,1,required" frugal:"1,required,i64" json:"file_cache_size"` + HotTables []*THotTableMessage `thrift:"hot_tables,2,optional" frugal:"2,optional,list" json:"hot_tables,omitempty"` +} + +func NewTGetTopNHotPartitionsResponse() *TGetTopNHotPartitionsResponse { + return &TGetTopNHotPartitionsResponse{} +} + +func (p *TGetTopNHotPartitionsResponse) InitDefault() { +} + +func (p *TGetTopNHotPartitionsResponse) GetFileCacheSize() (v int64) { + return p.FileCacheSize +} + +var TGetTopNHotPartitionsResponse_HotTables_DEFAULT []*THotTableMessage + +func (p *TGetTopNHotPartitionsResponse) GetHotTables() (v []*THotTableMessage) { + if !p.IsSetHotTables() { + return TGetTopNHotPartitionsResponse_HotTables_DEFAULT + } + return p.HotTables +} +func (p *TGetTopNHotPartitionsResponse) SetFileCacheSize(val int64) { + p.FileCacheSize = val +} +func (p *TGetTopNHotPartitionsResponse) SetHotTables(val []*THotTableMessage) { + p.HotTables = val +} + +var fieldIDToName_TGetTopNHotPartitionsResponse = map[int16]string{ + 1: "file_cache_size", + 2: "hot_tables", +} + +func (p *TGetTopNHotPartitionsResponse) IsSetHotTables() bool { + return p.HotTables != nil +} + +func (p *TGetTopNHotPartitionsResponse) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetFileCacheSize bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetFileCacheSize = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetFileCacheSize { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]), err) 
+SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTopNHotPartitionsResponse[fieldId])) +} + +func (p *TGetTopNHotPartitionsResponse) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.FileCacheSize = _field + return nil +} +func (p *TGetTopNHotPartitionsResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*THotTableMessage, 0, size) + values := make([]THotTableMessage, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.HotTables = _field + return nil +} + +func (p *TGetTopNHotPartitionsResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetTopNHotPartitionsResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetTopNHotPartitionsResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("file_cache_size", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.FileCacheSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetTopNHotPartitionsResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHotTables() { + if err = oprot.WriteFieldBegin("hot_tables", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HotTables)); err != nil { + return err + } + for _, v := range p.HotTables { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", 
p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetTopNHotPartitionsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetTopNHotPartitionsResponse(%+v)", *p) + +} + +func (p *TGetTopNHotPartitionsResponse) DeepEqual(ano *TGetTopNHotPartitionsResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FileCacheSize) { + return false + } + if !p.Field2DeepEqual(ano.HotTables) { + return false + } + return true +} + +func (p *TGetTopNHotPartitionsResponse) Field1DeepEqual(src int64) bool { + + if p.FileCacheSize != src { + return false + } + return true +} +func (p *TGetTopNHotPartitionsResponse) Field2DeepEqual(src []*THotTableMessage) bool { + + if len(p.HotTables) != len(src) { + return false + } + for i, v := range p.HotTables { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TJobMeta struct { + DownloadType TDownloadType `thrift:"download_type,1,required" frugal:"1,required,TDownloadType" json:"download_type"` + BeIp *string `thrift:"be_ip,2,optional" frugal:"2,optional,string" json:"be_ip,omitempty"` + BrpcPort *int32 `thrift:"brpc_port,3,optional" frugal:"3,optional,i32" json:"brpc_port,omitempty"` + TabletIds []int64 `thrift:"tablet_ids,4,optional" frugal:"4,optional,list" json:"tablet_ids,omitempty"` +} + +func NewTJobMeta() *TJobMeta { + return &TJobMeta{} +} + +func (p *TJobMeta) InitDefault() { +} + +func (p *TJobMeta) GetDownloadType() (v TDownloadType) { + return p.DownloadType +} + +var TJobMeta_BeIp_DEFAULT string + +func (p *TJobMeta) GetBeIp() (v string) { + if !p.IsSetBeIp() { + return TJobMeta_BeIp_DEFAULT + } + return *p.BeIp +} + +var TJobMeta_BrpcPort_DEFAULT int32 + +func (p *TJobMeta) GetBrpcPort() (v int32) { + if !p.IsSetBrpcPort() { + return TJobMeta_BrpcPort_DEFAULT + } + return *p.BrpcPort +} + +var TJobMeta_TabletIds_DEFAULT []int64 + +func (p *TJobMeta) GetTabletIds() (v []int64) { + if !p.IsSetTabletIds() { + return TJobMeta_TabletIds_DEFAULT + } + return p.TabletIds +} +func (p *TJobMeta) SetDownloadType(val TDownloadType) { + p.DownloadType = val +} +func (p *TJobMeta) SetBeIp(val *string) { + p.BeIp = val +} +func (p *TJobMeta) SetBrpcPort(val *int32) { + p.BrpcPort = val +} +func (p *TJobMeta) SetTabletIds(val []int64) { + p.TabletIds = val +} + +var fieldIDToName_TJobMeta = map[int16]string{ + 1: "download_type", + 2: "be_ip", + 3: "brpc_port", + 4: "tablet_ids", +} + +func (p *TJobMeta) IsSetBeIp() bool { + return p.BeIp != nil +} + +func (p *TJobMeta) IsSetBrpcPort() bool { + return p.BrpcPort != nil +} + +func (p *TJobMeta) IsSetTabletIds() bool { + return p.TabletIds != nil +} + +func (p *TJobMeta) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetDownloadType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetDownloadType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetDownloadType { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobMeta[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TJobMeta[fieldId])) +} + +func (p *TJobMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field TDownloadType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TDownloadType(v) + } + p.DownloadType = _field + return nil +} +func (p *TJobMeta) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.BeIp = _field + return nil +} +func (p *TJobMeta) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BrpcPort = _field + return nil +} +func (p *TJobMeta) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TabletIds = _field + return nil +} + +func (p *TJobMeta) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TJobMeta"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TJobMeta) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("download_type", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.DownloadType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TJobMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBeIp() { + if err = oprot.WriteFieldBegin("be_ip", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BeIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TJobMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBrpcPort() { + if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BrpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TJobMeta) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletIds() { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TJobMeta) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TJobMeta(%+v)", *p) + +} + +func (p *TJobMeta) DeepEqual(ano *TJobMeta) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DownloadType) { + return false + } + if !p.Field2DeepEqual(ano.BeIp) { + return false + } + if !p.Field3DeepEqual(ano.BrpcPort) { + return false + } + if !p.Field4DeepEqual(ano.TabletIds) { + return false + } + return true +} + +func (p *TJobMeta) Field1DeepEqual(src TDownloadType) bool { + + if p.DownloadType != src { + return false + } + return true +} +func (p *TJobMeta) 
Field2DeepEqual(src *string) bool { + + if p.BeIp == src { + return true + } else if p.BeIp == nil || src == nil { + return false + } + if strings.Compare(*p.BeIp, *src) != 0 { + return false + } + return true +} +func (p *TJobMeta) Field3DeepEqual(src *int32) bool { + + if p.BrpcPort == src { + return true + } else if p.BrpcPort == nil || src == nil { + return false + } + if *p.BrpcPort != *src { + return false + } + return true +} +func (p *TJobMeta) Field4DeepEqual(src []int64) bool { + + if len(p.TabletIds) != len(src) { + return false + } + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TWarmUpTabletsRequest struct { + JobId int64 `thrift:"job_id,1,required" frugal:"1,required,i64" json:"job_id"` + BatchId int64 `thrift:"batch_id,2,required" frugal:"2,required,i64" json:"batch_id"` + JobMetas []*TJobMeta `thrift:"job_metas,3,optional" frugal:"3,optional,list" json:"job_metas,omitempty"` + Type TWarmUpTabletsRequestType `thrift:"type,4,required" frugal:"4,required,TWarmUpTabletsRequestType" json:"type"` +} + +func NewTWarmUpTabletsRequest() *TWarmUpTabletsRequest { + return &TWarmUpTabletsRequest{} +} + +func (p *TWarmUpTabletsRequest) InitDefault() { +} + +func (p *TWarmUpTabletsRequest) GetJobId() (v int64) { + return p.JobId +} + +func (p *TWarmUpTabletsRequest) GetBatchId() (v int64) { + return p.BatchId +} + +var TWarmUpTabletsRequest_JobMetas_DEFAULT []*TJobMeta + +func (p *TWarmUpTabletsRequest) GetJobMetas() (v []*TJobMeta) { + if !p.IsSetJobMetas() { + return TWarmUpTabletsRequest_JobMetas_DEFAULT + } + return p.JobMetas +} + +func (p *TWarmUpTabletsRequest) GetType() (v TWarmUpTabletsRequestType) { + return p.Type +} +func (p *TWarmUpTabletsRequest) SetJobId(val int64) { + p.JobId = val +} +func (p *TWarmUpTabletsRequest) SetBatchId(val int64) { + p.BatchId = val +} +func (p *TWarmUpTabletsRequest) SetJobMetas(val []*TJobMeta) { + p.JobMetas = val +} +func (p *TWarmUpTabletsRequest) SetType(val TWarmUpTabletsRequestType) { + p.Type = val +} + +var fieldIDToName_TWarmUpTabletsRequest = map[int16]string{ + 1: "job_id", + 2: "batch_id", + 3: "job_metas", + 4: "type", +} + +func (p *TWarmUpTabletsRequest) IsSetJobMetas() bool { + return p.JobMetas != nil +} + +func (p *TWarmUpTabletsRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetJobId bool = false + var issetBatchId bool = false + var issetType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetJobId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetBatchId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetType = true + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetJobId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBatchId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetType { + fieldId = 4 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsRequest[fieldId])) +} + +func (p *TWarmUpTabletsRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.JobId = _field + return nil +} +func (p *TWarmUpTabletsRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.BatchId = _field + return nil +} +func (p *TWarmUpTabletsRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TJobMeta, 0, size) + values := make([]TJobMeta, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.JobMetas = _field + return nil +} +func (p *TWarmUpTabletsRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field TWarmUpTabletsRequestType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TWarmUpTabletsRequestType(v) + } + p.Type = _field + return nil +} + +func (p *TWarmUpTabletsRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWarmUpTabletsRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write 
field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWarmUpTabletsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWarmUpTabletsRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("batch_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.BatchId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWarmUpTabletsRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetJobMetas() { + if err = oprot.WriteFieldBegin("job_metas", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.JobMetas)); err != nil { + return err + } + for _, v := range p.JobMetas { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWarmUpTabletsRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("type", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TWarmUpTabletsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWarmUpTabletsRequest(%+v)", *p) + +} + +func (p *TWarmUpTabletsRequest) DeepEqual(ano *TWarmUpTabletsRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.JobId) { + return false + } + if !p.Field2DeepEqual(ano.BatchId) { + return false + } + if !p.Field3DeepEqual(ano.JobMetas) { + return false + } + if !p.Field4DeepEqual(ano.Type) { + return false + } + return true +} + +func (p *TWarmUpTabletsRequest) Field1DeepEqual(src int64) bool { + + if p.JobId != src { + return false + } + return true +} +func (p *TWarmUpTabletsRequest) Field2DeepEqual(src int64) bool { + + if p.BatchId != src { + return false + } + return true +} +func (p 
*TWarmUpTabletsRequest) Field3DeepEqual(src []*TJobMeta) bool { + + if len(p.JobMetas) != len(src) { + return false + } + for i, v := range p.JobMetas { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TWarmUpTabletsRequest) Field4DeepEqual(src TWarmUpTabletsRequestType) bool { + + if p.Type != src { + return false + } + return true +} + +type TWarmUpTabletsResponse struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + JobId *int64 `thrift:"job_id,2,optional" frugal:"2,optional,i64" json:"job_id,omitempty"` + BatchId *int64 `thrift:"batch_id,3,optional" frugal:"3,optional,i64" json:"batch_id,omitempty"` + PendingJobSize *int64 `thrift:"pending_job_size,4,optional" frugal:"4,optional,i64" json:"pending_job_size,omitempty"` + FinishJobSize *int64 `thrift:"finish_job_size,5,optional" frugal:"5,optional,i64" json:"finish_job_size,omitempty"` +} + +func NewTWarmUpTabletsResponse() *TWarmUpTabletsResponse { + return &TWarmUpTabletsResponse{} +} + +func (p *TWarmUpTabletsResponse) InitDefault() { +} + +var TWarmUpTabletsResponse_Status_DEFAULT *status.TStatus + +func (p *TWarmUpTabletsResponse) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TWarmUpTabletsResponse_Status_DEFAULT + } + return p.Status +} + +var TWarmUpTabletsResponse_JobId_DEFAULT int64 + +func (p *TWarmUpTabletsResponse) GetJobId() (v int64) { + if !p.IsSetJobId() { + return TWarmUpTabletsResponse_JobId_DEFAULT + } + return *p.JobId +} + +var TWarmUpTabletsResponse_BatchId_DEFAULT int64 + +func (p *TWarmUpTabletsResponse) GetBatchId() (v int64) { + if !p.IsSetBatchId() { + return TWarmUpTabletsResponse_BatchId_DEFAULT + } + return *p.BatchId +} + +var TWarmUpTabletsResponse_PendingJobSize_DEFAULT int64 + +func (p *TWarmUpTabletsResponse) GetPendingJobSize() (v int64) { + if !p.IsSetPendingJobSize() { + return TWarmUpTabletsResponse_PendingJobSize_DEFAULT + } + return *p.PendingJobSize +} + +var TWarmUpTabletsResponse_FinishJobSize_DEFAULT int64 + +func (p *TWarmUpTabletsResponse) GetFinishJobSize() (v int64) { + if !p.IsSetFinishJobSize() { + return TWarmUpTabletsResponse_FinishJobSize_DEFAULT + } + return *p.FinishJobSize +} +func (p *TWarmUpTabletsResponse) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TWarmUpTabletsResponse) SetJobId(val *int64) { + p.JobId = val +} +func (p *TWarmUpTabletsResponse) SetBatchId(val *int64) { + p.BatchId = val +} +func (p *TWarmUpTabletsResponse) SetPendingJobSize(val *int64) { + p.PendingJobSize = val +} +func (p *TWarmUpTabletsResponse) SetFinishJobSize(val *int64) { + p.FinishJobSize = val +} + +var fieldIDToName_TWarmUpTabletsResponse = map[int16]string{ + 1: "status", + 2: "job_id", + 3: "batch_id", + 4: "pending_job_size", + 5: "finish_job_size", +} + +func (p *TWarmUpTabletsResponse) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TWarmUpTabletsResponse) IsSetJobId() bool { + return p.JobId != nil +} + +func (p *TWarmUpTabletsResponse) IsSetBatchId() bool { + return p.BatchId != nil +} + +func (p *TWarmUpTabletsResponse) IsSetPendingJobSize() bool { + return p.PendingJobSize != nil +} + +func (p *TWarmUpTabletsResponse) IsSetFinishJobSize() bool { + return p.FinishJobSize != nil +} + +func (p *TWarmUpTabletsResponse) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + 
_, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsResponse[fieldId])) +} + +func (p *TWarmUpTabletsResponse) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TWarmUpTabletsResponse) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.JobId = _field + return nil +} +func (p *TWarmUpTabletsResponse) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BatchId = _field + return nil +} +func (p *TWarmUpTabletsResponse) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.PendingJobSize = _field + return nil +} +func (p *TWarmUpTabletsResponse) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FinishJobSize = _field + return nil +} + +func (p *TWarmUpTabletsResponse) Write(oprot 
thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWarmUpTabletsResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWarmUpTabletsResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWarmUpTabletsResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetJobId() { + if err = oprot.WriteFieldBegin("job_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWarmUpTabletsResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBatchId() { + if err = oprot.WriteFieldBegin("batch_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BatchId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWarmUpTabletsResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPendingJobSize() { + if err = oprot.WriteFieldBegin("pending_job_size", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.PendingJobSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), 
err) +} + +func (p *TWarmUpTabletsResponse) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFinishJobSize() { + if err = oprot.WriteFieldBegin("finish_job_size", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FinishJobSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TWarmUpTabletsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWarmUpTabletsResponse(%+v)", *p) + +} + +func (p *TWarmUpTabletsResponse) DeepEqual(ano *TWarmUpTabletsResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.JobId) { + return false + } + if !p.Field3DeepEqual(ano.BatchId) { + return false + } + if !p.Field4DeepEqual(ano.PendingJobSize) { + return false + } + if !p.Field5DeepEqual(ano.FinishJobSize) { + return false + } + return true +} + +func (p *TWarmUpTabletsResponse) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TWarmUpTabletsResponse) Field2DeepEqual(src *int64) bool { + + if p.JobId == src { + return true + } else if p.JobId == nil || src == nil { + return false + } + if *p.JobId != *src { + return false + } + return true +} +func (p *TWarmUpTabletsResponse) Field3DeepEqual(src *int64) bool { + + if p.BatchId == src { + return true + } else if p.BatchId == nil || src == nil { + return false + } + if *p.BatchId != *src { + return false + } + return true +} +func (p *TWarmUpTabletsResponse) Field4DeepEqual(src *int64) bool { + + if p.PendingJobSize == src { + return true + } else if p.PendingJobSize == nil || src == nil { + return false + } + if *p.PendingJobSize != *src { + return false + } + return true +} +func (p *TWarmUpTabletsResponse) Field5DeepEqual(src *int64) bool { + + if p.FinishJobSize == src { + return true + } else if p.FinishJobSize == nil || src == nil { + return false + } + if *p.FinishJobSize != *src { + return false + } + return true +} + +type TIngestBinlogRequest struct { + TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"` + RemoteTabletId *int64 `thrift:"remote_tablet_id,2,optional" frugal:"2,optional,i64" json:"remote_tablet_id,omitempty"` + BinlogVersion *int64 `thrift:"binlog_version,3,optional" frugal:"3,optional,i64" json:"binlog_version,omitempty"` + RemoteHost *string `thrift:"remote_host,4,optional" frugal:"4,optional,string" json:"remote_host,omitempty"` + RemotePort *string `thrift:"remote_port,5,optional" frugal:"5,optional,string" json:"remote_port,omitempty"` + PartitionId *int64 `thrift:"partition_id,6,optional" frugal:"6,optional,i64" json:"partition_id,omitempty"` + LocalTabletId *int64 `thrift:"local_tablet_id,7,optional" frugal:"7,optional,i64" json:"local_tablet_id,omitempty"` + LoadId *types.TUniqueId `thrift:"load_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"load_id,omitempty"` +} + +func NewTIngestBinlogRequest() *TIngestBinlogRequest { + return &TIngestBinlogRequest{} +} + +func (p *TIngestBinlogRequest) InitDefault() { +} + +var TIngestBinlogRequest_TxnId_DEFAULT int64 + +func (p *TIngestBinlogRequest) GetTxnId() (v 
int64) { + if !p.IsSetTxnId() { + return TIngestBinlogRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TIngestBinlogRequest_RemoteTabletId_DEFAULT int64 + +func (p *TIngestBinlogRequest) GetRemoteTabletId() (v int64) { + if !p.IsSetRemoteTabletId() { + return TIngestBinlogRequest_RemoteTabletId_DEFAULT + } + return *p.RemoteTabletId +} + +var TIngestBinlogRequest_BinlogVersion_DEFAULT int64 + +func (p *TIngestBinlogRequest) GetBinlogVersion() (v int64) { + if !p.IsSetBinlogVersion() { + return TIngestBinlogRequest_BinlogVersion_DEFAULT + } + return *p.BinlogVersion +} + +var TIngestBinlogRequest_RemoteHost_DEFAULT string + +func (p *TIngestBinlogRequest) GetRemoteHost() (v string) { + if !p.IsSetRemoteHost() { + return TIngestBinlogRequest_RemoteHost_DEFAULT + } + return *p.RemoteHost +} + +var TIngestBinlogRequest_RemotePort_DEFAULT string + +func (p *TIngestBinlogRequest) GetRemotePort() (v string) { + if !p.IsSetRemotePort() { + return TIngestBinlogRequest_RemotePort_DEFAULT + } + return *p.RemotePort +} + +var TIngestBinlogRequest_PartitionId_DEFAULT int64 + +func (p *TIngestBinlogRequest) GetPartitionId() (v int64) { + if !p.IsSetPartitionId() { + return TIngestBinlogRequest_PartitionId_DEFAULT + } + return *p.PartitionId +} + +var TIngestBinlogRequest_LocalTabletId_DEFAULT int64 + +func (p *TIngestBinlogRequest) GetLocalTabletId() (v int64) { + if !p.IsSetLocalTabletId() { + return TIngestBinlogRequest_LocalTabletId_DEFAULT + } + return *p.LocalTabletId +} + +var TIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId + +func (p *TIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) { + if !p.IsSetLoadId() { + return TIngestBinlogRequest_LoadId_DEFAULT + } + return p.LoadId +} +func (p *TIngestBinlogRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TIngestBinlogRequest) SetRemoteTabletId(val *int64) { + p.RemoteTabletId = val +} +func (p *TIngestBinlogRequest) SetBinlogVersion(val *int64) { + p.BinlogVersion = val +} +func (p *TIngestBinlogRequest) SetRemoteHost(val *string) { + p.RemoteHost = val +} +func (p *TIngestBinlogRequest) SetRemotePort(val *string) { + p.RemotePort = val +} +func (p *TIngestBinlogRequest) SetPartitionId(val *int64) { + p.PartitionId = val +} +func (p *TIngestBinlogRequest) SetLocalTabletId(val *int64) { + p.LocalTabletId = val +} +func (p *TIngestBinlogRequest) SetLoadId(val *types.TUniqueId) { + p.LoadId = val +} + +var fieldIDToName_TIngestBinlogRequest = map[int16]string{ + 1: "txn_id", + 2: "remote_tablet_id", + 3: "binlog_version", + 4: "remote_host", + 5: "remote_port", + 6: "partition_id", + 7: "local_tablet_id", + 8: "load_id", +} + +func (p *TIngestBinlogRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TIngestBinlogRequest) IsSetRemoteTabletId() bool { + return p.RemoteTabletId != nil +} + +func (p *TIngestBinlogRequest) IsSetBinlogVersion() bool { + return p.BinlogVersion != nil +} + +func (p *TIngestBinlogRequest) IsSetRemoteHost() bool { + return p.RemoteHost != nil +} + +func (p *TIngestBinlogRequest) IsSetRemotePort() bool { + return p.RemotePort != nil +} + +func (p *TIngestBinlogRequest) IsSetPartitionId() bool { + return p.PartitionId != nil +} + +func (p *TIngestBinlogRequest) IsSetLocalTabletId() bool { + return p.LocalTabletId != nil +} + +func (p *TIngestBinlogRequest) IsSetLoadId() bool { + return p.LoadId != nil +} + +func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto 
ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RemoteTabletId = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BinlogVersion = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := 
iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.RemoteHost = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.RemotePort = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.PartitionId = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LocalTabletId = _field + return nil +} +func (p *TIngestBinlogRequest) ReadField8(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.LoadId = _field + return nil +} + +func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TIngestBinlogRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteTabletId() { + if err = oprot.WriteFieldBegin("remote_tablet_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RemoteTabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBinlogVersion() { + if err = oprot.WriteFieldBegin("binlog_version", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BinlogVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteHost() { + if err = oprot.WriteFieldBegin("remote_host", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.RemoteHost); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRemotePort() { + if err = oprot.WriteFieldBegin("remote_port", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.RemotePort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionId() { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.PartitionId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetLocalTabletId() { + if err = oprot.WriteFieldBegin("local_tablet_id", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LocalTabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadId() { + if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TIngestBinlogRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TIngestBinlogRequest(%+v)", *p) + +} + +func (p *TIngestBinlogRequest) DeepEqual(ano *TIngestBinlogRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TxnId) { + return false + } + if !p.Field2DeepEqual(ano.RemoteTabletId) { + return false + } + if !p.Field3DeepEqual(ano.BinlogVersion) { + return false + } + if !p.Field4DeepEqual(ano.RemoteHost) { + return false + } + if !p.Field5DeepEqual(ano.RemotePort) { + return false + } + if !p.Field6DeepEqual(ano.PartitionId) { + return false + } + if !p.Field7DeepEqual(ano.LocalTabletId) { + return false + } + if !p.Field8DeepEqual(ano.LoadId) { + return false + } + return true +} + +func (p *TIngestBinlogRequest) Field1DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field2DeepEqual(src *int64) bool { + + if p.RemoteTabletId == src { + return true + } else if p.RemoteTabletId == nil || src == nil { + return false + } + if *p.RemoteTabletId != *src { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field3DeepEqual(src *int64) bool { + + if p.BinlogVersion == src { + return true + } else if p.BinlogVersion == nil || src == nil { + return false + } + if *p.BinlogVersion != *src { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field4DeepEqual(src *string) bool { + + if p.RemoteHost == src { + return true + } else if p.RemoteHost == nil || src == nil { + return false + } + if strings.Compare(*p.RemoteHost, *src) != 0 { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field5DeepEqual(src *string) bool { + + if p.RemotePort == src { + return true + } else if p.RemotePort == nil || src == nil { + return false + } + if strings.Compare(*p.RemotePort, *src) != 0 { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field6DeepEqual(src *int64) bool { + + if p.PartitionId == src { + return true + } else if p.PartitionId == nil || src == nil { + return false + } + if *p.PartitionId != *src { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field7DeepEqual(src *int64) bool { + + if p.LocalTabletId == src { + return true + } else if p.LocalTabletId == nil || src == nil { + return false + } + if *p.LocalTabletId != *src { + return false + } + return true +} +func (p *TIngestBinlogRequest) Field8DeepEqual(src *types.TUniqueId) bool { + + if !p.LoadId.DeepEqual(src) { + return false + } + return true +} + +type TIngestBinlogResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + IsAsync *bool `thrift:"is_async,2,optional" frugal:"2,optional,bool" json:"is_async,omitempty"` +} + +func NewTIngestBinlogResult_() *TIngestBinlogResult_ { + return &TIngestBinlogResult_{} +} + +func (p *TIngestBinlogResult_) InitDefault() { +} + +var TIngestBinlogResult__Status_DEFAULT *status.TStatus + +func (p *TIngestBinlogResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return 
TIngestBinlogResult__Status_DEFAULT + } + return p.Status +} + +var TIngestBinlogResult__IsAsync_DEFAULT bool + +func (p *TIngestBinlogResult_) GetIsAsync() (v bool) { + if !p.IsSetIsAsync() { + return TIngestBinlogResult__IsAsync_DEFAULT + } + return *p.IsAsync +} +func (p *TIngestBinlogResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TIngestBinlogResult_) SetIsAsync(val *bool) { + p.IsAsync = val +} + +var fieldIDToName_TIngestBinlogResult_ = map[int16]string{ + 1: "status", + 2: "is_async", +} + +func (p *TIngestBinlogResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TIngestBinlogResult_) IsSetIsAsync() bool { + return p.IsAsync != nil +} + +func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TIngestBinlogResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsAsync = _field + return nil +} + +func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TIngestBinlogResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) 
+WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TIngestBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIsAsync() { + if err = oprot.WriteFieldBegin("is_async", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsAsync); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TIngestBinlogResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TIngestBinlogResult_(%+v)", *p) + +} + +func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.IsAsync) { + return false + } + return true +} + +func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TIngestBinlogResult_) Field2DeepEqual(src *bool) bool { + + if p.IsAsync == src { + return true + } else if p.IsAsync == nil || src == nil { + return false + } + if *p.IsAsync != *src { + return false + } + return true +} + +type TQueryIngestBinlogRequest struct { + TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"` + PartitionId *int64 `thrift:"partition_id,2,optional" frugal:"2,optional,i64" json:"partition_id,omitempty"` + TabletId *int64 `thrift:"tablet_id,3,optional" frugal:"3,optional,i64" json:"tablet_id,omitempty"` + LoadId *types.TUniqueId `thrift:"load_id,4,optional" frugal:"4,optional,types.TUniqueId" json:"load_id,omitempty"` +} + +func NewTQueryIngestBinlogRequest() *TQueryIngestBinlogRequest { + return &TQueryIngestBinlogRequest{} +} + +func (p *TQueryIngestBinlogRequest) InitDefault() { +} + +var TQueryIngestBinlogRequest_TxnId_DEFAULT int64 + +func (p *TQueryIngestBinlogRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TQueryIngestBinlogRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TQueryIngestBinlogRequest_PartitionId_DEFAULT int64 + +func (p *TQueryIngestBinlogRequest) GetPartitionId() (v int64) { + if !p.IsSetPartitionId() { + return TQueryIngestBinlogRequest_PartitionId_DEFAULT + } + return *p.PartitionId +} + +var TQueryIngestBinlogRequest_TabletId_DEFAULT int64 + +func (p *TQueryIngestBinlogRequest) GetTabletId() (v int64) { + if !p.IsSetTabletId() { + 
return TQueryIngestBinlogRequest_TabletId_DEFAULT + } + return *p.TabletId +} + +var TQueryIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId + +func (p *TQueryIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) { + if !p.IsSetLoadId() { + return TQueryIngestBinlogRequest_LoadId_DEFAULT + } + return p.LoadId +} +func (p *TQueryIngestBinlogRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TQueryIngestBinlogRequest) SetPartitionId(val *int64) { + p.PartitionId = val +} +func (p *TQueryIngestBinlogRequest) SetTabletId(val *int64) { + p.TabletId = val +} +func (p *TQueryIngestBinlogRequest) SetLoadId(val *types.TUniqueId) { + p.LoadId = val +} + +var fieldIDToName_TQueryIngestBinlogRequest = map[int16]string{ + 1: "txn_id", + 2: "partition_id", + 3: "tablet_id", + 4: "load_id", +} + +func (p *TQueryIngestBinlogRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TQueryIngestBinlogRequest) IsSetPartitionId() bool { + return p.PartitionId != nil +} + +func (p *TQueryIngestBinlogRequest) IsSetTabletId() bool { + return p.TabletId != nil +} + +func (p *TQueryIngestBinlogRequest) IsSetLoadId() bool { + return p.LoadId != nil +} + +func (p *TQueryIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} 
+func (p *TQueryIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.PartitionId = _field + return nil +} +func (p *TQueryIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TabletId = _field + return nil +} +func (p *TQueryIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.LoadId = _field + return nil +} + +func (p *TQueryIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TQueryIngestBinlogRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionId() { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.PartitionId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletId() { + if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadId() { + if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryIngestBinlogRequest(%+v)", *p) + +} + +func (p *TQueryIngestBinlogRequest) DeepEqual(ano *TQueryIngestBinlogRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TxnId) { + return false + } + if !p.Field2DeepEqual(ano.PartitionId) { + return false + } + if !p.Field3DeepEqual(ano.TabletId) { + return false + } + if !p.Field4DeepEqual(ano.LoadId) { + return false + } + return true +} + +func (p *TQueryIngestBinlogRequest) Field1DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TQueryIngestBinlogRequest) Field2DeepEqual(src *int64) bool { + + if p.PartitionId == src { + return true + } else if p.PartitionId == nil || src == nil { + return false + } + if *p.PartitionId != *src { + return false + } + return true +} +func (p *TQueryIngestBinlogRequest) Field3DeepEqual(src *int64) bool { + + if p.TabletId == src { + return true + } else if p.TabletId == nil || src == nil { + return false + } + if *p.TabletId != *src { + return false + } + return true +} +func (p *TQueryIngestBinlogRequest) Field4DeepEqual(src *types.TUniqueId) bool { + + if !p.LoadId.DeepEqual(src) { + return false + } + return true +} + +type TQueryIngestBinlogResult_ struct { + Status *TIngestBinlogStatus `thrift:"status,1,optional" frugal:"1,optional,TIngestBinlogStatus" json:"status,omitempty"` + ErrMsg *string `thrift:"err_msg,2,optional" frugal:"2,optional,string" json:"err_msg,omitempty"` +} + +func NewTQueryIngestBinlogResult_() *TQueryIngestBinlogResult_ { + return &TQueryIngestBinlogResult_{} +} + +func (p *TQueryIngestBinlogResult_) InitDefault() { +} + +var TQueryIngestBinlogResult__Status_DEFAULT TIngestBinlogStatus + +func (p *TQueryIngestBinlogResult_) GetStatus() (v TIngestBinlogStatus) { + if !p.IsSetStatus() { + return TQueryIngestBinlogResult__Status_DEFAULT + } + return *p.Status +} + +var TQueryIngestBinlogResult__ErrMsg_DEFAULT string + +func (p *TQueryIngestBinlogResult_) GetErrMsg() (v string) { + if !p.IsSetErrMsg() { + return TQueryIngestBinlogResult__ErrMsg_DEFAULT + } + return *p.ErrMsg +} +func (p *TQueryIngestBinlogResult_) SetStatus(val *TIngestBinlogStatus) { + p.Status = val +} +func (p *TQueryIngestBinlogResult_) SetErrMsg(val *string) { + p.ErrMsg = val +} + +var fieldIDToName_TQueryIngestBinlogResult_ = map[int16]string{ + 1: "status", + 2: "err_msg", +} + +func (p *TQueryIngestBinlogResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TQueryIngestBinlogResult_) IsSetErrMsg() bool { + return p.ErrMsg != nil +} + +func (p *TQueryIngestBinlogResult_) Read(iprot thrift.TProtocol) 
(err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field *TIngestBinlogStatus + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TIngestBinlogStatus(v) + _field = &tmp + } + p.Status = _field + return nil +} +func (p *TQueryIngestBinlogResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ErrMsg = _field + return nil +} + +func (p *TQueryIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TQueryIngestBinlogResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Status)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TQueryIngestBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetErrMsg() { + if err = oprot.WriteFieldBegin("err_msg", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ErrMsg); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueryIngestBinlogResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryIngestBinlogResult_(%+v)", *p) + +} + +func (p *TQueryIngestBinlogResult_) DeepEqual(ano *TQueryIngestBinlogResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.ErrMsg) { + return false + } + return true +} + +func (p *TQueryIngestBinlogResult_) Field1DeepEqual(src *TIngestBinlogStatus) bool { + + if p.Status == src { + return true + } else if p.Status == nil || src == nil { + return false + } + if *p.Status != *src { + return false + } + return true +} +func (p *TQueryIngestBinlogResult_) Field2DeepEqual(src *string) bool { + + if p.ErrMsg == src { + return true + } else if p.ErrMsg == nil || src == nil { + return false + } + if strings.Compare(*p.ErrMsg, *src) != 0 { + return false + } + return true +} + +type TWorkloadGroupInfo struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` + CpuShare *int64 `thrift:"cpu_share,4,optional" frugal:"4,optional,i64" json:"cpu_share,omitempty"` + CpuHardLimit *int32 `thrift:"cpu_hard_limit,5,optional" frugal:"5,optional,i32" json:"cpu_hard_limit,omitempty"` + MemLimit *string `thrift:"mem_limit,6,optional" frugal:"6,optional,string" json:"mem_limit,omitempty"` + EnableMemoryOvercommit *bool `thrift:"enable_memory_overcommit,7,optional" frugal:"7,optional,bool" json:"enable_memory_overcommit,omitempty"` + EnableCpuHardLimit *bool `thrift:"enable_cpu_hard_limit,8,optional" frugal:"8,optional,bool" json:"enable_cpu_hard_limit,omitempty"` + ScanThreadNum *int32 `thrift:"scan_thread_num,9,optional" frugal:"9,optional,i32" json:"scan_thread_num,omitempty"` + MaxRemoteScanThreadNum *int32 `thrift:"max_remote_scan_thread_num,10,optional" frugal:"10,optional,i32" json:"max_remote_scan_thread_num,omitempty"` + MinRemoteScanThreadNum *int32 `thrift:"min_remote_scan_thread_num,11,optional" frugal:"11,optional,i32" json:"min_remote_scan_thread_num,omitempty"` + MemoryLowWatermark *int32 `thrift:"memory_low_watermark,12,optional" frugal:"12,optional,i32" json:"memory_low_watermark,omitempty"` + MemoryHighWatermark *int32 `thrift:"memory_high_watermark,13,optional" frugal:"13,optional,i32" json:"memory_high_watermark,omitempty"` + ReadBytesPerSecond *int64 `thrift:"read_bytes_per_second,14,optional" frugal:"14,optional,i64" json:"read_bytes_per_second,omitempty"` + 
RemoteReadBytesPerSecond *int64 `thrift:"remote_read_bytes_per_second,15,optional" frugal:"15,optional,i64" json:"remote_read_bytes_per_second,omitempty"` + Tag *string `thrift:"tag,16,optional" frugal:"16,optional,string" json:"tag,omitempty"` +} + +func NewTWorkloadGroupInfo() *TWorkloadGroupInfo { + return &TWorkloadGroupInfo{} +} + +func (p *TWorkloadGroupInfo) InitDefault() { +} + +var TWorkloadGroupInfo_Id_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetId() (v int64) { + if !p.IsSetId() { + return TWorkloadGroupInfo_Id_DEFAULT + } + return *p.Id +} + +var TWorkloadGroupInfo_Name_DEFAULT string + +func (p *TWorkloadGroupInfo) GetName() (v string) { + if !p.IsSetName() { + return TWorkloadGroupInfo_Name_DEFAULT + } + return *p.Name +} + +var TWorkloadGroupInfo_Version_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetVersion() (v int64) { + if !p.IsSetVersion() { + return TWorkloadGroupInfo_Version_DEFAULT + } + return *p.Version +} + +var TWorkloadGroupInfo_CpuShare_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetCpuShare() (v int64) { + if !p.IsSetCpuShare() { + return TWorkloadGroupInfo_CpuShare_DEFAULT + } + return *p.CpuShare +} + +var TWorkloadGroupInfo_CpuHardLimit_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetCpuHardLimit() (v int32) { + if !p.IsSetCpuHardLimit() { + return TWorkloadGroupInfo_CpuHardLimit_DEFAULT + } + return *p.CpuHardLimit +} + +var TWorkloadGroupInfo_MemLimit_DEFAULT string + +func (p *TWorkloadGroupInfo) GetMemLimit() (v string) { + if !p.IsSetMemLimit() { + return TWorkloadGroupInfo_MemLimit_DEFAULT + } + return *p.MemLimit +} + +var TWorkloadGroupInfo_EnableMemoryOvercommit_DEFAULT bool + +func (p *TWorkloadGroupInfo) GetEnableMemoryOvercommit() (v bool) { + if !p.IsSetEnableMemoryOvercommit() { + return TWorkloadGroupInfo_EnableMemoryOvercommit_DEFAULT + } + return *p.EnableMemoryOvercommit +} + +var TWorkloadGroupInfo_EnableCpuHardLimit_DEFAULT bool + +func (p *TWorkloadGroupInfo) GetEnableCpuHardLimit() (v bool) { + if !p.IsSetEnableCpuHardLimit() { + return TWorkloadGroupInfo_EnableCpuHardLimit_DEFAULT + } + return *p.EnableCpuHardLimit +} + +var TWorkloadGroupInfo_ScanThreadNum_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetScanThreadNum() (v int32) { + if !p.IsSetScanThreadNum() { + return TWorkloadGroupInfo_ScanThreadNum_DEFAULT + } + return *p.ScanThreadNum +} + +var TWorkloadGroupInfo_MaxRemoteScanThreadNum_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetMaxRemoteScanThreadNum() (v int32) { + if !p.IsSetMaxRemoteScanThreadNum() { + return TWorkloadGroupInfo_MaxRemoteScanThreadNum_DEFAULT + } + return *p.MaxRemoteScanThreadNum +} + +var TWorkloadGroupInfo_MinRemoteScanThreadNum_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetMinRemoteScanThreadNum() (v int32) { + if !p.IsSetMinRemoteScanThreadNum() { + return TWorkloadGroupInfo_MinRemoteScanThreadNum_DEFAULT + } + return *p.MinRemoteScanThreadNum +} + +var TWorkloadGroupInfo_MemoryLowWatermark_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetMemoryLowWatermark() (v int32) { + if !p.IsSetMemoryLowWatermark() { + return TWorkloadGroupInfo_MemoryLowWatermark_DEFAULT + } + return *p.MemoryLowWatermark +} + +var TWorkloadGroupInfo_MemoryHighWatermark_DEFAULT int32 + +func (p *TWorkloadGroupInfo) GetMemoryHighWatermark() (v int32) { + if !p.IsSetMemoryHighWatermark() { + return TWorkloadGroupInfo_MemoryHighWatermark_DEFAULT + } + return *p.MemoryHighWatermark +} + +var TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetReadBytesPerSecond() (v int64) { + if 
!p.IsSetReadBytesPerSecond() { + return TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT + } + return *p.ReadBytesPerSecond +} + +var TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetRemoteReadBytesPerSecond() (v int64) { + if !p.IsSetRemoteReadBytesPerSecond() { + return TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT + } + return *p.RemoteReadBytesPerSecond +} + +var TWorkloadGroupInfo_Tag_DEFAULT string + +func (p *TWorkloadGroupInfo) GetTag() (v string) { + if !p.IsSetTag() { + return TWorkloadGroupInfo_Tag_DEFAULT + } + return *p.Tag +} +func (p *TWorkloadGroupInfo) SetId(val *int64) { + p.Id = val +} +func (p *TWorkloadGroupInfo) SetName(val *string) { + p.Name = val +} +func (p *TWorkloadGroupInfo) SetVersion(val *int64) { + p.Version = val +} +func (p *TWorkloadGroupInfo) SetCpuShare(val *int64) { + p.CpuShare = val +} +func (p *TWorkloadGroupInfo) SetCpuHardLimit(val *int32) { + p.CpuHardLimit = val +} +func (p *TWorkloadGroupInfo) SetMemLimit(val *string) { + p.MemLimit = val +} +func (p *TWorkloadGroupInfo) SetEnableMemoryOvercommit(val *bool) { + p.EnableMemoryOvercommit = val +} +func (p *TWorkloadGroupInfo) SetEnableCpuHardLimit(val *bool) { + p.EnableCpuHardLimit = val +} +func (p *TWorkloadGroupInfo) SetScanThreadNum(val *int32) { + p.ScanThreadNum = val +} +func (p *TWorkloadGroupInfo) SetMaxRemoteScanThreadNum(val *int32) { + p.MaxRemoteScanThreadNum = val +} +func (p *TWorkloadGroupInfo) SetMinRemoteScanThreadNum(val *int32) { + p.MinRemoteScanThreadNum = val +} +func (p *TWorkloadGroupInfo) SetMemoryLowWatermark(val *int32) { + p.MemoryLowWatermark = val +} +func (p *TWorkloadGroupInfo) SetMemoryHighWatermark(val *int32) { + p.MemoryHighWatermark = val +} +func (p *TWorkloadGroupInfo) SetReadBytesPerSecond(val *int64) { + p.ReadBytesPerSecond = val +} +func (p *TWorkloadGroupInfo) SetRemoteReadBytesPerSecond(val *int64) { + p.RemoteReadBytesPerSecond = val +} +func (p *TWorkloadGroupInfo) SetTag(val *string) { + p.Tag = val +} + +var fieldIDToName_TWorkloadGroupInfo = map[int16]string{ + 1: "id", + 2: "name", + 3: "version", + 4: "cpu_share", + 5: "cpu_hard_limit", + 6: "mem_limit", + 7: "enable_memory_overcommit", + 8: "enable_cpu_hard_limit", + 9: "scan_thread_num", + 10: "max_remote_scan_thread_num", + 11: "min_remote_scan_thread_num", + 12: "memory_low_watermark", + 13: "memory_high_watermark", + 14: "read_bytes_per_second", + 15: "remote_read_bytes_per_second", + 16: "tag", +} + +func (p *TWorkloadGroupInfo) IsSetId() bool { + return p.Id != nil +} + +func (p *TWorkloadGroupInfo) IsSetName() bool { + return p.Name != nil +} + +func (p *TWorkloadGroupInfo) IsSetVersion() bool { + return p.Version != nil +} + +func (p *TWorkloadGroupInfo) IsSetCpuShare() bool { + return p.CpuShare != nil +} + +func (p *TWorkloadGroupInfo) IsSetCpuHardLimit() bool { + return p.CpuHardLimit != nil +} + +func (p *TWorkloadGroupInfo) IsSetMemLimit() bool { + return p.MemLimit != nil +} + +func (p *TWorkloadGroupInfo) IsSetEnableMemoryOvercommit() bool { + return p.EnableMemoryOvercommit != nil +} + +func (p *TWorkloadGroupInfo) IsSetEnableCpuHardLimit() bool { + return p.EnableCpuHardLimit != nil +} + +func (p *TWorkloadGroupInfo) IsSetScanThreadNum() bool { + return p.ScanThreadNum != nil +} + +func (p *TWorkloadGroupInfo) IsSetMaxRemoteScanThreadNum() bool { + return p.MaxRemoteScanThreadNum != nil +} + +func (p *TWorkloadGroupInfo) IsSetMinRemoteScanThreadNum() bool { + return p.MinRemoteScanThreadNum != nil +} + +func (p 
*TWorkloadGroupInfo) IsSetMemoryLowWatermark() bool { + return p.MemoryLowWatermark != nil +} + +func (p *TWorkloadGroupInfo) IsSetMemoryHighWatermark() bool { + return p.MemoryHighWatermark != nil +} + +func (p *TWorkloadGroupInfo) IsSetReadBytesPerSecond() bool { + return p.ReadBytesPerSecond != nil +} + +func (p *TWorkloadGroupInfo) IsSetRemoteReadBytesPerSecond() bool { + return p.RemoteReadBytesPerSecond != nil +} + +func (p *TWorkloadGroupInfo) IsSetTag() bool { + return p.Tag != nil +} + +func (p *TWorkloadGroupInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I32 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I32 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto 
ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I64 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.STRING { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadGroupInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Id = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CpuShare = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.CpuHardLimit = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.MemLimit = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableMemoryOvercommit = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableCpuHardLimit = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField9(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ScanThreadNum = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField10(iprot thrift.TProtocol) error { + + var 
_field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MaxRemoteScanThreadNum = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField11(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MinRemoteScanThreadNum = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField12(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MemoryLowWatermark = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField13(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MemoryHighWatermark = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReadBytesPerSecond = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField15(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RemoteReadBytesPerSecond = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField16(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Tag = _field + return nil +} + +func (p *TWorkloadGroupInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWorkloadGroupInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) 
+WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetCpuShare() { + if err = oprot.WriteFieldBegin("cpu_share", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CpuShare); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCpuHardLimit() { + if err = oprot.WriteFieldBegin("cpu_hard_limit", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.CpuHardLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMemLimit() { + if err = oprot.WriteFieldBegin("mem_limit", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.MemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil 
+WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableMemoryOvercommit() { + if err = oprot.WriteFieldBegin("enable_memory_overcommit", thrift.BOOL, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableMemoryOvercommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableCpuHardLimit() { + if err = oprot.WriteFieldBegin("enable_cpu_hard_limit", thrift.BOOL, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableCpuHardLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetScanThreadNum() { + if err = oprot.WriteFieldBegin("scan_thread_num", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ScanThreadNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxRemoteScanThreadNum() { + if err = oprot.WriteFieldBegin("max_remote_scan_thread_num", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.MaxRemoteScanThreadNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetMinRemoteScanThreadNum() { + if err = oprot.WriteFieldBegin("min_remote_scan_thread_num", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.MinRemoteScanThreadNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetMemoryLowWatermark() { + if err = oprot.WriteFieldBegin("memory_low_watermark", thrift.I32, 
12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.MemoryLowWatermark); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetMemoryHighWatermark() { + if err = oprot.WriteFieldBegin("memory_high_watermark", thrift.I32, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.MemoryHighWatermark); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetReadBytesPerSecond() { + if err = oprot.WriteFieldBegin("read_bytes_per_second", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReadBytesPerSecond); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReadBytesPerSecond() { + if err = oprot.WriteFieldBegin("remote_read_bytes_per_second", thrift.I64, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RemoteReadBytesPerSecond); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetTag() { + if err = oprot.WriteFieldBegin("tag", thrift.STRING, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Tag); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWorkloadGroupInfo(%+v)", *p) + +} + +func (p *TWorkloadGroupInfo) DeepEqual(ano *TWorkloadGroupInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Id) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Version) { + return false + } + if !p.Field4DeepEqual(ano.CpuShare) { + return false + } + if !p.Field5DeepEqual(ano.CpuHardLimit) { + return false + } + if 
!p.Field6DeepEqual(ano.MemLimit) { + return false + } + if !p.Field7DeepEqual(ano.EnableMemoryOvercommit) { + return false + } + if !p.Field8DeepEqual(ano.EnableCpuHardLimit) { + return false + } + if !p.Field9DeepEqual(ano.ScanThreadNum) { + return false + } + if !p.Field10DeepEqual(ano.MaxRemoteScanThreadNum) { + return false + } + if !p.Field11DeepEqual(ano.MinRemoteScanThreadNum) { + return false + } + if !p.Field12DeepEqual(ano.MemoryLowWatermark) { + return false + } + if !p.Field13DeepEqual(ano.MemoryHighWatermark) { + return false + } + if !p.Field14DeepEqual(ano.ReadBytesPerSecond) { + return false + } + if !p.Field15DeepEqual(ano.RemoteReadBytesPerSecond) { + return false + } + if !p.Field16DeepEqual(ano.Tag) { + return false + } + return true +} + +func (p *TWorkloadGroupInfo) Field1DeepEqual(src *int64) bool { + + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field2DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field3DeepEqual(src *int64) bool { + + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false + } + if *p.Version != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field4DeepEqual(src *int64) bool { + + if p.CpuShare == src { + return true + } else if p.CpuShare == nil || src == nil { + return false + } + if *p.CpuShare != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field5DeepEqual(src *int32) bool { + + if p.CpuHardLimit == src { + return true + } else if p.CpuHardLimit == nil || src == nil { + return false + } + if *p.CpuHardLimit != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field6DeepEqual(src *string) bool { + + if p.MemLimit == src { + return true + } else if p.MemLimit == nil || src == nil { + return false + } + if strings.Compare(*p.MemLimit, *src) != 0 { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field7DeepEqual(src *bool) bool { + + if p.EnableMemoryOvercommit == src { + return true + } else if p.EnableMemoryOvercommit == nil || src == nil { + return false + } + if *p.EnableMemoryOvercommit != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field8DeepEqual(src *bool) bool { + + if p.EnableCpuHardLimit == src { + return true + } else if p.EnableCpuHardLimit == nil || src == nil { + return false + } + if *p.EnableCpuHardLimit != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field9DeepEqual(src *int32) bool { + + if p.ScanThreadNum == src { + return true + } else if p.ScanThreadNum == nil || src == nil { + return false + } + if *p.ScanThreadNum != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field10DeepEqual(src *int32) bool { + + if p.MaxRemoteScanThreadNum == src { + return true + } else if p.MaxRemoteScanThreadNum == nil || src == nil { + return false + } + if *p.MaxRemoteScanThreadNum != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field11DeepEqual(src *int32) bool { + + if p.MinRemoteScanThreadNum == src { + return true + } else if p.MinRemoteScanThreadNum == nil || src == nil { + return false + } + if *p.MinRemoteScanThreadNum != *src { + return false + } + return true +} +func (p 
*TWorkloadGroupInfo) Field12DeepEqual(src *int32) bool { + + if p.MemoryLowWatermark == src { + return true + } else if p.MemoryLowWatermark == nil || src == nil { + return false + } + if *p.MemoryLowWatermark != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field13DeepEqual(src *int32) bool { + + if p.MemoryHighWatermark == src { + return true + } else if p.MemoryHighWatermark == nil || src == nil { + return false + } + if *p.MemoryHighWatermark != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field14DeepEqual(src *int64) bool { + + if p.ReadBytesPerSecond == src { + return true + } else if p.ReadBytesPerSecond == nil || src == nil { + return false + } + if *p.ReadBytesPerSecond != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field15DeepEqual(src *int64) bool { + + if p.RemoteReadBytesPerSecond == src { + return true + } else if p.RemoteReadBytesPerSecond == nil || src == nil { + return false + } + if *p.RemoteReadBytesPerSecond != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field16DeepEqual(src *string) bool { + + if p.Tag == src { + return true + } else if p.Tag == nil || src == nil { + return false + } + if strings.Compare(*p.Tag, *src) != 0 { + return false + } + return true +} + +type TWorkloadCondition struct { + MetricName *TWorkloadMetricType `thrift:"metric_name,1,optional" frugal:"1,optional,TWorkloadMetricType" json:"metric_name,omitempty"` + Op *TCompareOperator `thrift:"op,2,optional" frugal:"2,optional,TCompareOperator" json:"op,omitempty"` + Value *string `thrift:"value,3,optional" frugal:"3,optional,string" json:"value,omitempty"` +} + +func NewTWorkloadCondition() *TWorkloadCondition { + return &TWorkloadCondition{} +} + +func (p *TWorkloadCondition) InitDefault() { +} + +var TWorkloadCondition_MetricName_DEFAULT TWorkloadMetricType + +func (p *TWorkloadCondition) GetMetricName() (v TWorkloadMetricType) { + if !p.IsSetMetricName() { + return TWorkloadCondition_MetricName_DEFAULT + } + return *p.MetricName +} + +var TWorkloadCondition_Op_DEFAULT TCompareOperator + +func (p *TWorkloadCondition) GetOp() (v TCompareOperator) { + if !p.IsSetOp() { + return TWorkloadCondition_Op_DEFAULT + } + return *p.Op +} + +var TWorkloadCondition_Value_DEFAULT string + +func (p *TWorkloadCondition) GetValue() (v string) { + if !p.IsSetValue() { + return TWorkloadCondition_Value_DEFAULT + } + return *p.Value +} +func (p *TWorkloadCondition) SetMetricName(val *TWorkloadMetricType) { + p.MetricName = val +} +func (p *TWorkloadCondition) SetOp(val *TCompareOperator) { + p.Op = val +} +func (p *TWorkloadCondition) SetValue(val *string) { + p.Value = val +} + +var fieldIDToName_TWorkloadCondition = map[int16]string{ + 1: "metric_name", + 2: "op", + 3: "value", +} + +func (p *TWorkloadCondition) IsSetMetricName() bool { + return p.MetricName != nil +} + +func (p *TWorkloadCondition) IsSetOp() bool { + return p.Op != nil +} + +func (p *TWorkloadCondition) IsSetValue() bool { + return p.Value != nil +} + +func (p *TWorkloadCondition) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto 
ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadCondition[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadCondition) ReadField1(iprot thrift.TProtocol) error { + + var _field *TWorkloadMetricType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TWorkloadMetricType(v) + _field = &tmp + } + p.MetricName = _field + return nil +} +func (p *TWorkloadCondition) ReadField2(iprot thrift.TProtocol) error { + + var _field *TCompareOperator + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TCompareOperator(v) + _field = &tmp + } + p.Op = _field + return nil +} +func (p *TWorkloadCondition) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Value = _field + return nil +} + +func (p *TWorkloadCondition) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWorkloadCondition"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWorkloadCondition) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetMetricName() { + if err = oprot.WriteFieldBegin("metric_name", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.MetricName)); err != 
nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWorkloadCondition) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetOp() { + if err = oprot.WriteFieldBegin("op", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Op)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWorkloadCondition) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetValue() { + if err = oprot.WriteFieldBegin("value", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Value); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWorkloadCondition) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWorkloadCondition(%+v)", *p) + +} + +func (p *TWorkloadCondition) DeepEqual(ano *TWorkloadCondition) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.MetricName) { + return false + } + if !p.Field2DeepEqual(ano.Op) { + return false + } + if !p.Field3DeepEqual(ano.Value) { + return false + } + return true +} + +func (p *TWorkloadCondition) Field1DeepEqual(src *TWorkloadMetricType) bool { + + if p.MetricName == src { + return true + } else if p.MetricName == nil || src == nil { + return false + } + if *p.MetricName != *src { + return false + } + return true +} +func (p *TWorkloadCondition) Field2DeepEqual(src *TCompareOperator) bool { + + if p.Op == src { + return true + } else if p.Op == nil || src == nil { + return false + } + if *p.Op != *src { + return false + } + return true +} +func (p *TWorkloadCondition) Field3DeepEqual(src *string) bool { + + if p.Value == src { + return true + } else if p.Value == nil || src == nil { + return false + } + if strings.Compare(*p.Value, *src) != 0 { + return false + } + return true +} + +type TWorkloadAction struct { + Action *TWorkloadActionType `thrift:"action,1,optional" frugal:"1,optional,TWorkloadActionType" json:"action,omitempty"` + ActionArgs_ *string `thrift:"action_args,2,optional" frugal:"2,optional,string" json:"action_args,omitempty"` +} + +func NewTWorkloadAction() *TWorkloadAction { + return &TWorkloadAction{} +} + +func (p *TWorkloadAction) InitDefault() { +} + +var TWorkloadAction_Action_DEFAULT TWorkloadActionType + +func (p *TWorkloadAction) GetAction() (v TWorkloadActionType) { + if !p.IsSetAction() { + return TWorkloadAction_Action_DEFAULT + } + return *p.Action +} + +var TWorkloadAction_ActionArgs__DEFAULT string + +func (p *TWorkloadAction) GetActionArgs_() (v string) { + if !p.IsSetActionArgs_() { + return TWorkloadAction_ActionArgs__DEFAULT + } + return *p.ActionArgs_ +} +func (p 
*TWorkloadAction) SetAction(val *TWorkloadActionType) { + p.Action = val +} +func (p *TWorkloadAction) SetActionArgs_(val *string) { + p.ActionArgs_ = val +} + +var fieldIDToName_TWorkloadAction = map[int16]string{ + 1: "action", + 2: "action_args", +} + +func (p *TWorkloadAction) IsSetAction() bool { + return p.Action != nil +} + +func (p *TWorkloadAction) IsSetActionArgs_() bool { + return p.ActionArgs_ != nil +} + +func (p *TWorkloadAction) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadAction[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadAction) ReadField1(iprot thrift.TProtocol) error { + + var _field *TWorkloadActionType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TWorkloadActionType(v) + _field = &tmp + } + p.Action = _field + return nil +} +func (p *TWorkloadAction) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ActionArgs_ = _field + return nil +} + +func (p *TWorkloadAction) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWorkloadAction"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + 
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWorkloadAction) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetAction() { + if err = oprot.WriteFieldBegin("action", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Action)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWorkloadAction) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetActionArgs_() { + if err = oprot.WriteFieldBegin("action_args", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ActionArgs_); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWorkloadAction) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWorkloadAction(%+v)", *p) + +} + +func (p *TWorkloadAction) DeepEqual(ano *TWorkloadAction) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Action) { + return false + } + if !p.Field2DeepEqual(ano.ActionArgs_) { + return false + } + return true +} + +func (p *TWorkloadAction) Field1DeepEqual(src *TWorkloadActionType) bool { + + if p.Action == src { + return true + } else if p.Action == nil || src == nil { + return false + } + if *p.Action != *src { + return false + } + return true +} +func (p *TWorkloadAction) Field2DeepEqual(src *string) bool { + + if p.ActionArgs_ == src { + return true + } else if p.ActionArgs_ == nil || src == nil { + return false + } + if strings.Compare(*p.ActionArgs_, *src) != 0 { + return false + } + return true +} + +type TWorkloadSchedPolicy struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Version *int32 `thrift:"version,3,optional" frugal:"3,optional,i32" json:"version,omitempty"` + Priority *int32 `thrift:"priority,4,optional" frugal:"4,optional,i32" json:"priority,omitempty"` + Enabled *bool `thrift:"enabled,5,optional" frugal:"5,optional,bool" json:"enabled,omitempty"` + ConditionList []*TWorkloadCondition `thrift:"condition_list,6,optional" frugal:"6,optional,list" json:"condition_list,omitempty"` + ActionList []*TWorkloadAction `thrift:"action_list,7,optional" frugal:"7,optional,list" json:"action_list,omitempty"` + WgIdList []int64 `thrift:"wg_id_list,8,optional" frugal:"8,optional,list" json:"wg_id_list,omitempty"` +} + +func NewTWorkloadSchedPolicy() *TWorkloadSchedPolicy { + return &TWorkloadSchedPolicy{} +} + +func (p *TWorkloadSchedPolicy) InitDefault() { +} + +var TWorkloadSchedPolicy_Id_DEFAULT int64 + +func (p *TWorkloadSchedPolicy) GetId() (v int64) { + if !p.IsSetId() { + return TWorkloadSchedPolicy_Id_DEFAULT + } + return *p.Id +} + +var TWorkloadSchedPolicy_Name_DEFAULT string + +func (p *TWorkloadSchedPolicy) GetName() (v string) { + if !p.IsSetName() { + return 
TWorkloadSchedPolicy_Name_DEFAULT + } + return *p.Name +} + +var TWorkloadSchedPolicy_Version_DEFAULT int32 + +func (p *TWorkloadSchedPolicy) GetVersion() (v int32) { + if !p.IsSetVersion() { + return TWorkloadSchedPolicy_Version_DEFAULT + } + return *p.Version +} + +var TWorkloadSchedPolicy_Priority_DEFAULT int32 + +func (p *TWorkloadSchedPolicy) GetPriority() (v int32) { + if !p.IsSetPriority() { + return TWorkloadSchedPolicy_Priority_DEFAULT + } + return *p.Priority +} + +var TWorkloadSchedPolicy_Enabled_DEFAULT bool + +func (p *TWorkloadSchedPolicy) GetEnabled() (v bool) { + if !p.IsSetEnabled() { + return TWorkloadSchedPolicy_Enabled_DEFAULT + } + return *p.Enabled +} + +var TWorkloadSchedPolicy_ConditionList_DEFAULT []*TWorkloadCondition + +func (p *TWorkloadSchedPolicy) GetConditionList() (v []*TWorkloadCondition) { + if !p.IsSetConditionList() { + return TWorkloadSchedPolicy_ConditionList_DEFAULT + } + return p.ConditionList +} + +var TWorkloadSchedPolicy_ActionList_DEFAULT []*TWorkloadAction + +func (p *TWorkloadSchedPolicy) GetActionList() (v []*TWorkloadAction) { + if !p.IsSetActionList() { + return TWorkloadSchedPolicy_ActionList_DEFAULT + } + return p.ActionList +} + +var TWorkloadSchedPolicy_WgIdList_DEFAULT []int64 + +func (p *TWorkloadSchedPolicy) GetWgIdList() (v []int64) { + if !p.IsSetWgIdList() { + return TWorkloadSchedPolicy_WgIdList_DEFAULT + } + return p.WgIdList +} +func (p *TWorkloadSchedPolicy) SetId(val *int64) { + p.Id = val +} +func (p *TWorkloadSchedPolicy) SetName(val *string) { + p.Name = val +} +func (p *TWorkloadSchedPolicy) SetVersion(val *int32) { + p.Version = val +} +func (p *TWorkloadSchedPolicy) SetPriority(val *int32) { + p.Priority = val +} +func (p *TWorkloadSchedPolicy) SetEnabled(val *bool) { + p.Enabled = val +} +func (p *TWorkloadSchedPolicy) SetConditionList(val []*TWorkloadCondition) { + p.ConditionList = val +} +func (p *TWorkloadSchedPolicy) SetActionList(val []*TWorkloadAction) { + p.ActionList = val +} +func (p *TWorkloadSchedPolicy) SetWgIdList(val []int64) { + p.WgIdList = val +} + +var fieldIDToName_TWorkloadSchedPolicy = map[int16]string{ + 1: "id", + 2: "name", + 3: "version", + 4: "priority", + 5: "enabled", + 6: "condition_list", + 7: "action_list", + 8: "wg_id_list", +} + +func (p *TWorkloadSchedPolicy) IsSetId() bool { + return p.Id != nil +} + +func (p *TWorkloadSchedPolicy) IsSetName() bool { + return p.Name != nil +} + +func (p *TWorkloadSchedPolicy) IsSetVersion() bool { + return p.Version != nil +} + +func (p *TWorkloadSchedPolicy) IsSetPriority() bool { + return p.Priority != nil +} + +func (p *TWorkloadSchedPolicy) IsSetEnabled() bool { + return p.Enabled != nil +} + +func (p *TWorkloadSchedPolicy) IsSetConditionList() bool { + return p.ConditionList != nil +} + +func (p *TWorkloadSchedPolicy) IsSetActionList() bool { + return p.ActionList != nil +} + +func (p *TWorkloadSchedPolicy) IsSetWgIdList() bool { + return p.WgIdList != nil +} + +func (p *TWorkloadSchedPolicy) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + 
case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.LIST { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadSchedPolicy[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Id = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Priority = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Enabled = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField6(iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TWorkloadCondition, 0, size) + values := make([]TWorkloadCondition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ConditionList = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TWorkloadAction, 0, size) + values := make([]TWorkloadAction, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ActionList = _field + return nil +} +func (p *TWorkloadSchedPolicy) ReadField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.WgIdList = _field + return nil +} + +func (p *TWorkloadSchedPolicy) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWorkloadSchedPolicy"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField2(oprot 
thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPriority() { + if err = oprot.WriteFieldBegin("priority", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Priority); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetEnabled() { + if err = oprot.WriteFieldBegin("enabled", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Enabled); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetConditionList() { + if err = oprot.WriteFieldBegin("condition_list", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ConditionList)); err != nil { + return err + } + for _, v := range p.ConditionList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetActionList() { + if err = oprot.WriteFieldBegin("action_list", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ActionList)); err != nil { + return err + } + for _, v := range p.ActionList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := 
oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetWgIdList() { + if err = oprot.WriteFieldBegin("wg_id_list", thrift.LIST, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.WgIdList)); err != nil { + return err + } + for _, v := range p.WgIdList { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWorkloadSchedPolicy(%+v)", *p) + +} + +func (p *TWorkloadSchedPolicy) DeepEqual(ano *TWorkloadSchedPolicy) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Id) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Version) { + return false + } + if !p.Field4DeepEqual(ano.Priority) { + return false + } + if !p.Field5DeepEqual(ano.Enabled) { + return false + } + if !p.Field6DeepEqual(ano.ConditionList) { + return false + } + if !p.Field7DeepEqual(ano.ActionList) { + return false + } + if !p.Field8DeepEqual(ano.WgIdList) { + return false + } + return true +} + +func (p *TWorkloadSchedPolicy) Field1DeepEqual(src *int64) bool { + + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { + return false + } + return true +} +func (p *TWorkloadSchedPolicy) Field2DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TWorkloadSchedPolicy) Field3DeepEqual(src *int32) bool { + + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false + } + if *p.Version != *src { + return false + } + return true +} +func (p *TWorkloadSchedPolicy) Field4DeepEqual(src *int32) bool { + + if p.Priority == src { + return true + } else if p.Priority == nil || src == nil { + return false + } + if *p.Priority != *src { + return false + } + return true +} +func (p *TWorkloadSchedPolicy) Field5DeepEqual(src *bool) bool { + + if p.Enabled == src { + return true + } else if p.Enabled == nil || src == nil { + return false + } + if *p.Enabled != *src { + return false + } + return true +} +func (p *TWorkloadSchedPolicy) Field6DeepEqual(src []*TWorkloadCondition) bool { + + if len(p.ConditionList) != len(src) { + return false + } + for i, v := range p.ConditionList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TWorkloadSchedPolicy) Field7DeepEqual(src []*TWorkloadAction) bool { + + if len(p.ActionList) != len(src) { + return false + } + for i, v := range p.ActionList { + _src := src[i] + if 
!v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TWorkloadSchedPolicy) Field8DeepEqual(src []int64) bool { + + if len(p.WgIdList) != len(src) { + return false + } + for i, v := range p.WgIdList { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TopicInfo struct { + WorkloadGroupInfo *TWorkloadGroupInfo `thrift:"workload_group_info,1,optional" frugal:"1,optional,TWorkloadGroupInfo" json:"workload_group_info,omitempty"` + WorkloadSchedPolicy *TWorkloadSchedPolicy `thrift:"workload_sched_policy,2,optional" frugal:"2,optional,TWorkloadSchedPolicy" json:"workload_sched_policy,omitempty"` +} + +func NewTopicInfo() *TopicInfo { + return &TopicInfo{} +} + +func (p *TopicInfo) InitDefault() { +} + +var TopicInfo_WorkloadGroupInfo_DEFAULT *TWorkloadGroupInfo + +func (p *TopicInfo) GetWorkloadGroupInfo() (v *TWorkloadGroupInfo) { + if !p.IsSetWorkloadGroupInfo() { + return TopicInfo_WorkloadGroupInfo_DEFAULT + } + return p.WorkloadGroupInfo +} + +var TopicInfo_WorkloadSchedPolicy_DEFAULT *TWorkloadSchedPolicy + +func (p *TopicInfo) GetWorkloadSchedPolicy() (v *TWorkloadSchedPolicy) { + if !p.IsSetWorkloadSchedPolicy() { + return TopicInfo_WorkloadSchedPolicy_DEFAULT + } + return p.WorkloadSchedPolicy +} +func (p *TopicInfo) SetWorkloadGroupInfo(val *TWorkloadGroupInfo) { + p.WorkloadGroupInfo = val +} +func (p *TopicInfo) SetWorkloadSchedPolicy(val *TWorkloadSchedPolicy) { + p.WorkloadSchedPolicy = val +} + +var fieldIDToName_TopicInfo = map[int16]string{ + 1: "workload_group_info", + 2: "workload_sched_policy", +} + +func (p *TopicInfo) IsSetWorkloadGroupInfo() bool { + return p.WorkloadGroupInfo != nil +} + +func (p *TopicInfo) IsSetWorkloadSchedPolicy() bool { + return p.WorkloadSchedPolicy != nil +} + +func (p *TopicInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TopicInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TopicInfo) ReadField1(iprot 
thrift.TProtocol) error { + _field := NewTWorkloadGroupInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.WorkloadGroupInfo = _field + return nil +} +func (p *TopicInfo) ReadField2(iprot thrift.TProtocol) error { + _field := NewTWorkloadSchedPolicy() + if err := _field.Read(iprot); err != nil { + return err + } + p.WorkloadSchedPolicy = _field + return nil +} + +func (p *TopicInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TopicInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TopicInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroupInfo() { + if err = oprot.WriteFieldBegin("workload_group_info", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.WorkloadGroupInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TopicInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadSchedPolicy() { + if err = oprot.WriteFieldBegin("workload_sched_policy", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.WorkloadSchedPolicy.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TopicInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TopicInfo(%+v)", *p) + +} + +func (p *TopicInfo) DeepEqual(ano *TopicInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkloadGroupInfo) { + return false + } + if !p.Field2DeepEqual(ano.WorkloadSchedPolicy) { + return false + } + return true +} + +func (p *TopicInfo) Field1DeepEqual(src *TWorkloadGroupInfo) bool { + + if !p.WorkloadGroupInfo.DeepEqual(src) { + return false + } + return true +} +func (p *TopicInfo) Field2DeepEqual(src *TWorkloadSchedPolicy) bool { + + if !p.WorkloadSchedPolicy.DeepEqual(src) { + return false + } + return true +} + +type TPublishTopicRequest struct { + TopicMap map[TTopicInfoType][]*TopicInfo `thrift:"topic_map,1,required" frugal:"1,required,map>" json:"topic_map"` +} + +func NewTPublishTopicRequest() *TPublishTopicRequest { + return &TPublishTopicRequest{} +} 
+ +func (p *TPublishTopicRequest) InitDefault() { +} + +func (p *TPublishTopicRequest) GetTopicMap() (v map[TTopicInfoType][]*TopicInfo) { + return p.TopicMap +} +func (p *TPublishTopicRequest) SetTopicMap(val map[TTopicInfoType][]*TopicInfo) { + p.TopicMap = val +} + +var fieldIDToName_TPublishTopicRequest = map[int16]string{ + 1: "topic_map", +} + +func (p *TPublishTopicRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTopicMap bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTopicMap = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTopicMap { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicRequest[fieldId])) +} + +func (p *TPublishTopicRequest) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[TTopicInfoType][]*TopicInfo, size) + for i := 0; i < size; i++ { + var _key TTopicInfoType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = TTopicInfoType(v) + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TopicInfo, 0, size) + values := make([]TopicInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TopicMap = _field + return nil +} + +func (p *TPublishTopicRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPublishTopicRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto 
WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPublishTopicRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("topic_map", thrift.MAP, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.TopicMap)); err != nil { + return err + } + for k, v := range p.TopicMap { + if err := oprot.WriteI32(int32(k)); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPublishTopicRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPublishTopicRequest(%+v)", *p) + +} + +func (p *TPublishTopicRequest) DeepEqual(ano *TPublishTopicRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TopicMap) { + return false + } + return true +} + +func (p *TPublishTopicRequest) Field1DeepEqual(src map[TTopicInfoType][]*TopicInfo) bool { + + if len(p.TopicMap) != len(src) { + return false + } + for k, v := range p.TopicMap { + _src := src[k] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true +} + +type TPublishTopicResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +} + +func NewTPublishTopicResult_() *TPublishTopicResult_ { + return &TPublishTopicResult_{} +} + +func (p *TPublishTopicResult_) InitDefault() { +} + +var TPublishTopicResult__Status_DEFAULT *status.TStatus + +func (p *TPublishTopicResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TPublishTopicResult__Status_DEFAULT + } + return p.Status +} +func (p *TPublishTopicResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TPublishTopicResult_ = map[int16]string{ + 1: "status", +} + +func (p *TPublishTopicResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TPublishTopicResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicResult_[fieldId])) +} + +func (p *TPublishTopicResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TPublishTopicResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPublishTopicResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPublishTopicResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPublishTopicResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPublishTopicResult_(%+v)", *p) + +} + +func (p *TPublishTopicResult_) DeepEqual(ano *TPublishTopicResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TPublishTopicResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TGetRealtimeExecStatusRequest struct { + Id *types.TUniqueId 
`thrift:"id,1,optional" frugal:"1,optional,types.TUniqueId" json:"id,omitempty"` +} + +func NewTGetRealtimeExecStatusRequest() *TGetRealtimeExecStatusRequest { + return &TGetRealtimeExecStatusRequest{} +} + +func (p *TGetRealtimeExecStatusRequest) InitDefault() { +} + +var TGetRealtimeExecStatusRequest_Id_DEFAULT *types.TUniqueId + +func (p *TGetRealtimeExecStatusRequest) GetId() (v *types.TUniqueId) { + if !p.IsSetId() { + return TGetRealtimeExecStatusRequest_Id_DEFAULT + } + return p.Id +} +func (p *TGetRealtimeExecStatusRequest) SetId(val *types.TUniqueId) { + p.Id = val +} + +var fieldIDToName_TGetRealtimeExecStatusRequest = map[int16]string{ + 1: "id", +} + +func (p *TGetRealtimeExecStatusRequest) IsSetId() bool { + return p.Id != nil +} + +func (p *TGetRealtimeExecStatusRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusRequest) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.Id = _field + return nil +} + +func (p *TGetRealtimeExecStatusRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetRealtimeExecStatusRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusRequest) writeField1(oprot thrift.TProtocol) (err 
error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Id.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetRealtimeExecStatusRequest(%+v)", *p) + +} + +func (p *TGetRealtimeExecStatusRequest) DeepEqual(ano *TGetRealtimeExecStatusRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Id) { + return false + } + return true +} + +func (p *TGetRealtimeExecStatusRequest) Field1DeepEqual(src *types.TUniqueId) bool { + + if !p.Id.DeepEqual(src) { + return false + } + return true +} + +type TGetRealtimeExecStatusResponse struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + ReportExecStatusParams *frontendservice.TReportExecStatusParams `thrift:"report_exec_status_params,2,optional" frugal:"2,optional,frontendservice.TReportExecStatusParams" json:"report_exec_status_params,omitempty"` +} + +func NewTGetRealtimeExecStatusResponse() *TGetRealtimeExecStatusResponse { + return &TGetRealtimeExecStatusResponse{} +} + +func (p *TGetRealtimeExecStatusResponse) InitDefault() { +} + +var TGetRealtimeExecStatusResponse_Status_DEFAULT *status.TStatus + +func (p *TGetRealtimeExecStatusResponse) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetRealtimeExecStatusResponse_Status_DEFAULT + } + return p.Status +} + +var TGetRealtimeExecStatusResponse_ReportExecStatusParams_DEFAULT *frontendservice.TReportExecStatusParams + +func (p *TGetRealtimeExecStatusResponse) GetReportExecStatusParams() (v *frontendservice.TReportExecStatusParams) { + if !p.IsSetReportExecStatusParams() { + return TGetRealtimeExecStatusResponse_ReportExecStatusParams_DEFAULT + } + return p.ReportExecStatusParams +} +func (p *TGetRealtimeExecStatusResponse) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetRealtimeExecStatusResponse) SetReportExecStatusParams(val *frontendservice.TReportExecStatusParams) { + p.ReportExecStatusParams = val +} + +var fieldIDToName_TGetRealtimeExecStatusResponse = map[int16]string{ + 1: "status", + 2: "report_exec_status_params", +} + +func (p *TGetRealtimeExecStatusResponse) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetRealtimeExecStatusResponse) IsSetReportExecStatusParams() bool { + return p.ReportExecStatusParams != nil +} + +func (p *TGetRealtimeExecStatusResponse) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + 
goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusResponse) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetRealtimeExecStatusResponse) ReadField2(iprot thrift.TProtocol) error { + _field := frontendservice.NewTReportExecStatusParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.ReportExecStatusParams = _field + return nil +} + +func (p *TGetRealtimeExecStatusResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetRealtimeExecStatusResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusResponse) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetReportExecStatusParams() { + if err = oprot.WriteFieldBegin("report_exec_status_params", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.ReportExecStatusParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetRealtimeExecStatusResponse(%+v)", *p) + +} + +func (p *TGetRealtimeExecStatusResponse) DeepEqual(ano *TGetRealtimeExecStatusResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.ReportExecStatusParams) { + return false + } + return true +} + +func (p *TGetRealtimeExecStatusResponse) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetRealtimeExecStatusResponse) Field2DeepEqual(src *frontendservice.TReportExecStatusParams) bool { + + if !p.ReportExecStatusParams.DeepEqual(src) { + return false + } + return true +} + +type BackendService interface { + ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) + + CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) + + TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) + + SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) + + MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) + + ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) + + PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) + + SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) + + GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) + + EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) + + GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) + + GetTrashUsedCapacity(ctx context.Context) (r int64, err error) + + GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) + + SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) + + OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) + + GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) + + CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) + + GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) + + CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) + + WarmUpCacheAsync(ctx context.Context, request *TWarmUpCacheAsyncRequest) (r *TWarmUpCacheAsyncResponse, err error) + + CheckWarmUpCacheAsync(ctx context.Context, request 
*TCheckWarmUpCacheAsyncRequest) (r *TCheckWarmUpCacheAsyncResponse, err error) + + SyncLoadForTablets(ctx context.Context, request *TSyncLoadForTabletsRequest) (r *TSyncLoadForTabletsResponse, err error) + + GetTopNHotPartitions(ctx context.Context, request *TGetTopNHotPartitionsRequest) (r *TGetTopNHotPartitionsResponse, err error) + + WarmUpTablets(ctx context.Context, request *TWarmUpTabletsRequest) (r *TWarmUpTabletsResponse, err error) + + IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) + + QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *TQueryIngestBinlogRequest) (r *TQueryIngestBinlogResult_, err error) + + PublishTopicInfo(ctx context.Context, topicRequest *TPublishTopicRequest) (r *TPublishTopicResult_, err error) + + GetRealtimeExecStatus(ctx context.Context, request *TGetRealtimeExecStatusRequest) (r *TGetRealtimeExecStatusResponse, err error) +} + +type BackendServiceClient struct { + c thrift.TClient +} + +func NewBackendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BackendServiceClient { + return &BackendServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewBackendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BackendServiceClient { + return &BackendServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewBackendServiceClient(c thrift.TClient) *BackendServiceClient { + return &BackendServiceClient{ + c: c, + } +} + +func (p *BackendServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *BackendServiceClient) ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) { + var _args BackendServiceExecPlanFragmentArgs + _args.Params = params + var _result BackendServiceExecPlanFragmentResult + if err = p.Client_().Call(ctx, "exec_plan_fragment", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) { + var _args BackendServiceCancelPlanFragmentArgs + _args.Params = params + var _result BackendServiceCancelPlanFragmentResult + if err = p.Client_().Call(ctx, "cancel_plan_fragment", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) { + var _args BackendServiceTransmitDataArgs + _args.Params = params + var _result BackendServiceTransmitDataResult + if err = p.Client_().Call(ctx, "transmit_data", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) { + var _args BackendServiceSubmitTasksArgs + _args.Tasks = tasks + var _result BackendServiceSubmitTasksResult + if err = p.Client_().Call(ctx, "submit_tasks", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) { + var _args 
BackendServiceMakeSnapshotArgs + _args.SnapshotRequest = snapshotRequest + var _result BackendServiceMakeSnapshotResult + if err = p.Client_().Call(ctx, "make_snapshot", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) { + var _args BackendServiceReleaseSnapshotArgs + _args.SnapshotPath = snapshotPath + var _result BackendServiceReleaseSnapshotResult + if err = p.Client_().Call(ctx, "release_snapshot", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) { + var _args BackendServicePublishClusterStateArgs + _args.Request = request + var _result BackendServicePublishClusterStateResult + if err = p.Client_().Call(ctx, "publish_cluster_state", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) { + var _args BackendServiceSubmitExportTaskArgs + _args.Request = request + var _result BackendServiceSubmitExportTaskResult + if err = p.Client_().Call(ctx, "submit_export_task", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) { + var _args BackendServiceGetExportStatusArgs + _args.TaskId = taskId + var _result BackendServiceGetExportStatusResult + if err = p.Client_().Call(ctx, "get_export_status", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) { + var _args BackendServiceEraseExportTaskArgs + _args.TaskId = taskId + var _result BackendServiceEraseExportTaskResult + if err = p.Client_().Call(ctx, "erase_export_task", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) { + var _args BackendServiceGetTabletStatArgs + var _result BackendServiceGetTabletStatResult + if err = p.Client_().Call(ctx, "get_tablet_stat", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetTrashUsedCapacity(ctx context.Context) (r int64, err error) { + var _args BackendServiceGetTrashUsedCapacityArgs + var _result BackendServiceGetTrashUsedCapacityResult + if err = p.Client_().Call(ctx, "get_trash_used_capacity", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) { + var _args BackendServiceGetDiskTrashUsedCapacityArgs + var _result BackendServiceGetDiskTrashUsedCapacityResult + if err = p.Client_().Call(ctx, "get_disk_trash_used_capacity", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) { + var _args BackendServiceSubmitRoutineLoadTaskArgs + _args.Tasks = tasks + var _result 
BackendServiceSubmitRoutineLoadTaskResult + if err = p.Client_().Call(ctx, "submit_routine_load_task", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) { + var _args BackendServiceOpenScannerArgs + _args.Params = params + var _result BackendServiceOpenScannerResult + if err = p.Client_().Call(ctx, "open_scanner", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) { + var _args BackendServiceGetNextArgs + _args.Params = params + var _result BackendServiceGetNextResult + if err = p.Client_().Call(ctx, "get_next", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) { + var _args BackendServiceCloseScannerArgs + _args.Params = params + var _result BackendServiceCloseScannerResult + if err = p.Client_().Call(ctx, "close_scanner", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) { var _args BackendServiceGetStreamLoadRecordArgs _args.LastStreamRecordTime = lastStreamRecordTime var _result BackendServiceGetStreamLoadRecordResult if err = p.Client_().Call(ctx, "get_stream_load_record", &_args, &_result); err != nil { return } - return _result.GetSuccess(), nil + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) { + var _args BackendServiceCheckStorageFormatArgs + var _result BackendServiceCheckStorageFormatResult + if err = p.Client_().Call(ctx, "check_storage_format", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) WarmUpCacheAsync(ctx context.Context, request *TWarmUpCacheAsyncRequest) (r *TWarmUpCacheAsyncResponse, err error) { + var _args BackendServiceWarmUpCacheAsyncArgs + _args.Request = request + var _result BackendServiceWarmUpCacheAsyncResult + if err = p.Client_().Call(ctx, "warm_up_cache_async", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) CheckWarmUpCacheAsync(ctx context.Context, request *TCheckWarmUpCacheAsyncRequest) (r *TCheckWarmUpCacheAsyncResponse, err error) { + var _args BackendServiceCheckWarmUpCacheAsyncArgs + _args.Request = request + var _result BackendServiceCheckWarmUpCacheAsyncResult + if err = p.Client_().Call(ctx, "check_warm_up_cache_async", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) SyncLoadForTablets(ctx context.Context, request *TSyncLoadForTabletsRequest) (r *TSyncLoadForTabletsResponse, err error) { + var _args BackendServiceSyncLoadForTabletsArgs + _args.Request = request + var _result BackendServiceSyncLoadForTabletsResult + if err = p.Client_().Call(ctx, "sync_load_for_tablets", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) 
GetTopNHotPartitions(ctx context.Context, request *TGetTopNHotPartitionsRequest) (r *TGetTopNHotPartitionsResponse, err error) { + var _args BackendServiceGetTopNHotPartitionsArgs + _args.Request = request + var _result BackendServiceGetTopNHotPartitionsResult + if err = p.Client_().Call(ctx, "get_top_n_hot_partitions", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) WarmUpTablets(ctx context.Context, request *TWarmUpTabletsRequest) (r *TWarmUpTabletsResponse, err error) { + var _args BackendServiceWarmUpTabletsArgs + _args.Request = request + var _result BackendServiceWarmUpTabletsResult + if err = p.Client_().Call(ctx, "warm_up_tablets", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) { + var _args BackendServiceIngestBinlogArgs + _args.IngestBinlogRequest = ingestBinlogRequest + var _result BackendServiceIngestBinlogResult + if err = p.Client_().Call(ctx, "ingest_binlog", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *TQueryIngestBinlogRequest) (r *TQueryIngestBinlogResult_, err error) { + var _args BackendServiceQueryIngestBinlogArgs + _args.QueryIngestBinlogRequest = queryIngestBinlogRequest + var _result BackendServiceQueryIngestBinlogResult + if err = p.Client_().Call(ctx, "query_ingest_binlog", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) PublishTopicInfo(ctx context.Context, topicRequest *TPublishTopicRequest) (r *TPublishTopicResult_, err error) { + var _args BackendServicePublishTopicInfoArgs + _args.TopicRequest = topicRequest + var _result BackendServicePublishTopicInfoResult + if err = p.Client_().Call(ctx, "publish_topic_info", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *BackendServiceClient) GetRealtimeExecStatus(ctx context.Context, request *TGetRealtimeExecStatusRequest) (r *TGetRealtimeExecStatusResponse, err error) { + var _args BackendServiceGetRealtimeExecStatusArgs + _args.Request = request + var _result BackendServiceGetRealtimeExecStatusResult + if err = p.Client_().Call(ctx, "get_realtime_exec_status", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +type BackendServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler BackendService +} + +func (p *BackendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *BackendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *BackendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewBackendServiceProcessor(handler BackendService) *BackendServiceProcessor { + self := &BackendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self.AddToProcessorMap("exec_plan_fragment", &backendServiceProcessorExecPlanFragment{handler: handler}) + self.AddToProcessorMap("cancel_plan_fragment", &backendServiceProcessorCancelPlanFragment{handler: handler}) + 
self.AddToProcessorMap("transmit_data", &backendServiceProcessorTransmitData{handler: handler}) + self.AddToProcessorMap("submit_tasks", &backendServiceProcessorSubmitTasks{handler: handler}) + self.AddToProcessorMap("make_snapshot", &backendServiceProcessorMakeSnapshot{handler: handler}) + self.AddToProcessorMap("release_snapshot", &backendServiceProcessorReleaseSnapshot{handler: handler}) + self.AddToProcessorMap("publish_cluster_state", &backendServiceProcessorPublishClusterState{handler: handler}) + self.AddToProcessorMap("submit_export_task", &backendServiceProcessorSubmitExportTask{handler: handler}) + self.AddToProcessorMap("get_export_status", &backendServiceProcessorGetExportStatus{handler: handler}) + self.AddToProcessorMap("erase_export_task", &backendServiceProcessorEraseExportTask{handler: handler}) + self.AddToProcessorMap("get_tablet_stat", &backendServiceProcessorGetTabletStat{handler: handler}) + self.AddToProcessorMap("get_trash_used_capacity", &backendServiceProcessorGetTrashUsedCapacity{handler: handler}) + self.AddToProcessorMap("get_disk_trash_used_capacity", &backendServiceProcessorGetDiskTrashUsedCapacity{handler: handler}) + self.AddToProcessorMap("submit_routine_load_task", &backendServiceProcessorSubmitRoutineLoadTask{handler: handler}) + self.AddToProcessorMap("open_scanner", &backendServiceProcessorOpenScanner{handler: handler}) + self.AddToProcessorMap("get_next", &backendServiceProcessorGetNext{handler: handler}) + self.AddToProcessorMap("close_scanner", &backendServiceProcessorCloseScanner{handler: handler}) + self.AddToProcessorMap("get_stream_load_record", &backendServiceProcessorGetStreamLoadRecord{handler: handler}) + self.AddToProcessorMap("check_storage_format", &backendServiceProcessorCheckStorageFormat{handler: handler}) + self.AddToProcessorMap("warm_up_cache_async", &backendServiceProcessorWarmUpCacheAsync{handler: handler}) + self.AddToProcessorMap("check_warm_up_cache_async", &backendServiceProcessorCheckWarmUpCacheAsync{handler: handler}) + self.AddToProcessorMap("sync_load_for_tablets", &backendServiceProcessorSyncLoadForTablets{handler: handler}) + self.AddToProcessorMap("get_top_n_hot_partitions", &backendServiceProcessorGetTopNHotPartitions{handler: handler}) + self.AddToProcessorMap("warm_up_tablets", &backendServiceProcessorWarmUpTablets{handler: handler}) + self.AddToProcessorMap("ingest_binlog", &backendServiceProcessorIngestBinlog{handler: handler}) + self.AddToProcessorMap("query_ingest_binlog", &backendServiceProcessorQueryIngestBinlog{handler: handler}) + self.AddToProcessorMap("publish_topic_info", &backendServiceProcessorPublishTopicInfo{handler: handler}) + self.AddToProcessorMap("get_realtime_exec_status", &backendServiceProcessorGetRealtimeExecStatus{handler: handler}) + return self +} +func (p *BackendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x +} + +type backendServiceProcessorExecPlanFragment struct { + handler BackendService +} + +func (p 
*backendServiceProcessorExecPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceExecPlanFragmentArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceExecPlanFragmentResult{} + var retval *palointernalservice.TExecPlanFragmentResult_ + if retval, err2 = p.handler.ExecPlanFragment(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing exec_plan_fragment: "+err2.Error()) + oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("exec_plan_fragment", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorCancelPlanFragment struct { + handler BackendService +} + +func (p *backendServiceProcessorCancelPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceCancelPlanFragmentArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceCancelPlanFragmentResult{} + var retval *palointernalservice.TCancelPlanFragmentResult_ + if retval, err2 = p.handler.CancelPlanFragment(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing cancel_plan_fragment: "+err2.Error()) + oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("cancel_plan_fragment", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorTransmitData struct { + handler BackendService +} + +func (p *backendServiceProcessorTransmitData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceTransmitDataArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId) + x.Write(oprot) + 
oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceTransmitDataResult{} + var retval *palointernalservice.TTransmitDataResult_ + if retval, err2 = p.handler.TransmitData(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing transmit_data: "+err2.Error()) + oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("transmit_data", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorSubmitTasks struct { + handler BackendService +} + +func (p *backendServiceProcessorSubmitTasks) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceSubmitTasksArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceSubmitTasksResult{} + var retval *agentservice.TAgentResult_ + if retval, err2 = p.handler.SubmitTasks(ctx, args.Tasks); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_tasks: "+err2.Error()) + oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("submit_tasks", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorMakeSnapshot struct { + handler BackendService +} + +func (p *backendServiceProcessorMakeSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceMakeSnapshotArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceMakeSnapshotResult{} + var retval *agentservice.TAgentResult_ + if retval, err2 = p.handler.MakeSnapshot(ctx, args.SnapshotRequest); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing make_snapshot: "+err2.Error()) + oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } 
else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("make_snapshot", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorReleaseSnapshot struct { + handler BackendService +} + +func (p *backendServiceProcessorReleaseSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceReleaseSnapshotArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceReleaseSnapshotResult{} + var retval *agentservice.TAgentResult_ + if retval, err2 = p.handler.ReleaseSnapshot(ctx, args.SnapshotPath); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing release_snapshot: "+err2.Error()) + oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("release_snapshot", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorPublishClusterState struct { + handler BackendService +} + +func (p *backendServiceProcessorPublishClusterState) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServicePublishClusterStateArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServicePublishClusterStateResult{} + var retval *agentservice.TAgentResult_ + if retval, err2 = p.handler.PublishClusterState(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_cluster_state: "+err2.Error()) + oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("publish_cluster_state", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type 
backendServiceProcessorSubmitExportTask struct { + handler BackendService +} + +func (p *backendServiceProcessorSubmitExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceSubmitExportTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceSubmitExportTaskResult{} + var retval *status.TStatus + if retval, err2 = p.handler.SubmitExportTask(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_export_task: "+err2.Error()) + oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("submit_export_task", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetExportStatus struct { + handler BackendService +} + +func (p *backendServiceProcessorGetExportStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetExportStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetExportStatusResult{} + var retval *palointernalservice.TExportStatusResult_ + if retval, err2 = p.handler.GetExportStatus(ctx, args.TaskId); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_export_status: "+err2.Error()) + oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_export_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorEraseExportTask struct { + handler BackendService +} + +func (p *backendServiceProcessorEraseExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceEraseExportTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("erase_export_task", 
thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceEraseExportTaskResult{} + var retval *status.TStatus + if retval, err2 = p.handler.EraseExportTask(ctx, args.TaskId); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing erase_export_task: "+err2.Error()) + oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("erase_export_task", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetTabletStat struct { + handler BackendService +} + +func (p *backendServiceProcessorGetTabletStat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetTabletStatArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetTabletStatResult{} + var retval *TTabletStatResult_ + if retval, err2 = p.handler.GetTabletStat(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_tablet_stat: "+err2.Error()) + oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_tablet_stat", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetTrashUsedCapacity struct { + handler BackendService +} + +func (p *backendServiceProcessorGetTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetTrashUsedCapacityArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetTrashUsedCapacityResult{} + var retval int64 + if retval, err2 = p.handler.GetTrashUsedCapacity(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_trash_used_capacity: "+err2.Error()) + oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId) + x.Write(oprot) + 
oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = &retval + } + if err2 = oprot.WriteMessageBegin("get_trash_used_capacity", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetDiskTrashUsedCapacity struct { + handler BackendService +} + +func (p *backendServiceProcessorGetDiskTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetDiskTrashUsedCapacityArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetDiskTrashUsedCapacityResult{} + var retval []*TDiskTrashInfo + if retval, err2 = p.handler.GetDiskTrashUsedCapacity(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_disk_trash_used_capacity: "+err2.Error()) + oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorSubmitRoutineLoadTask struct { + handler BackendService +} + +func (p *backendServiceProcessorSubmitRoutineLoadTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceSubmitRoutineLoadTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceSubmitRoutineLoadTaskResult{} + var retval *status.TStatus + if retval, err2 = p.handler.SubmitRoutineLoadTask(ctx, args.Tasks); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_routine_load_task: "+err2.Error()) + oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("submit_routine_load_task", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 
= oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorOpenScanner struct { + handler BackendService +} + +func (p *backendServiceProcessorOpenScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceOpenScannerArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceOpenScannerResult{} + var retval *dorisexternalservice.TScanOpenResult_ + if retval, err2 = p.handler.OpenScanner(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing open_scanner: "+err2.Error()) + oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("open_scanner", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetNext struct { + handler BackendService +} + +func (p *backendServiceProcessorGetNext) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetNextArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetNextResult{} + var retval *dorisexternalservice.TScanBatchResult_ + if retval, err2 = p.handler.GetNext(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_next: "+err2.Error()) + oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_next", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorCloseScanner struct { + handler BackendService +} + +func (p *backendServiceProcessorCloseScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceCloseScannerArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("close_scanner", 
thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceCloseScannerResult{} + var retval *dorisexternalservice.TScanCloseResult_ + if retval, err2 = p.handler.CloseScanner(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing close_scanner: "+err2.Error()) + oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("close_scanner", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetStreamLoadRecord struct { + handler BackendService +} + +func (p *backendServiceProcessorGetStreamLoadRecord) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetStreamLoadRecordArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetStreamLoadRecordResult{} + var retval *TStreamLoadRecordResult_ + if retval, err2 = p.handler.GetStreamLoadRecord(ctx, args.LastStreamRecordTime); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_stream_load_record: "+err2.Error()) + oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_stream_load_record", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorCheckStorageFormat struct { + handler BackendService +} + +func (p *backendServiceProcessorCheckStorageFormat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceCheckStorageFormatArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceCheckStorageFormatResult{} + var retval *TCheckStorageFormatResult_ + if retval, err2 = p.handler.CheckStorageFormat(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_storage_format: "+err2.Error()) 
+ oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("check_storage_format", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorWarmUpCacheAsync struct { + handler BackendService +} + +func (p *backendServiceProcessorWarmUpCacheAsync) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceWarmUpCacheAsyncArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("warm_up_cache_async", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceWarmUpCacheAsyncResult{} + var retval *TWarmUpCacheAsyncResponse + if retval, err2 = p.handler.WarmUpCacheAsync(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing warm_up_cache_async: "+err2.Error()) + oprot.WriteMessageBegin("warm_up_cache_async", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("warm_up_cache_async", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorCheckWarmUpCacheAsync struct { + handler BackendService +} + +func (p *backendServiceProcessorCheckWarmUpCacheAsync) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceCheckWarmUpCacheAsyncArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceCheckWarmUpCacheAsyncResult{} + var retval *TCheckWarmUpCacheAsyncResponse + if retval, err2 = p.handler.CheckWarmUpCacheAsync(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_warm_up_cache_async: "+err2.Error()) + oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = 
oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorSyncLoadForTablets struct { + handler BackendService +} + +func (p *backendServiceProcessorSyncLoadForTablets) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceSyncLoadForTabletsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("sync_load_for_tablets", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceSyncLoadForTabletsResult{} + var retval *TSyncLoadForTabletsResponse + if retval, err2 = p.handler.SyncLoadForTablets(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing sync_load_for_tablets: "+err2.Error()) + oprot.WriteMessageBegin("sync_load_for_tablets", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("sync_load_for_tablets", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetTopNHotPartitions struct { + handler BackendService +} + +func (p *backendServiceProcessorGetTopNHotPartitions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetTopNHotPartitionsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetTopNHotPartitionsResult{} + var retval *TGetTopNHotPartitionsResponse + if retval, err2 = p.handler.GetTopNHotPartitions(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_top_n_hot_partitions: "+err2.Error()) + oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorWarmUpTablets struct { + handler BackendService +} + +func (p *backendServiceProcessorWarmUpTablets) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success 
bool, err thrift.TException) { + args := BackendServiceWarmUpTabletsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("warm_up_tablets", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceWarmUpTabletsResult{} + var retval *TWarmUpTabletsResponse + if retval, err2 = p.handler.WarmUpTablets(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing warm_up_tablets: "+err2.Error()) + oprot.WriteMessageBegin("warm_up_tablets", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("warm_up_tablets", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorIngestBinlog struct { + handler BackendService +} + +func (p *backendServiceProcessorIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceIngestBinlogArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceIngestBinlogResult{} + var retval *TIngestBinlogResult_ + if retval, err2 = p.handler.IngestBinlog(ctx, args.IngestBinlogRequest); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ingest_binlog: "+err2.Error()) + oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ingest_binlog", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorQueryIngestBinlog struct { + handler BackendService +} + +func (p *backendServiceProcessorQueryIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceQueryIngestBinlogArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_ingest_binlog", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceQueryIngestBinlogResult{} + var retval *TQueryIngestBinlogResult_ + if retval, err2 = 
p.handler.QueryIngestBinlog(ctx, args.QueryIngestBinlogRequest); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_ingest_binlog: "+err2.Error()) + oprot.WriteMessageBegin("query_ingest_binlog", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_ingest_binlog", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorPublishTopicInfo struct { + handler BackendService +} + +func (p *backendServiceProcessorPublishTopicInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServicePublishTopicInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("publish_topic_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServicePublishTopicInfoResult{} + var retval *TPublishTopicResult_ + if retval, err2 = p.handler.PublishTopicInfo(ctx, args.TopicRequest); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_topic_info: "+err2.Error()) + oprot.WriteMessageBegin("publish_topic_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("publish_topic_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type backendServiceProcessorGetRealtimeExecStatus struct { + handler BackendService +} + +func (p *backendServiceProcessorGetRealtimeExecStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BackendServiceGetRealtimeExecStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_realtime_exec_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := BackendServiceGetRealtimeExecStatusResult{} + var retval *TGetRealtimeExecStatusResponse + if retval, err2 = p.handler.GetRealtimeExecStatus(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_realtime_exec_status: "+err2.Error()) + oprot.WriteMessageBegin("get_realtime_exec_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = 
oprot.WriteMessageBegin("get_realtime_exec_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type BackendServiceExecPlanFragmentArgs struct { + Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TExecPlanFragmentParams" json:"params"` +} + +func NewBackendServiceExecPlanFragmentArgs() *BackendServiceExecPlanFragmentArgs { + return &BackendServiceExecPlanFragmentArgs{} +} + +func (p *BackendServiceExecPlanFragmentArgs) InitDefault() { +} + +var BackendServiceExecPlanFragmentArgs_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams + +func (p *BackendServiceExecPlanFragmentArgs) GetParams() (v *palointernalservice.TExecPlanFragmentParams) { + if !p.IsSetParams() { + return BackendServiceExecPlanFragmentArgs_Params_DEFAULT + } + return p.Params +} +func (p *BackendServiceExecPlanFragmentArgs) SetParams(val *palointernalservice.TExecPlanFragmentParams) { + p.Params = val +} + +var fieldIDToName_BackendServiceExecPlanFragmentArgs = map[int16]string{ + 1: "params", +} + +func (p *BackendServiceExecPlanFragmentArgs) IsSetParams() bool { + return p.Params != nil +} + +func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTExecPlanFragmentParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} + +func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("exec_plan_fragment_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err 
!= nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceExecPlanFragmentArgs(%+v)", *p) + +} + +func (p *BackendServiceExecPlanFragmentArgs) DeepEqual(ano *BackendServiceExecPlanFragmentArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Params) { + return false + } + return true +} + +func (p *BackendServiceExecPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool { + + if !p.Params.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceExecPlanFragmentResult struct { + Success *palointernalservice.TExecPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExecPlanFragmentResult_" json:"success,omitempty"` +} + +func NewBackendServiceExecPlanFragmentResult() *BackendServiceExecPlanFragmentResult { + return &BackendServiceExecPlanFragmentResult{} +} + +func (p *BackendServiceExecPlanFragmentResult) InitDefault() { +} + +var BackendServiceExecPlanFragmentResult_Success_DEFAULT *palointernalservice.TExecPlanFragmentResult_ + +func (p *BackendServiceExecPlanFragmentResult) GetSuccess() (v *palointernalservice.TExecPlanFragmentResult_) { + if !p.IsSetSuccess() { + return BackendServiceExecPlanFragmentResult_Success_DEFAULT + } + return p.Success +} +func (p *BackendServiceExecPlanFragmentResult) SetSuccess(x interface{}) { + p.Success = x.(*palointernalservice.TExecPlanFragmentResult_) +} + +var fieldIDToName_BackendServiceExecPlanFragmentResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceExecPlanFragmentResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { 
+ goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTExecPlanFragmentResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("exec_plan_fragment_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceExecPlanFragmentResult(%+v)", *p) + +} + +func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExecPlanFragmentResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TExecPlanFragmentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceCancelPlanFragmentArgs struct { + Params *palointernalservice.TCancelPlanFragmentParams 
`thrift:"params,1" frugal:"1,default,palointernalservice.TCancelPlanFragmentParams" json:"params"` +} + +func NewBackendServiceCancelPlanFragmentArgs() *BackendServiceCancelPlanFragmentArgs { + return &BackendServiceCancelPlanFragmentArgs{} +} + +func (p *BackendServiceCancelPlanFragmentArgs) InitDefault() { +} + +var BackendServiceCancelPlanFragmentArgs_Params_DEFAULT *palointernalservice.TCancelPlanFragmentParams + +func (p *BackendServiceCancelPlanFragmentArgs) GetParams() (v *palointernalservice.TCancelPlanFragmentParams) { + if !p.IsSetParams() { + return BackendServiceCancelPlanFragmentArgs_Params_DEFAULT + } + return p.Params +} +func (p *BackendServiceCancelPlanFragmentArgs) SetParams(val *palointernalservice.TCancelPlanFragmentParams) { + p.Params = val +} + +var fieldIDToName_BackendServiceCancelPlanFragmentArgs = map[int16]string{ + 1: "params", +} + +func (p *BackendServiceCancelPlanFragmentArgs) IsSetParams() bool { + return p.Params != nil +} + +func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTCancelPlanFragmentParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} + +func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("cancel_plan_fragment_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceCancelPlanFragmentArgs(%+v)", *p) + +} + +func (p *BackendServiceCancelPlanFragmentArgs) DeepEqual(ano *BackendServiceCancelPlanFragmentArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Params) { + return false + } + return true +} + +func (p *BackendServiceCancelPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TCancelPlanFragmentParams) bool { + + if !p.Params.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceCancelPlanFragmentResult struct { + Success *palointernalservice.TCancelPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TCancelPlanFragmentResult_" json:"success,omitempty"` +} + +func NewBackendServiceCancelPlanFragmentResult() *BackendServiceCancelPlanFragmentResult { + return &BackendServiceCancelPlanFragmentResult{} +} + +func (p *BackendServiceCancelPlanFragmentResult) InitDefault() { +} + +var BackendServiceCancelPlanFragmentResult_Success_DEFAULT *palointernalservice.TCancelPlanFragmentResult_ + +func (p *BackendServiceCancelPlanFragmentResult) GetSuccess() (v *palointernalservice.TCancelPlanFragmentResult_) { + if !p.IsSetSuccess() { + return BackendServiceCancelPlanFragmentResult_Success_DEFAULT + } + return p.Success +} +func (p *BackendServiceCancelPlanFragmentResult) SetSuccess(x interface{}) { + p.Success = x.(*palointernalservice.TCancelPlanFragmentResult_) +} + +var fieldIDToName_BackendServiceCancelPlanFragmentResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceCancelPlanFragmentResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) 
+ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTCancelPlanFragmentResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("cancel_plan_fragment_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceCancelPlanFragmentResult(%+v)", *p) + +} + +func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCancelPlanFragmentResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TCancelPlanFragmentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceTransmitDataArgs struct { + Params *palointernalservice.TTransmitDataParams `thrift:"params,1" frugal:"1,default,palointernalservice.TTransmitDataParams" json:"params"` +} + +func NewBackendServiceTransmitDataArgs() *BackendServiceTransmitDataArgs { + return &BackendServiceTransmitDataArgs{} +} + +func (p *BackendServiceTransmitDataArgs) InitDefault() { +} + +var BackendServiceTransmitDataArgs_Params_DEFAULT *palointernalservice.TTransmitDataParams + +func (p 
*BackendServiceTransmitDataArgs) GetParams() (v *palointernalservice.TTransmitDataParams) { + if !p.IsSetParams() { + return BackendServiceTransmitDataArgs_Params_DEFAULT + } + return p.Params +} +func (p *BackendServiceTransmitDataArgs) SetParams(val *palointernalservice.TTransmitDataParams) { + p.Params = val +} + +var fieldIDToName_BackendServiceTransmitDataArgs = map[int16]string{ + 1: "params", +} + +func (p *BackendServiceTransmitDataArgs) IsSetParams() bool { + return p.Params != nil +} + +func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataArgs) ReadField1(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTTransmitDataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} + +func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("transmit_data_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceTransmitDataArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceTransmitDataArgs(%+v)", *p) + +} + +func (p *BackendServiceTransmitDataArgs) DeepEqual(ano *BackendServiceTransmitDataArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Params) { + return false + } + return true +} + +func (p *BackendServiceTransmitDataArgs) Field1DeepEqual(src *palointernalservice.TTransmitDataParams) bool { + + if !p.Params.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceTransmitDataResult struct { + Success *palointernalservice.TTransmitDataResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TTransmitDataResult_" json:"success,omitempty"` +} + +func NewBackendServiceTransmitDataResult() *BackendServiceTransmitDataResult { + return &BackendServiceTransmitDataResult{} +} + +func (p *BackendServiceTransmitDataResult) InitDefault() { +} + +var BackendServiceTransmitDataResult_Success_DEFAULT *palointernalservice.TTransmitDataResult_ + +func (p *BackendServiceTransmitDataResult) GetSuccess() (v *palointernalservice.TTransmitDataResult_) { + if !p.IsSetSuccess() { + return BackendServiceTransmitDataResult_Success_DEFAULT + } + return p.Success +} +func (p *BackendServiceTransmitDataResult) SetSuccess(x interface{}) { + p.Success = x.(*palointernalservice.TTransmitDataResult_) +} + +var fieldIDToName_BackendServiceTransmitDataResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceTransmitDataResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataResult) ReadField0(iprot 
thrift.TProtocol) error { + _field := palointernalservice.NewTTransmitDataResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("transmit_data_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *BackendServiceTransmitDataResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceTransmitDataResult(%+v)", *p) + +} + +func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmitDataResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalservice.TTransmitDataResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceSubmitTasksArgs struct { + Tasks []*agentservice.TAgentTaskRequest `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"` +} + +func NewBackendServiceSubmitTasksArgs() *BackendServiceSubmitTasksArgs { + return &BackendServiceSubmitTasksArgs{} +} + +func (p *BackendServiceSubmitTasksArgs) InitDefault() { +} + +func (p *BackendServiceSubmitTasksArgs) GetTasks() (v []*agentservice.TAgentTaskRequest) { + return p.Tasks +} +func (p *BackendServiceSubmitTasksArgs) SetTasks(val []*agentservice.TAgentTaskRequest) { + p.Tasks = val +} + +var fieldIDToName_BackendServiceSubmitTasksArgs = map[int16]string{ + 1: "tasks", +} + +func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksArgs) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*agentservice.TAgentTaskRequest, 0, size) + values := make([]agentservice.TAgentTaskRequest, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tasks = _field + return nil +} + +func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("submit_tasks_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil { + return err + } + for _, v := range p.Tasks { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceSubmitTasksArgs(%+v)", *p) + +} + +func (p *BackendServiceSubmitTasksArgs) DeepEqual(ano *BackendServiceSubmitTasksArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if 
!p.Field1DeepEqual(ano.Tasks) { + return false + } + return true +} + +func (p *BackendServiceSubmitTasksArgs) Field1DeepEqual(src []*agentservice.TAgentTaskRequest) bool { + + if len(p.Tasks) != len(src) { + return false + } + for i, v := range p.Tasks { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type BackendServiceSubmitTasksResult struct { + Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +} + +func NewBackendServiceSubmitTasksResult() *BackendServiceSubmitTasksResult { + return &BackendServiceSubmitTasksResult{} +} + +func (p *BackendServiceSubmitTasksResult) InitDefault() { +} + +var BackendServiceSubmitTasksResult_Success_DEFAULT *agentservice.TAgentResult_ + +func (p *BackendServiceSubmitTasksResult) GetSuccess() (v *agentservice.TAgentResult_) { + if !p.IsSetSuccess() { + return BackendServiceSubmitTasksResult_Success_DEFAULT + } + return p.Success +} +func (p *BackendServiceSubmitTasksResult) SetSuccess(x interface{}) { + p.Success = x.(*agentservice.TAgentResult_) +} + +var fieldIDToName_BackendServiceSubmitTasksResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceSubmitTasksResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksResult) ReadField0(iprot thrift.TProtocol) error { + _field := agentservice.NewTAgentResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("submit_tasks_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = 
oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceSubmitTasksResult(%+v)", *p) + +} + +func (p *BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTasksResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *BackendServiceSubmitTasksResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceMakeSnapshotArgs struct { + SnapshotRequest *agentservice.TSnapshotRequest `thrift:"snapshot_request,1" frugal:"1,default,agentservice.TSnapshotRequest" json:"snapshot_request"` +} + +func NewBackendServiceMakeSnapshotArgs() *BackendServiceMakeSnapshotArgs { + return &BackendServiceMakeSnapshotArgs{} +} + +func (p *BackendServiceMakeSnapshotArgs) InitDefault() { +} + +var BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT *agentservice.TSnapshotRequest + +func (p *BackendServiceMakeSnapshotArgs) GetSnapshotRequest() (v *agentservice.TSnapshotRequest) { + if !p.IsSetSnapshotRequest() { + return BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT + } + return p.SnapshotRequest +} +func (p *BackendServiceMakeSnapshotArgs) SetSnapshotRequest(val *agentservice.TSnapshotRequest) { + p.SnapshotRequest = val +} + +var fieldIDToName_BackendServiceMakeSnapshotArgs = map[int16]string{ + 1: "snapshot_request", +} + +func (p *BackendServiceMakeSnapshotArgs) IsSetSnapshotRequest() bool { + return p.SnapshotRequest != nil +} + +func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = 
iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { + _field := agentservice.NewTSnapshotRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.SnapshotRequest = _field + return nil +} + +func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("make_snapshot_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("snapshot_request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.SnapshotRequest.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackendServiceMakeSnapshotArgs(%+v)", *p) + +} + +func (p *BackendServiceMakeSnapshotArgs) DeepEqual(ano *BackendServiceMakeSnapshotArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SnapshotRequest) { + return false + } + return true +} + +func (p *BackendServiceMakeSnapshotArgs) Field1DeepEqual(src *agentservice.TSnapshotRequest) bool { + + if !p.SnapshotRequest.DeepEqual(src) { + return false + } + return true +} + +type BackendServiceMakeSnapshotResult struct { + Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +} + +func NewBackendServiceMakeSnapshotResult() *BackendServiceMakeSnapshotResult { + return &BackendServiceMakeSnapshotResult{} +} + +func (p *BackendServiceMakeSnapshotResult) InitDefault() { +} + +var 
BackendServiceMakeSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_ + +func (p *BackendServiceMakeSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) { + if !p.IsSetSuccess() { + return BackendServiceMakeSnapshotResult_Success_DEFAULT + } + return p.Success +} +func (p *BackendServiceMakeSnapshotResult) SetSuccess(x interface{}) { + p.Success = x.(*agentservice.TAgentResult_) +} + +var fieldIDToName_BackendServiceMakeSnapshotResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceMakeSnapshotResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotResult) ReadField0(iprot thrift.TProtocol) error { + _field := agentservice.NewTAgentResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("make_snapshot_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceClient) CleanTrash(ctx context.Context) (err error) { - var _args BackendServiceCleanTrashArgs - if err = p.Client_().Call(ctx, "clean_trash", &_args, nil); err != nil { - return + +func (p 
*BackendServiceMakeSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceClient) CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) { - var _args BackendServiceCheckStorageFormatArgs - var _result BackendServiceCheckStorageFormatResult - if err = p.Client_().Call(ctx, "check_storage_format", &_args, &_result); err != nil { - return + +func (p *BackendServiceMakeSnapshotResult) String() string { + if p == nil { + return "" } - return _result.GetSuccess(), nil + return fmt.Sprintf("BackendServiceMakeSnapshotResult(%+v)", *p) + } -func (p *BackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) { - var _args BackendServiceIngestBinlogArgs - _args.IngestBinlogRequest = ingestBinlogRequest - var _result BackendServiceIngestBinlogResult - if err = p.Client_().Call(ctx, "ingest_binlog", &_args, &_result); err != nil { - return + +func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnapshotResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return _result.GetSuccess(), nil + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true } -type BackendServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler BackendService +func (p *BackendServiceMakeSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true } -func (p *BackendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor +type BackendServiceReleaseSnapshotArgs struct { + SnapshotPath string `thrift:"snapshot_path,1" frugal:"1,default,string" json:"snapshot_path"` } -func (p *BackendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok +func NewBackendServiceReleaseSnapshotArgs() *BackendServiceReleaseSnapshotArgs { + return &BackendServiceReleaseSnapshotArgs{} } -func (p *BackendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap +func (p *BackendServiceReleaseSnapshotArgs) InitDefault() { } -func NewBackendServiceProcessor(handler BackendService) *BackendServiceProcessor { - self := &BackendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self.AddToProcessorMap("exec_plan_fragment", &backendServiceProcessorExecPlanFragment{handler: handler}) - self.AddToProcessorMap("cancel_plan_fragment", &backendServiceProcessorCancelPlanFragment{handler: handler}) - self.AddToProcessorMap("transmit_data", &backendServiceProcessorTransmitData{handler: handler}) - self.AddToProcessorMap("submit_tasks", &backendServiceProcessorSubmitTasks{handler: handler}) - self.AddToProcessorMap("make_snapshot", &backendServiceProcessorMakeSnapshot{handler: handler}) - 
self.AddToProcessorMap("release_snapshot", &backendServiceProcessorReleaseSnapshot{handler: handler}) - self.AddToProcessorMap("publish_cluster_state", &backendServiceProcessorPublishClusterState{handler: handler}) - self.AddToProcessorMap("submit_export_task", &backendServiceProcessorSubmitExportTask{handler: handler}) - self.AddToProcessorMap("get_export_status", &backendServiceProcessorGetExportStatus{handler: handler}) - self.AddToProcessorMap("erase_export_task", &backendServiceProcessorEraseExportTask{handler: handler}) - self.AddToProcessorMap("get_tablet_stat", &backendServiceProcessorGetTabletStat{handler: handler}) - self.AddToProcessorMap("get_trash_used_capacity", &backendServiceProcessorGetTrashUsedCapacity{handler: handler}) - self.AddToProcessorMap("get_disk_trash_used_capacity", &backendServiceProcessorGetDiskTrashUsedCapacity{handler: handler}) - self.AddToProcessorMap("submit_routine_load_task", &backendServiceProcessorSubmitRoutineLoadTask{handler: handler}) - self.AddToProcessorMap("open_scanner", &backendServiceProcessorOpenScanner{handler: handler}) - self.AddToProcessorMap("get_next", &backendServiceProcessorGetNext{handler: handler}) - self.AddToProcessorMap("close_scanner", &backendServiceProcessorCloseScanner{handler: handler}) - self.AddToProcessorMap("get_stream_load_record", &backendServiceProcessorGetStreamLoadRecord{handler: handler}) - self.AddToProcessorMap("clean_trash", &backendServiceProcessorCleanTrash{handler: handler}) - self.AddToProcessorMap("check_storage_format", &backendServiceProcessorCheckStorageFormat{handler: handler}) - self.AddToProcessorMap("ingest_binlog", &backendServiceProcessorIngestBinlog{handler: handler}) - return self +func (p *BackendServiceReleaseSnapshotArgs) GetSnapshotPath() (v string) { + return p.SnapshotPath } -func (p *BackendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x +func (p *BackendServiceReleaseSnapshotArgs) SetSnapshotPath(val string) { + p.SnapshotPath = val } -type backendServiceProcessorExecPlanFragment struct { - handler BackendService +var fieldIDToName_BackendServiceReleaseSnapshotArgs = map[int16]string{ + 1: "snapshot_path", } -func (p *backendServiceProcessorExecPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceExecPlanFragmentArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceExecPlanFragmentResult{} - var retval *palointernalservice.TExecPlanFragmentResult_ - if retval, err2 = p.handler.ExecPlanFragment(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing 
exec_plan_fragment: "+err2.Error()) - oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("exec_plan_fragment", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} +func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { -type backendServiceProcessorCancelPlanFragment struct { - handler BackendService -} + var fieldTypeId thrift.TType + var fieldId int16 -func (p *backendServiceProcessorCancelPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceCancelPlanFragmentArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceCancelPlanFragmentResult{} - var retval *palointernalservice.TCancelPlanFragmentResult_ - if retval, err2 = p.handler.CancelPlanFragment(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing cancel_plan_fragment: "+err2.Error()) - oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("cancel_plan_fragment", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type backendServiceProcessorTransmitData struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err) +SkipFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorTransmitData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceTransmitDataArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *BackendServiceReleaseSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceTransmitDataResult{} - var retval *palointernalservice.TTransmitDataResult_ - if retval, err2 = p.handler.TransmitData(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing transmit_data: "+err2.Error()) - oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 + var _field string + if v, err := iprot.ReadString(); err != nil { + return err } else { - result.Success = retval + _field = v } - if err2 = oprot.WriteMessageBegin("transmit_data", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + p.SnapshotPath = _field + return nil +} + +func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("release_snapshot_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type backendServiceProcessorSubmitTasks struct { - handler BackendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *backendServiceProcessorSubmitTasks) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceSubmitTasksArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceReleaseSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("snapshot_path", 
thrift.STRING, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceSubmitTasksResult{} - var retval *agentservice.TAgentResult_ - if retval, err2 = p.handler.SubmitTasks(ctx, args.Tasks); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_tasks: "+err2.Error()) - oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := oprot.WriteString(p.SnapshotPath); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("submit_tasks", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceReleaseSnapshotArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("BackendServiceReleaseSnapshotArgs(%+v)", *p) + +} + +func (p *BackendServiceReleaseSnapshotArgs) DeepEqual(ano *BackendServiceReleaseSnapshotArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.SnapshotPath) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServiceReleaseSnapshotArgs) Field1DeepEqual(src string) bool { + + if strings.Compare(p.SnapshotPath, src) != 0 { + return false } - return true, err + return true } -type backendServiceProcessorMakeSnapshot struct { - handler BackendService +type BackendServiceReleaseSnapshotResult struct { + Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` } -func (p *backendServiceProcessorMakeSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceMakeSnapshotArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewBackendServiceReleaseSnapshotResult() *BackendServiceReleaseSnapshotResult { + return &BackendServiceReleaseSnapshotResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceMakeSnapshotResult{} - var retval *agentservice.TAgentResult_ - if retval, err2 = p.handler.MakeSnapshot(ctx, args.SnapshotRequest); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing make_snapshot: "+err2.Error()) - oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("make_snapshot", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p 
*BackendServiceReleaseSnapshotResult) InitDefault() { +} + +var BackendServiceReleaseSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_ + +func (p *BackendServiceReleaseSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) { + if !p.IsSetSuccess() { + return BackendServiceReleaseSnapshotResult_Success_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Success +} +func (p *BackendServiceReleaseSnapshotResult) SetSuccess(x interface{}) { + p.Success = x.(*agentservice.TAgentResult_) +} + +var fieldIDToName_BackendServiceReleaseSnapshotResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServiceReleaseSnapshotResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type backendServiceProcessorReleaseSnapshot struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorReleaseSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceReleaseSnapshotArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceReleaseSnapshotResult) ReadField0(iprot thrift.TProtocol) error { + _field := agentservice.NewTAgentResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceReleaseSnapshotResult{} - var retval *agentservice.TAgentResult_ - if retval, err2 = p.handler.ReleaseSnapshot(ctx, args.SnapshotPath); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal 
error processing release_snapshot: "+err2.Error()) - oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("release_snapshot", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("release_snapshot_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type backendServiceProcessorPublishClusterState struct { - handler BackendService +func (p *BackendServiceReleaseSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *backendServiceProcessorPublishClusterState) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServicePublishClusterStateArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceReleaseSnapshotResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("BackendServiceReleaseSnapshotResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := BackendServicePublishClusterStateResult{} - var retval *agentservice.TAgentResult_ - if retval, err2 = p.handler.PublishClusterState(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_cluster_state: "+err2.Error()) - oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("publish_cluster_state", thrift.REPLY, seqId); err2 != nil { - err = 
err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceReleaseSnapshotResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type backendServiceProcessorSubmitExportTask struct { - handler BackendService +type BackendServicePublishClusterStateArgs struct { + Request *agentservice.TAgentPublishRequest `thrift:"request,1" frugal:"1,default,agentservice.TAgentPublishRequest" json:"request"` } -func (p *backendServiceProcessorSubmitExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceSubmitExportTaskArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewBackendServicePublishClusterStateArgs() *BackendServicePublishClusterStateArgs { + return &BackendServicePublishClusterStateArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceSubmitExportTaskResult{} - var retval *status.TStatus - if retval, err2 = p.handler.SubmitExportTask(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_export_task: "+err2.Error()) - oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submit_export_task", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServicePublishClusterStateArgs) InitDefault() { +} + +var BackendServicePublishClusterStateArgs_Request_DEFAULT *agentservice.TAgentPublishRequest + +func (p *BackendServicePublishClusterStateArgs) GetRequest() (v *agentservice.TAgentPublishRequest) { + if !p.IsSetRequest() { + return BackendServicePublishClusterStateArgs_Request_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Request +} +func (p *BackendServicePublishClusterStateArgs) SetRequest(val *agentservice.TAgentPublishRequest) { + p.Request = val +} + +var fieldIDToName_BackendServicePublishClusterStateArgs = map[int16]string{ + 1: "request", +} + +func (p *BackendServicePublishClusterStateArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } 
+ if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type backendServiceProcessorGetExportStatus struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorGetExportStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceGetExportStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServicePublishClusterStateArgs) ReadField1(iprot thrift.TProtocol) error { + _field := agentservice.NewTAgentPublishRequest() + if err := _field.Read(iprot); err != nil { + return err } + p.Request = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetExportStatusResult{} - var retval *palointernalservice.TExportStatusResult_ - if retval, err2 = p.handler.GetExportStatus(ctx, args.TaskId); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_export_status: "+err2.Error()) - oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("get_export_status", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("publish_cluster_state_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type 
backendServiceProcessorEraseExportTask struct { - handler BackendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *backendServiceProcessorEraseExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceEraseExportTaskArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServicePublishClusterStateArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceEraseExportTaskResult{} - var retval *status.TStatus - if retval, err2 = p.handler.EraseExportTask(ctx, args.TaskId); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing erase_export_task: "+err2.Error()) - oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Request.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("erase_export_task", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServicePublishClusterStateArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("BackendServicePublishClusterStateArgs(%+v)", *p) + +} + +func (p *BackendServicePublishClusterStateArgs) DeepEqual(ano *BackendServicePublishClusterStateArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Request) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServicePublishClusterStateArgs) Field1DeepEqual(src *agentservice.TAgentPublishRequest) bool { + + if !p.Request.DeepEqual(src) { + return false } - return true, err + return true } -type backendServiceProcessorGetTabletStat struct { - handler BackendService +type BackendServicePublishClusterStateResult struct { + Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` } -func (p *backendServiceProcessorGetTabletStat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := 
BackendServiceGetTabletStatArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewBackendServicePublishClusterStateResult() *BackendServicePublishClusterStateResult { + return &BackendServicePublishClusterStateResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetTabletStatResult{} - var retval *TTabletStatResult_ - if retval, err2 = p.handler.GetTabletStat(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_tablet_stat: "+err2.Error()) - oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("get_tablet_stat", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServicePublishClusterStateResult) InitDefault() { +} + +var BackendServicePublishClusterStateResult_Success_DEFAULT *agentservice.TAgentResult_ + +func (p *BackendServicePublishClusterStateResult) GetSuccess() (v *agentservice.TAgentResult_) { + if !p.IsSetSuccess() { + return BackendServicePublishClusterStateResult_Success_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Success +} +func (p *BackendServicePublishClusterStateResult) SetSuccess(x interface{}) { + p.Success = x.(*agentservice.TAgentResult_) +} + +var fieldIDToName_BackendServicePublishClusterStateResult = map[int16]string{ + 0: "success", +} + +func (p *BackendServicePublishClusterStateResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type backendServiceProcessorGetTrashUsedCapacity struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + 
+ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorGetTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceGetTrashUsedCapacityArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServicePublishClusterStateResult) ReadField0(iprot thrift.TProtocol) error { + _field := agentservice.NewTAgentResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetTrashUsedCapacityResult{} - var retval int64 - if retval, err2 = p.handler.GetTrashUsedCapacity(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_trash_used_capacity: "+err2.Error()) - oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("get_trash_used_capacity", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("publish_cluster_state_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type backendServiceProcessorGetDiskTrashUsedCapacity struct { - handler BackendService +func (p *BackendServicePublishClusterStateResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *backendServiceProcessorGetDiskTrashUsedCapacity) Process(ctx 
context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceGetDiskTrashUsedCapacityArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServicePublishClusterStateResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("BackendServicePublishClusterStateResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetDiskTrashUsedCapacityResult{} - var retval []*TDiskTrashInfo - if retval, err2 = p.handler.GetDiskTrashUsedCapacity(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_disk_trash_used_capacity: "+err2.Error()) - oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServicePublishClusterStateResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type backendServiceProcessorSubmitRoutineLoadTask struct { - handler BackendService +type BackendServiceSubmitExportTaskArgs struct { + Request *TExportTaskRequest `thrift:"request,1" frugal:"1,default,TExportTaskRequest" json:"request"` } -func (p *backendServiceProcessorSubmitRoutineLoadTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceSubmitRoutineLoadTaskArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewBackendServiceSubmitExportTaskArgs() *BackendServiceSubmitExportTaskArgs { + return &BackendServiceSubmitExportTaskArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceSubmitRoutineLoadTaskResult{} - var retval *status.TStatus - if retval, err2 = p.handler.SubmitRoutineLoadTask(ctx, args.Tasks); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_routine_load_task: "+err2.Error()) - oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submit_routine_load_task", thrift.REPLY, 
seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServiceSubmitExportTaskArgs) InitDefault() { +} + +var BackendServiceSubmitExportTaskArgs_Request_DEFAULT *TExportTaskRequest + +func (p *BackendServiceSubmitExportTaskArgs) GetRequest() (v *TExportTaskRequest) { + if !p.IsSetRequest() { + return BackendServiceSubmitExportTaskArgs_Request_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Request +} +func (p *BackendServiceSubmitExportTaskArgs) SetRequest(val *TExportTaskRequest) { + p.Request = val +} + +var fieldIDToName_BackendServiceSubmitExportTaskArgs = map[int16]string{ + 1: "request", +} + +func (p *BackendServiceSubmitExportTaskArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type backendServiceProcessorOpenScanner struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorOpenScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceOpenScannerArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceSubmitExportTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTExportTaskRequest() + if err := _field.Read(iprot); err != nil { + return err } + p.Request = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceOpenScannerResult{} - var retval *dorisexternalservice.TScanOpenResult_ - if retval, err2 = p.handler.OpenScanner(ctx, args.Params); err2 != nil { - x := 
thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing open_scanner: "+err2.Error()) - oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("open_scanner", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("submit_export_task_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type backendServiceProcessorGetNext struct { - handler BackendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *backendServiceProcessorGetNext) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceGetNextArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceSubmitExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetNextResult{} - var retval *dorisexternalservice.TScanBatchResult_ - if retval, err2 = p.handler.GetNext(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_next: "+err2.Error()) - oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Request.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("get_next", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceSubmitExportTaskArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = 
err2 + return fmt.Sprintf("BackendServiceSubmitExportTaskArgs(%+v)", *p) + +} + +func (p *BackendServiceSubmitExportTaskArgs) DeepEqual(ano *BackendServiceSubmitExportTaskArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Request) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServiceSubmitExportTaskArgs) Field1DeepEqual(src *TExportTaskRequest) bool { + + if !p.Request.DeepEqual(src) { + return false } - return true, err + return true } -type backendServiceProcessorCloseScanner struct { - handler BackendService +type BackendServiceSubmitExportTaskResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func (p *backendServiceProcessorCloseScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceCloseScannerArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewBackendServiceSubmitExportTaskResult() *BackendServiceSubmitExportTaskResult { + return &BackendServiceSubmitExportTaskResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceCloseScannerResult{} - var retval *dorisexternalservice.TScanCloseResult_ - if retval, err2 = p.handler.CloseScanner(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing close_scanner: "+err2.Error()) - oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("close_scanner", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *BackendServiceSubmitExportTaskResult) InitDefault() { +} + +var BackendServiceSubmitExportTaskResult_Success_DEFAULT *status.TStatus + +func (p *BackendServiceSubmitExportTaskResult) GetSuccess() (v *status.TStatus) { + if !p.IsSetSuccess() { + return BackendServiceSubmitExportTaskResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *BackendServiceSubmitExportTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -type backendServiceProcessorGetStreamLoadRecord struct { - handler BackendService +var fieldIDToName_BackendServiceSubmitExportTaskResult = map[int16]string{ + 0: "success", } -func (p *backendServiceProcessorGetStreamLoadRecord) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceGetStreamLoadRecordArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - 
return false, err - } +func (p *BackendServiceSubmitExportTaskResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceGetStreamLoadRecordResult{} - var retval *TStreamLoadRecordResult_ - if retval, err2 = p.handler.GetStreamLoadRecord(ctx, args.LastStreamRecordTime); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_stream_load_record: "+err2.Error()) - oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("get_stream_load_record", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return true, err -} -type backendServiceProcessorCleanTrash struct { - handler BackendService -} + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } -func (p *backendServiceProcessorCleanTrash) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceCleanTrashArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.CleanTrash(ctx); err2 != nil { - return true, err2 + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, nil -} -type backendServiceProcessorCheckStorageFormat struct { - handler BackendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *backendServiceProcessorCheckStorageFormat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceCheckStorageFormatArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceSubmitExportTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceCheckStorageFormatResult{} - var retval *TCheckStorageFormatResult_ - if retval, err2 = p.handler.CheckStorageFormat(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_storage_format: "+err2.Error()) - oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("check_storage_format", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("submit_export_task_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type backendServiceProcessorIngestBinlog struct { - handler BackendService +func (p *BackendServiceSubmitExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *backendServiceProcessorIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BackendServiceIngestBinlogArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *BackendServiceSubmitExportTaskResult) String() string { + if p == nil { + return "" } + return 
fmt.Sprintf("BackendServiceSubmitExportTaskResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := BackendServiceIngestBinlogResult{} - var retval *TIngestBinlogResult_ - if retval, err2 = p.handler.IngestBinlog(ctx, args.IngestBinlogRequest); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ingest_binlog: "+err2.Error()) - oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("ingest_binlog", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubmitExportTaskResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStatus) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type BackendServiceExecPlanFragmentArgs struct { - Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TExecPlanFragmentParams" json:"params"` +type BackendServiceGetExportStatusArgs struct { + TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"` } -func NewBackendServiceExecPlanFragmentArgs() *BackendServiceExecPlanFragmentArgs { - return &BackendServiceExecPlanFragmentArgs{} +func NewBackendServiceGetExportStatusArgs() *BackendServiceGetExportStatusArgs { + return &BackendServiceGetExportStatusArgs{} } -func (p *BackendServiceExecPlanFragmentArgs) InitDefault() { - *p = BackendServiceExecPlanFragmentArgs{} +func (p *BackendServiceGetExportStatusArgs) InitDefault() { } -var BackendServiceExecPlanFragmentArgs_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams +var BackendServiceGetExportStatusArgs_TaskId_DEFAULT *types.TUniqueId -func (p *BackendServiceExecPlanFragmentArgs) GetParams() (v *palointernalservice.TExecPlanFragmentParams) { - if !p.IsSetParams() { - return BackendServiceExecPlanFragmentArgs_Params_DEFAULT +func (p *BackendServiceGetExportStatusArgs) GetTaskId() (v *types.TUniqueId) { + if !p.IsSetTaskId() { + return BackendServiceGetExportStatusArgs_TaskId_DEFAULT } - return p.Params + return p.TaskId } -func (p *BackendServiceExecPlanFragmentArgs) SetParams(val *palointernalservice.TExecPlanFragmentParams) { - p.Params = val +func (p *BackendServiceGetExportStatusArgs) SetTaskId(val *types.TUniqueId) { + p.TaskId = val } -var fieldIDToName_BackendServiceExecPlanFragmentArgs = map[int16]string{ - 1: "params", +var fieldIDToName_BackendServiceGetExportStatusArgs = map[int16]string{ + 1: "task_id", } -func (p *BackendServiceExecPlanFragmentArgs) IsSetParams() bool { - return p.Params != nil +func (p *BackendServiceGetExportStatusArgs) IsSetTaskId() bool { + return p.TaskId != nil } -func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ 
-7912,17 +19904,14 @@ func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7937,7 +19926,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -7947,17 +19936,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTExecPlanFragmentParams() - if err := p.Params.Read(iprot); err != nil { +func (p *BackendServiceGetExportStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.TaskId = _field return nil } -func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("exec_plan_fragment_args"); err != nil { + if err = oprot.WriteStructBegin("get_export_status_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -7965,7 +19955,6 @@ func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7984,11 +19973,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceGetExportStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.TaskId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -8001,66 +19990,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentArgs) String() string { +func (p *BackendServiceGetExportStatusArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceExecPlanFragmentArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetExportStatusArgs(%+v)", *p) + } -func (p *BackendServiceExecPlanFragmentArgs) DeepEqual(ano *BackendServiceExecPlanFragmentArgs) bool { +func (p *BackendServiceGetExportStatusArgs) DeepEqual(ano *BackendServiceGetExportStatusArgs) bool { if p == ano { return true } else if p == nil || ano == nil { 
return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.TaskId) { return false } return true } -func (p *BackendServiceExecPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool { +func (p *BackendServiceGetExportStatusArgs) Field1DeepEqual(src *types.TUniqueId) bool { - if !p.Params.DeepEqual(src) { + if !p.TaskId.DeepEqual(src) { return false } return true } -type BackendServiceExecPlanFragmentResult struct { - Success *palointernalservice.TExecPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExecPlanFragmentResult_" json:"success,omitempty"` +type BackendServiceGetExportStatusResult struct { + Success *palointernalservice.TExportStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExportStatusResult_" json:"success,omitempty"` } -func NewBackendServiceExecPlanFragmentResult() *BackendServiceExecPlanFragmentResult { - return &BackendServiceExecPlanFragmentResult{} +func NewBackendServiceGetExportStatusResult() *BackendServiceGetExportStatusResult { + return &BackendServiceGetExportStatusResult{} } -func (p *BackendServiceExecPlanFragmentResult) InitDefault() { - *p = BackendServiceExecPlanFragmentResult{} +func (p *BackendServiceGetExportStatusResult) InitDefault() { } -var BackendServiceExecPlanFragmentResult_Success_DEFAULT *palointernalservice.TExecPlanFragmentResult_ +var BackendServiceGetExportStatusResult_Success_DEFAULT *palointernalservice.TExportStatusResult_ -func (p *BackendServiceExecPlanFragmentResult) GetSuccess() (v *palointernalservice.TExecPlanFragmentResult_) { +func (p *BackendServiceGetExportStatusResult) GetSuccess() (v *palointernalservice.TExportStatusResult_) { if !p.IsSetSuccess() { - return BackendServiceExecPlanFragmentResult_Success_DEFAULT + return BackendServiceGetExportStatusResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceExecPlanFragmentResult) SetSuccess(x interface{}) { - p.Success = x.(*palointernalservice.TExecPlanFragmentResult_) +func (p *BackendServiceGetExportStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*palointernalservice.TExportStatusResult_) } -var fieldIDToName_BackendServiceExecPlanFragmentResult = map[int16]string{ +var fieldIDToName_BackendServiceGetExportStatusResult = map[int16]string{ 0: "success", } -func (p *BackendServiceExecPlanFragmentResult) IsSetSuccess() bool { +func (p *BackendServiceGetExportStatusResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8084,17 +20073,14 @@ func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8109,7 +20095,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -8119,17 +20105,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = palointernalservice.NewTExecPlanFragmentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetExportStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTExportStatusResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("exec_plan_fragment_result"); err != nil { + if err = oprot.WriteStructBegin("get_export_status_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -8137,7 +20124,6 @@ func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8156,7 +20142,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetExportStatusResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -8175,14 +20161,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentResult) String() string { +func (p *BackendServiceGetExportStatusResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceExecPlanFragmentResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetExportStatusResult(%+v)", *p) + } -func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExecPlanFragmentResult) bool { +func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetExportStatusResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -8194,7 +20181,7 @@ func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExec return true } -func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TExecPlanFragmentResult_) bool { +func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernalservice.TExportStatusResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -8202,39 +20189,38 @@ func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernal return true } -type BackendServiceCancelPlanFragmentArgs struct { - Params *palointernalservice.TCancelPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TCancelPlanFragmentParams" json:"params"` +type BackendServiceEraseExportTaskArgs struct { + TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"` } -func 
NewBackendServiceCancelPlanFragmentArgs() *BackendServiceCancelPlanFragmentArgs { - return &BackendServiceCancelPlanFragmentArgs{} +func NewBackendServiceEraseExportTaskArgs() *BackendServiceEraseExportTaskArgs { + return &BackendServiceEraseExportTaskArgs{} } -func (p *BackendServiceCancelPlanFragmentArgs) InitDefault() { - *p = BackendServiceCancelPlanFragmentArgs{} +func (p *BackendServiceEraseExportTaskArgs) InitDefault() { } -var BackendServiceCancelPlanFragmentArgs_Params_DEFAULT *palointernalservice.TCancelPlanFragmentParams +var BackendServiceEraseExportTaskArgs_TaskId_DEFAULT *types.TUniqueId -func (p *BackendServiceCancelPlanFragmentArgs) GetParams() (v *palointernalservice.TCancelPlanFragmentParams) { - if !p.IsSetParams() { - return BackendServiceCancelPlanFragmentArgs_Params_DEFAULT +func (p *BackendServiceEraseExportTaskArgs) GetTaskId() (v *types.TUniqueId) { + if !p.IsSetTaskId() { + return BackendServiceEraseExportTaskArgs_TaskId_DEFAULT } - return p.Params + return p.TaskId } -func (p *BackendServiceCancelPlanFragmentArgs) SetParams(val *palointernalservice.TCancelPlanFragmentParams) { - p.Params = val +func (p *BackendServiceEraseExportTaskArgs) SetTaskId(val *types.TUniqueId) { + p.TaskId = val } -var fieldIDToName_BackendServiceCancelPlanFragmentArgs = map[int16]string{ - 1: "params", +var fieldIDToName_BackendServiceEraseExportTaskArgs = map[int16]string{ + 1: "task_id", } -func (p *BackendServiceCancelPlanFragmentArgs) IsSetParams() bool { - return p.Params != nil +func (p *BackendServiceEraseExportTaskArgs) IsSetTaskId() bool { + return p.TaskId != nil } -func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8258,17 +20244,14 @@ func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8283,7 +20266,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -8293,17 +20276,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTCancelPlanFragmentParams() - if err := p.Params.Read(iprot); err != nil { +func (p *BackendServiceEraseExportTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.TaskId = _field return nil } -func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) { +func 
(p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("cancel_plan_fragment_args"); err != nil { + if err = oprot.WriteStructBegin("erase_export_task_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -8311,7 +20295,6 @@ func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8330,11 +20313,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceEraseExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.TaskId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -8347,66 +20330,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentArgs) String() string { +func (p *BackendServiceEraseExportTaskArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCancelPlanFragmentArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceEraseExportTaskArgs(%+v)", *p) + } -func (p *BackendServiceCancelPlanFragmentArgs) DeepEqual(ano *BackendServiceCancelPlanFragmentArgs) bool { +func (p *BackendServiceEraseExportTaskArgs) DeepEqual(ano *BackendServiceEraseExportTaskArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.TaskId) { return false } return true } -func (p *BackendServiceCancelPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TCancelPlanFragmentParams) bool { +func (p *BackendServiceEraseExportTaskArgs) Field1DeepEqual(src *types.TUniqueId) bool { - if !p.Params.DeepEqual(src) { + if !p.TaskId.DeepEqual(src) { return false } return true } -type BackendServiceCancelPlanFragmentResult struct { - Success *palointernalservice.TCancelPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TCancelPlanFragmentResult_" json:"success,omitempty"` +type BackendServiceEraseExportTaskResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewBackendServiceCancelPlanFragmentResult() *BackendServiceCancelPlanFragmentResult { - return &BackendServiceCancelPlanFragmentResult{} +func NewBackendServiceEraseExportTaskResult() *BackendServiceEraseExportTaskResult { + return &BackendServiceEraseExportTaskResult{} } -func (p *BackendServiceCancelPlanFragmentResult) InitDefault() { - *p = BackendServiceCancelPlanFragmentResult{} +func (p *BackendServiceEraseExportTaskResult) InitDefault() { } -var BackendServiceCancelPlanFragmentResult_Success_DEFAULT *palointernalservice.TCancelPlanFragmentResult_ +var BackendServiceEraseExportTaskResult_Success_DEFAULT *status.TStatus -func (p *BackendServiceCancelPlanFragmentResult) GetSuccess() (v *palointernalservice.TCancelPlanFragmentResult_) { +func (p *BackendServiceEraseExportTaskResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return 
BackendServiceCancelPlanFragmentResult_Success_DEFAULT + return BackendServiceEraseExportTaskResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceCancelPlanFragmentResult) SetSuccess(x interface{}) { - p.Success = x.(*palointernalservice.TCancelPlanFragmentResult_) +func (p *BackendServiceEraseExportTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_BackendServiceCancelPlanFragmentResult = map[int16]string{ +var fieldIDToName_BackendServiceEraseExportTaskResult = map[int16]string{ 0: "success", } -func (p *BackendServiceCancelPlanFragmentResult) IsSetSuccess() bool { +func (p *BackendServiceEraseExportTaskResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8430,17 +20413,14 @@ func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8455,7 +20435,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -8465,17 +20445,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = palointernalservice.NewTCancelPlanFragmentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceEraseExportTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("cancel_plan_fragment_result"); err != nil { + if err = oprot.WriteStructBegin("erase_export_task_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -8483,7 +20464,6 @@ func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) ( fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8502,7 +20482,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceEraseExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { 
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -8521,14 +20501,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentResult) String() string { +func (p *BackendServiceEraseExportTaskResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCancelPlanFragmentResult(%+v)", *p) + return fmt.Sprintf("BackendServiceEraseExportTaskResult(%+v)", *p) + } -func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCancelPlanFragmentResult) bool { +func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceEraseExportTaskResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -8540,7 +20521,7 @@ func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCa return true } -func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TCancelPlanFragmentResult_) bool { +func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -8548,39 +20529,19 @@ func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointern return true } -type BackendServiceTransmitDataArgs struct { - Params *palointernalservice.TTransmitDataParams `thrift:"params,1" frugal:"1,default,palointernalservice.TTransmitDataParams" json:"params"` -} - -func NewBackendServiceTransmitDataArgs() *BackendServiceTransmitDataArgs { - return &BackendServiceTransmitDataArgs{} -} - -func (p *BackendServiceTransmitDataArgs) InitDefault() { - *p = BackendServiceTransmitDataArgs{} +type BackendServiceGetTabletStatArgs struct { } -var BackendServiceTransmitDataArgs_Params_DEFAULT *palointernalservice.TTransmitDataParams - -func (p *BackendServiceTransmitDataArgs) GetParams() (v *palointernalservice.TTransmitDataParams) { - if !p.IsSetParams() { - return BackendServiceTransmitDataArgs_Params_DEFAULT - } - return p.Params -} -func (p *BackendServiceTransmitDataArgs) SetParams(val *palointernalservice.TTransmitDataParams) { - p.Params = val +func NewBackendServiceGetTabletStatArgs() *BackendServiceGetTabletStatArgs { + return &BackendServiceGetTabletStatArgs{} } -var fieldIDToName_BackendServiceTransmitDataArgs = map[int16]string{ - 1: "params", +func (p *BackendServiceGetTabletStatArgs) InitDefault() { } -func (p *BackendServiceTransmitDataArgs) IsSetParams() bool { - return p.Params != nil -} +var fieldIDToName_BackendServiceGetTabletStatArgs = map[int16]string{} -func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8597,24 +20558,9 @@ func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error if fieldTypeId == thrift.STOP { break } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8628,10 +20574,8 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin 
error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -8639,25 +20583,11 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceTransmitDataArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTTransmitDataParams() - if err := p.Params.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("transmit_data_args"); err != nil { +func (p *BackendServiceGetTabletStatArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("get_tablet_stat_args"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8668,91 +20598,61 @@ func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err erro return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *BackendServiceTransmitDataArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Params.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceTransmitDataArgs) String() string { +func (p *BackendServiceGetTabletStatArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceTransmitDataArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTabletStatArgs(%+v)", *p) + } -func (p *BackendServiceTransmitDataArgs) DeepEqual(ano *BackendServiceTransmitDataArgs) bool { +func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTabletStatArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { - return false - } - return true -} - -func (p *BackendServiceTransmitDataArgs) Field1DeepEqual(src *palointernalservice.TTransmitDataParams) bool { - - if 
!p.Params.DeepEqual(src) { - return false - } return true } -type BackendServiceTransmitDataResult struct { - Success *palointernalservice.TTransmitDataResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TTransmitDataResult_" json:"success,omitempty"` +type BackendServiceGetTabletStatResult struct { + Success *TTabletStatResult_ `thrift:"success,0,optional" frugal:"0,optional,TTabletStatResult_" json:"success,omitempty"` } -func NewBackendServiceTransmitDataResult() *BackendServiceTransmitDataResult { - return &BackendServiceTransmitDataResult{} +func NewBackendServiceGetTabletStatResult() *BackendServiceGetTabletStatResult { + return &BackendServiceGetTabletStatResult{} } -func (p *BackendServiceTransmitDataResult) InitDefault() { - *p = BackendServiceTransmitDataResult{} +func (p *BackendServiceGetTabletStatResult) InitDefault() { } -var BackendServiceTransmitDataResult_Success_DEFAULT *palointernalservice.TTransmitDataResult_ +var BackendServiceGetTabletStatResult_Success_DEFAULT *TTabletStatResult_ -func (p *BackendServiceTransmitDataResult) GetSuccess() (v *palointernalservice.TTransmitDataResult_) { +func (p *BackendServiceGetTabletStatResult) GetSuccess() (v *TTabletStatResult_) { if !p.IsSetSuccess() { - return BackendServiceTransmitDataResult_Success_DEFAULT + return BackendServiceGetTabletStatResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceTransmitDataResult) SetSuccess(x interface{}) { - p.Success = x.(*palointernalservice.TTransmitDataResult_) +func (p *BackendServiceGetTabletStatResult) SetSuccess(x interface{}) { + p.Success = x.(*TTabletStatResult_) } -var fieldIDToName_BackendServiceTransmitDataResult = map[int16]string{ +var fieldIDToName_BackendServiceGetTabletStatResult = map[int16]string{ 0: "success", } -func (p *BackendServiceTransmitDataResult) IsSetSuccess() bool { +func (p *BackendServiceGetTabletStatResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8776,17 +20676,14 @@ func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8801,7 +20698,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -8811,17 +20708,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceTransmitDataResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = palointernalservice.NewTTransmitDataResult_() - if err := 
p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetTabletStatResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTTabletStatResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("transmit_data_result"); err != nil { + if err = oprot.WriteStructBegin("get_tablet_stat_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -8829,7 +20727,6 @@ func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8848,7 +20745,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceTransmitDataResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTabletStatResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -8867,14 +20764,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceTransmitDataResult) String() string { +func (p *BackendServiceGetTabletStatResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceTransmitDataResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTabletStatResult(%+v)", *p) + } -func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmitDataResult) bool { +func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabletStatResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -8886,7 +20784,7 @@ func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmit return true } -func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalservice.TTransmitDataResult_) bool { +func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -8894,30 +20792,19 @@ func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalserv return true } -type BackendServiceSubmitTasksArgs struct { - Tasks []*agentservice.TAgentTaskRequest `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"` -} - -func NewBackendServiceSubmitTasksArgs() *BackendServiceSubmitTasksArgs { - return &BackendServiceSubmitTasksArgs{} +type BackendServiceGetTrashUsedCapacityArgs struct { } -func (p *BackendServiceSubmitTasksArgs) InitDefault() { - *p = BackendServiceSubmitTasksArgs{} +func NewBackendServiceGetTrashUsedCapacityArgs() *BackendServiceGetTrashUsedCapacityArgs { + return &BackendServiceGetTrashUsedCapacityArgs{} } -func (p *BackendServiceSubmitTasksArgs) GetTasks() (v []*agentservice.TAgentTaskRequest) { - return p.Tasks -} -func (p *BackendServiceSubmitTasksArgs) SetTasks(val []*agentservice.TAgentTaskRequest) { - p.Tasks = val +func (p *BackendServiceGetTrashUsedCapacityArgs) InitDefault() { } -var fieldIDToName_BackendServiceSubmitTasksArgs = map[int16]string{ - 1: "tasks", -} +var fieldIDToName_BackendServiceGetTrashUsedCapacityArgs = map[int16]string{} -func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error) { 
+func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -8934,24 +20821,9 @@ func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error) if fieldTypeId == thrift.STOP { break } - - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8965,10 +20837,8 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -8976,37 +20846,11 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size) - for i := 0; i < size; i++ { - _elem := agentservice.NewTAgentTaskRequest() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.Tasks = append(p.Tasks, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("submit_tasks_args"); err != nil { +func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("get_trash_used_capacity_args"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9017,105 +20861,61 @@ func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil { - return err - } - for _, v := range p.Tasks { - if err := v.Write(oprot); err != nil { - return err - } - } - if 
err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *BackendServiceSubmitTasksArgs) String() string { +func (p *BackendServiceGetTrashUsedCapacityArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitTasksArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTrashUsedCapacityArgs(%+v)", *p) + } -func (p *BackendServiceSubmitTasksArgs) DeepEqual(ano *BackendServiceSubmitTasksArgs) bool { +func (p *BackendServiceGetTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetTrashUsedCapacityArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Tasks) { - return false - } - return true -} - -func (p *BackendServiceSubmitTasksArgs) Field1DeepEqual(src []*agentservice.TAgentTaskRequest) bool { - - if len(p.Tasks) != len(src) { - return false - } - for i, v := range p.Tasks { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -type BackendServiceSubmitTasksResult struct { - Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +type BackendServiceGetTrashUsedCapacityResult struct { + Success *int64 `thrift:"success,0,optional" frugal:"0,optional,i64" json:"success,omitempty"` } -func NewBackendServiceSubmitTasksResult() *BackendServiceSubmitTasksResult { - return &BackendServiceSubmitTasksResult{} +func NewBackendServiceGetTrashUsedCapacityResult() *BackendServiceGetTrashUsedCapacityResult { + return &BackendServiceGetTrashUsedCapacityResult{} } -func (p *BackendServiceSubmitTasksResult) InitDefault() { - *p = BackendServiceSubmitTasksResult{} +func (p *BackendServiceGetTrashUsedCapacityResult) InitDefault() { } -var BackendServiceSubmitTasksResult_Success_DEFAULT *agentservice.TAgentResult_ +var BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT int64 -func (p *BackendServiceSubmitTasksResult) GetSuccess() (v *agentservice.TAgentResult_) { +func (p *BackendServiceGetTrashUsedCapacityResult) GetSuccess() (v int64) { if !p.IsSetSuccess() { - return BackendServiceSubmitTasksResult_Success_DEFAULT + return BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT } - return p.Success + return *p.Success } -func (p *BackendServiceSubmitTasksResult) SetSuccess(x interface{}) { - p.Success = x.(*agentservice.TAgentResult_) +func (p *BackendServiceGetTrashUsedCapacityResult) SetSuccess(x interface{}) { + p.Success = x.(*int64) } -var fieldIDToName_BackendServiceSubmitTasksResult = map[int16]string{ +var fieldIDToName_BackendServiceGetTrashUsedCapacityResult = map[int16]string{ 0: "success", } -func (p *BackendServiceSubmitTasksResult) IsSetSuccess() bool { +func (p *BackendServiceGetTrashUsedCapacityResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9135,21 +20935,18 @@ func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err erro switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if 
err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9164,7 +20961,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9174,17 +20971,21 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = agentservice.NewTAgentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.Success = _field return nil } -func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("submit_tasks_result"); err != nil { + if err = oprot.WriteStructBegin("get_trash_used_capacity_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9192,7 +20993,6 @@ func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9211,12 +21011,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + if err = oprot.WriteFieldBegin("success", thrift.I64, 0); err != nil { goto WriteFieldBeginError } - if err := p.Success.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.Success); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9230,14 +21030,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceSubmitTasksResult) String() string { +func (p *BackendServiceGetTrashUsedCapacityResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitTasksResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTrashUsedCapacityResult(%+v)", *p) + } -func (p *BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTasksResult) bool { +func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetTrashUsedCapacityResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -9249,47 +21050,32 @@ func (p 
*BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTas return true } -func (p *BackendServiceSubmitTasksResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { +func (p *BackendServiceGetTrashUsedCapacityResult) Field0DeepEqual(src *int64) bool { - if !p.Success.DeepEqual(src) { + if p.Success == src { + return true + } else if p.Success == nil || src == nil { + return false + } + if *p.Success != *src { return false } return true } -type BackendServiceMakeSnapshotArgs struct { - SnapshotRequest *agentservice.TSnapshotRequest `thrift:"snapshot_request,1" frugal:"1,default,agentservice.TSnapshotRequest" json:"snapshot_request"` -} - -func NewBackendServiceMakeSnapshotArgs() *BackendServiceMakeSnapshotArgs { - return &BackendServiceMakeSnapshotArgs{} -} - -func (p *BackendServiceMakeSnapshotArgs) InitDefault() { - *p = BackendServiceMakeSnapshotArgs{} +type BackendServiceGetDiskTrashUsedCapacityArgs struct { } -var BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT *agentservice.TSnapshotRequest - -func (p *BackendServiceMakeSnapshotArgs) GetSnapshotRequest() (v *agentservice.TSnapshotRequest) { - if !p.IsSetSnapshotRequest() { - return BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT - } - return p.SnapshotRequest -} -func (p *BackendServiceMakeSnapshotArgs) SetSnapshotRequest(val *agentservice.TSnapshotRequest) { - p.SnapshotRequest = val +func NewBackendServiceGetDiskTrashUsedCapacityArgs() *BackendServiceGetDiskTrashUsedCapacityArgs { + return &BackendServiceGetDiskTrashUsedCapacityArgs{} } -var fieldIDToName_BackendServiceMakeSnapshotArgs = map[int16]string{ - 1: "snapshot_request", +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) InitDefault() { } -func (p *BackendServiceMakeSnapshotArgs) IsSetSnapshotRequest() bool { - return p.SnapshotRequest != nil -} +var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityArgs = map[int16]string{} -func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9306,24 +21092,9 @@ func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error if fieldTypeId == thrift.STOP { break } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9337,10 +21108,8 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -9348,25 +21117,11 @@ ReadStructEndError: return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { - p.SnapshotRequest = agentservice.NewTSnapshotRequest() - if err := p.SnapshotRequest.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("make_snapshot_args"); err != nil { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_args"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9377,91 +21132,61 @@ func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err erro return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("snapshot_request", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.SnapshotRequest.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *BackendServiceMakeSnapshotArgs) String() string { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceMakeSnapshotArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityArgs(%+v)", *p) + } -func (p *BackendServiceMakeSnapshotArgs) DeepEqual(ano *BackendServiceMakeSnapshotArgs) bool { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.SnapshotRequest) { - return false - } - return true -} - -func (p *BackendServiceMakeSnapshotArgs) Field1DeepEqual(src *agentservice.TSnapshotRequest) bool { - - if !p.SnapshotRequest.DeepEqual(src) { - return false - } return true } -type BackendServiceMakeSnapshotResult struct { - Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +type BackendServiceGetDiskTrashUsedCapacityResult struct { + Success []*TDiskTrashInfo `thrift:"success,0,optional" frugal:"0,optional,list" json:"success,omitempty"` } -func NewBackendServiceMakeSnapshotResult() *BackendServiceMakeSnapshotResult { - return &BackendServiceMakeSnapshotResult{} +func NewBackendServiceGetDiskTrashUsedCapacityResult() *BackendServiceGetDiskTrashUsedCapacityResult { + return &BackendServiceGetDiskTrashUsedCapacityResult{} } -func (p *BackendServiceMakeSnapshotResult) InitDefault() { - *p 
= BackendServiceMakeSnapshotResult{} +func (p *BackendServiceGetDiskTrashUsedCapacityResult) InitDefault() { } -var BackendServiceMakeSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_ +var BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT []*TDiskTrashInfo -func (p *BackendServiceMakeSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) GetSuccess() (v []*TDiskTrashInfo) { if !p.IsSetSuccess() { - return BackendServiceMakeSnapshotResult_Success_DEFAULT + return BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceMakeSnapshotResult) SetSuccess(x interface{}) { - p.Success = x.(*agentservice.TAgentResult_) +func (p *BackendServiceGetDiskTrashUsedCapacityResult) SetSuccess(x interface{}) { + p.Success = x.([]*TDiskTrashInfo) } -var fieldIDToName_BackendServiceMakeSnapshotResult = map[int16]string{ +var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult = map[int16]string{ 0: "success", } -func (p *BackendServiceMakeSnapshotResult) IsSetSuccess() bool { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9481,21 +21206,18 @@ func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err err switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9510,7 +21232,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9520,17 +21242,33 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = agentservice.NewTAgentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TDiskTrashInfo, 0, size) + values := make([]TDiskTrashInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err 
error) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("make_snapshot_result"); err != nil { + if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9538,7 +21276,6 @@ func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9557,12 +21294,20 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + if err = oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { goto WriteFieldBeginError } - if err := p.Success.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { + return err + } + for _, v := range p.Success { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9576,14 +21321,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceMakeSnapshotResult) String() string { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceMakeSnapshotResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityResult(%+v)", *p) + } -func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnapshotResult) bool { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -9595,38 +21341,43 @@ func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnap return true } -func (p *BackendServiceMakeSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) Field0DeepEqual(src []*TDiskTrashInfo) bool { - if !p.Success.DeepEqual(src) { + if len(p.Success) != len(src) { return false } + for i, v := range p.Success { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -type BackendServiceReleaseSnapshotArgs struct { - SnapshotPath string `thrift:"snapshot_path,1" frugal:"1,default,string" json:"snapshot_path"` +type BackendServiceSubmitRoutineLoadTaskArgs struct { + Tasks []*TRoutineLoadTask `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"` } -func NewBackendServiceReleaseSnapshotArgs() *BackendServiceReleaseSnapshotArgs { - return &BackendServiceReleaseSnapshotArgs{} +func NewBackendServiceSubmitRoutineLoadTaskArgs() *BackendServiceSubmitRoutineLoadTaskArgs { + return &BackendServiceSubmitRoutineLoadTaskArgs{} } -func (p *BackendServiceReleaseSnapshotArgs) InitDefault() { - *p = BackendServiceReleaseSnapshotArgs{} +func (p *BackendServiceSubmitRoutineLoadTaskArgs) InitDefault() { } -func (p *BackendServiceReleaseSnapshotArgs) GetSnapshotPath() (v string) { - return p.SnapshotPath +func (p *BackendServiceSubmitRoutineLoadTaskArgs) GetTasks() (v 
[]*TRoutineLoadTask) { + return p.Tasks } -func (p *BackendServiceReleaseSnapshotArgs) SetSnapshotPath(val string) { - p.SnapshotPath = val +func (p *BackendServiceSubmitRoutineLoadTaskArgs) SetTasks(val []*TRoutineLoadTask) { + p.Tasks = val } -var fieldIDToName_BackendServiceReleaseSnapshotArgs = map[int16]string{ - 1: "snapshot_path", +var fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs = map[int16]string{ + 1: "tasks", } -func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9646,21 +21397,18 @@ func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err er switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9675,7 +21423,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9685,18 +21433,33 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.SnapshotPath = v } + _field := make([]*TRoutineLoadTask, 0, size) + values := make([]TRoutineLoadTask, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tasks = _field return nil } -func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("release_snapshot_args"); err != nil { + if err = oprot.WriteStructBegin("submit_routine_load_task_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9704,7 +21467,6 @@ func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9723,11 +21485,19 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("snapshot_path", 
thrift.STRING, 1); err != nil { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.SnapshotPath); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil { + return err + } + for _, v := range p.Tasks { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9740,66 +21510,72 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotArgs) String() string { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceReleaseSnapshotArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskArgs(%+v)", *p) + } -func (p *BackendServiceReleaseSnapshotArgs) DeepEqual(ano *BackendServiceReleaseSnapshotArgs) bool { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.SnapshotPath) { + if !p.Field1DeepEqual(ano.Tasks) { return false } return true } -func (p *BackendServiceReleaseSnapshotArgs) Field1DeepEqual(src string) bool { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) Field1DeepEqual(src []*TRoutineLoadTask) bool { - if strings.Compare(p.SnapshotPath, src) != 0 { + if len(p.Tasks) != len(src) { return false } + for i, v := range p.Tasks { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -type BackendServiceReleaseSnapshotResult struct { - Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +type BackendServiceSubmitRoutineLoadTaskResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewBackendServiceReleaseSnapshotResult() *BackendServiceReleaseSnapshotResult { - return &BackendServiceReleaseSnapshotResult{} +func NewBackendServiceSubmitRoutineLoadTaskResult() *BackendServiceSubmitRoutineLoadTaskResult { + return &BackendServiceSubmitRoutineLoadTaskResult{} } -func (p *BackendServiceReleaseSnapshotResult) InitDefault() { - *p = BackendServiceReleaseSnapshotResult{} +func (p *BackendServiceSubmitRoutineLoadTaskResult) InitDefault() { } -var BackendServiceReleaseSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_ +var BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT *status.TStatus -func (p *BackendServiceReleaseSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return BackendServiceReleaseSnapshotResult_Success_DEFAULT + return BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceReleaseSnapshotResult) SetSuccess(x interface{}) { - p.Success = x.(*agentservice.TAgentResult_) +func (p *BackendServiceSubmitRoutineLoadTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_BackendServiceReleaseSnapshotResult = map[int16]string{ +var fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult = map[int16]string{ 0: 
"success", } -func (p *BackendServiceReleaseSnapshotResult) IsSetSuccess() bool { +func (p *BackendServiceSubmitRoutineLoadTaskResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9823,17 +21599,14 @@ func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9848,7 +21621,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9858,17 +21631,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = agentservice.NewTAgentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceSubmitRoutineLoadTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("release_snapshot_result"); err != nil { + if err = oprot.WriteStructBegin("submit_routine_load_task_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9876,7 +21650,6 @@ func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9895,7 +21668,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -9914,14 +21687,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotResult) String() string { +func (p *BackendServiceSubmitRoutineLoadTaskResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceReleaseSnapshotResult(%+v)", *p) + return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskResult(%+v)", *p) + } -func (p 
*BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceReleaseSnapshotResult) bool { +func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -9933,7 +21707,7 @@ func (p *BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceRelea return true } -func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { +func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -9941,39 +21715,38 @@ func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice. return true } -type BackendServicePublishClusterStateArgs struct { - Request *agentservice.TAgentPublishRequest `thrift:"request,1" frugal:"1,default,agentservice.TAgentPublishRequest" json:"request"` +type BackendServiceOpenScannerArgs struct { + Params *dorisexternalservice.TScanOpenParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanOpenParams" json:"params"` } -func NewBackendServicePublishClusterStateArgs() *BackendServicePublishClusterStateArgs { - return &BackendServicePublishClusterStateArgs{} +func NewBackendServiceOpenScannerArgs() *BackendServiceOpenScannerArgs { + return &BackendServiceOpenScannerArgs{} } -func (p *BackendServicePublishClusterStateArgs) InitDefault() { - *p = BackendServicePublishClusterStateArgs{} +func (p *BackendServiceOpenScannerArgs) InitDefault() { } -var BackendServicePublishClusterStateArgs_Request_DEFAULT *agentservice.TAgentPublishRequest +var BackendServiceOpenScannerArgs_Params_DEFAULT *dorisexternalservice.TScanOpenParams -func (p *BackendServicePublishClusterStateArgs) GetRequest() (v *agentservice.TAgentPublishRequest) { - if !p.IsSetRequest() { - return BackendServicePublishClusterStateArgs_Request_DEFAULT +func (p *BackendServiceOpenScannerArgs) GetParams() (v *dorisexternalservice.TScanOpenParams) { + if !p.IsSetParams() { + return BackendServiceOpenScannerArgs_Params_DEFAULT } - return p.Request + return p.Params } -func (p *BackendServicePublishClusterStateArgs) SetRequest(val *agentservice.TAgentPublishRequest) { - p.Request = val +func (p *BackendServiceOpenScannerArgs) SetParams(val *dorisexternalservice.TScanOpenParams) { + p.Params = val } -var fieldIDToName_BackendServicePublishClusterStateArgs = map[int16]string{ - 1: "request", +var fieldIDToName_BackendServiceOpenScannerArgs = map[int16]string{ + 1: "params", } -func (p *BackendServicePublishClusterStateArgs) IsSetRequest() bool { - return p.Request != nil +func (p *BackendServiceOpenScannerArgs) IsSetParams() bool { + return p.Params != nil } -func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9997,17 +21770,14 @@ func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10022,7 +21792,7 @@ ReadStructBeginError: ReadFieldBeginError: return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10032,17 +21802,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = agentservice.NewTAgentPublishRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *BackendServiceOpenScannerArgs) ReadField1(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanOpenParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } -func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("publish_cluster_state_args"); err != nil { + if err = oprot.WriteStructBegin("open_scanner_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10050,7 +21821,6 @@ func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10069,11 +21839,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceOpenScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Request.Write(oprot); err != nil { + if err := p.Params.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10086,66 +21856,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServicePublishClusterStateArgs) String() string { +func (p *BackendServiceOpenScannerArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServicePublishClusterStateArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceOpenScannerArgs(%+v)", *p) + } -func (p *BackendServicePublishClusterStateArgs) DeepEqual(ano *BackendServicePublishClusterStateArgs) bool { +func (p *BackendServiceOpenScannerArgs) DeepEqual(ano *BackendServiceOpenScannerArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Request) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *BackendServicePublishClusterStateArgs) Field1DeepEqual(src *agentservice.TAgentPublishRequest) bool { +func (p *BackendServiceOpenScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanOpenParams) bool { - if !p.Request.DeepEqual(src) { + if !p.Params.DeepEqual(src) { return false } return true } -type BackendServicePublishClusterStateResult struct { - Success *agentservice.TAgentResult_ 
`thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"` +type BackendServiceOpenScannerResult struct { + Success *dorisexternalservice.TScanOpenResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanOpenResult_" json:"success,omitempty"` } -func NewBackendServicePublishClusterStateResult() *BackendServicePublishClusterStateResult { - return &BackendServicePublishClusterStateResult{} +func NewBackendServiceOpenScannerResult() *BackendServiceOpenScannerResult { + return &BackendServiceOpenScannerResult{} } -func (p *BackendServicePublishClusterStateResult) InitDefault() { - *p = BackendServicePublishClusterStateResult{} +func (p *BackendServiceOpenScannerResult) InitDefault() { } -var BackendServicePublishClusterStateResult_Success_DEFAULT *agentservice.TAgentResult_ +var BackendServiceOpenScannerResult_Success_DEFAULT *dorisexternalservice.TScanOpenResult_ -func (p *BackendServicePublishClusterStateResult) GetSuccess() (v *agentservice.TAgentResult_) { +func (p *BackendServiceOpenScannerResult) GetSuccess() (v *dorisexternalservice.TScanOpenResult_) { if !p.IsSetSuccess() { - return BackendServicePublishClusterStateResult_Success_DEFAULT + return BackendServiceOpenScannerResult_Success_DEFAULT } return p.Success } -func (p *BackendServicePublishClusterStateResult) SetSuccess(x interface{}) { - p.Success = x.(*agentservice.TAgentResult_) +func (p *BackendServiceOpenScannerResult) SetSuccess(x interface{}) { + p.Success = x.(*dorisexternalservice.TScanOpenResult_) } -var fieldIDToName_BackendServicePublishClusterStateResult = map[int16]string{ +var fieldIDToName_BackendServiceOpenScannerResult = map[int16]string{ 0: "success", } -func (p *BackendServicePublishClusterStateResult) IsSetSuccess() bool { +func (p *BackendServiceOpenScannerResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10169,17 +21939,14 @@ func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) ( if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10194,7 +21961,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10204,17 +21971,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = agentservice.NewTAgentResult_() - if err := p.Success.Read(iprot); err != nil { +func (p 
*BackendServiceOpenScannerResult) ReadField0(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanOpenResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("publish_cluster_state_result"); err != nil { + if err = oprot.WriteStructBegin("open_scanner_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10222,7 +21990,6 @@ func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10241,7 +22008,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceOpenScannerResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -10260,14 +22027,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServicePublishClusterStateResult) String() string { +func (p *BackendServiceOpenScannerResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServicePublishClusterStateResult(%+v)", *p) + return fmt.Sprintf("BackendServiceOpenScannerResult(%+v)", *p) + } -func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServicePublishClusterStateResult) bool { +func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScannerResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -10279,7 +22047,7 @@ func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServiceP return true } -func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool { +func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanOpenResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -10287,39 +22055,38 @@ func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentserv return true } -type BackendServiceSubmitExportTaskArgs struct { - Request *TExportTaskRequest `thrift:"request,1" frugal:"1,default,TExportTaskRequest" json:"request"` +type BackendServiceGetNextArgs struct { + Params *dorisexternalservice.TScanNextBatchParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanNextBatchParams" json:"params"` } -func NewBackendServiceSubmitExportTaskArgs() *BackendServiceSubmitExportTaskArgs { - return &BackendServiceSubmitExportTaskArgs{} +func NewBackendServiceGetNextArgs() *BackendServiceGetNextArgs { + return &BackendServiceGetNextArgs{} } -func (p *BackendServiceSubmitExportTaskArgs) InitDefault() { - *p = BackendServiceSubmitExportTaskArgs{} +func (p *BackendServiceGetNextArgs) InitDefault() { } -var BackendServiceSubmitExportTaskArgs_Request_DEFAULT *TExportTaskRequest +var BackendServiceGetNextArgs_Params_DEFAULT *dorisexternalservice.TScanNextBatchParams -func (p *BackendServiceSubmitExportTaskArgs) GetRequest() (v *TExportTaskRequest) { - if !p.IsSetRequest() { - return BackendServiceSubmitExportTaskArgs_Request_DEFAULT 
+func (p *BackendServiceGetNextArgs) GetParams() (v *dorisexternalservice.TScanNextBatchParams) { + if !p.IsSetParams() { + return BackendServiceGetNextArgs_Params_DEFAULT } - return p.Request + return p.Params } -func (p *BackendServiceSubmitExportTaskArgs) SetRequest(val *TExportTaskRequest) { - p.Request = val +func (p *BackendServiceGetNextArgs) SetParams(val *dorisexternalservice.TScanNextBatchParams) { + p.Params = val } -var fieldIDToName_BackendServiceSubmitExportTaskArgs = map[int16]string{ - 1: "request", +var fieldIDToName_BackendServiceGetNextArgs = map[int16]string{ + 1: "params", } -func (p *BackendServiceSubmitExportTaskArgs) IsSetRequest() bool { - return p.Request != nil +func (p *BackendServiceGetNextArgs) IsSetParams() bool { + return p.Params != nil } -func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10343,17 +22110,14 @@ func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10368,7 +22132,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10378,17 +22142,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTExportTaskRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *BackendServiceGetNextArgs) ReadField1(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanNextBatchParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } -func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("submit_export_task_args"); err != nil { + if err = oprot.WriteStructBegin("get_next_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10396,7 +22161,6 @@ func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10415,11 +22179,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceGetNextArgs) writeField1(oprot 
thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Request.Write(oprot); err != nil { + if err := p.Params.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10432,66 +22196,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskArgs) String() string { +func (p *BackendServiceGetNextArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitExportTaskArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetNextArgs(%+v)", *p) + } -func (p *BackendServiceSubmitExportTaskArgs) DeepEqual(ano *BackendServiceSubmitExportTaskArgs) bool { +func (p *BackendServiceGetNextArgs) DeepEqual(ano *BackendServiceGetNextArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Request) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *BackendServiceSubmitExportTaskArgs) Field1DeepEqual(src *TExportTaskRequest) bool { +func (p *BackendServiceGetNextArgs) Field1DeepEqual(src *dorisexternalservice.TScanNextBatchParams) bool { - if !p.Request.DeepEqual(src) { + if !p.Params.DeepEqual(src) { return false } return true } -type BackendServiceSubmitExportTaskResult struct { - Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` +type BackendServiceGetNextResult struct { + Success *dorisexternalservice.TScanBatchResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanBatchResult_" json:"success,omitempty"` } -func NewBackendServiceSubmitExportTaskResult() *BackendServiceSubmitExportTaskResult { - return &BackendServiceSubmitExportTaskResult{} +func NewBackendServiceGetNextResult() *BackendServiceGetNextResult { + return &BackendServiceGetNextResult{} } -func (p *BackendServiceSubmitExportTaskResult) InitDefault() { - *p = BackendServiceSubmitExportTaskResult{} +func (p *BackendServiceGetNextResult) InitDefault() { } -var BackendServiceSubmitExportTaskResult_Success_DEFAULT *status.TStatus +var BackendServiceGetNextResult_Success_DEFAULT *dorisexternalservice.TScanBatchResult_ -func (p *BackendServiceSubmitExportTaskResult) GetSuccess() (v *status.TStatus) { +func (p *BackendServiceGetNextResult) GetSuccess() (v *dorisexternalservice.TScanBatchResult_) { if !p.IsSetSuccess() { - return BackendServiceSubmitExportTaskResult_Success_DEFAULT + return BackendServiceGetNextResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceSubmitExportTaskResult) SetSuccess(x interface{}) { - p.Success = x.(*status.TStatus) +func (p *BackendServiceGetNextResult) SetSuccess(x interface{}) { + p.Success = x.(*dorisexternalservice.TScanBatchResult_) } -var fieldIDToName_BackendServiceSubmitExportTaskResult = map[int16]string{ +var fieldIDToName_BackendServiceGetNextResult = map[int16]string{ 0: "success", } -func (p *BackendServiceSubmitExportTaskResult) IsSetSuccess() bool { +func (p *BackendServiceGetNextResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10515,17 +22279,14 @@ func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err if err = 
p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10540,7 +22301,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10550,17 +22311,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = status.NewTStatus() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetNextResult) ReadField0(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanBatchResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("submit_export_task_result"); err != nil { + if err = oprot.WriteStructBegin("get_next_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10568,7 +22330,6 @@ func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10587,7 +22348,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetNextResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -10606,14 +22367,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskResult) String() string { +func (p *BackendServiceGetNextResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitExportTaskResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetNextResult(%+v)", *p) + } -func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubmitExportTaskResult) bool { +func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -10625,7 +22387,7 @@ func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubm return true } -func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStatus) bool { +func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice.TScanBatchResult_) bool { if !p.Success.DeepEqual(src) { return 
false @@ -10633,39 +22395,38 @@ func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStat return true } -type BackendServiceGetExportStatusArgs struct { - TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"` +type BackendServiceCloseScannerArgs struct { + Params *dorisexternalservice.TScanCloseParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanCloseParams" json:"params"` } -func NewBackendServiceGetExportStatusArgs() *BackendServiceGetExportStatusArgs { - return &BackendServiceGetExportStatusArgs{} +func NewBackendServiceCloseScannerArgs() *BackendServiceCloseScannerArgs { + return &BackendServiceCloseScannerArgs{} } -func (p *BackendServiceGetExportStatusArgs) InitDefault() { - *p = BackendServiceGetExportStatusArgs{} +func (p *BackendServiceCloseScannerArgs) InitDefault() { } -var BackendServiceGetExportStatusArgs_TaskId_DEFAULT *types.TUniqueId +var BackendServiceCloseScannerArgs_Params_DEFAULT *dorisexternalservice.TScanCloseParams -func (p *BackendServiceGetExportStatusArgs) GetTaskId() (v *types.TUniqueId) { - if !p.IsSetTaskId() { - return BackendServiceGetExportStatusArgs_TaskId_DEFAULT +func (p *BackendServiceCloseScannerArgs) GetParams() (v *dorisexternalservice.TScanCloseParams) { + if !p.IsSetParams() { + return BackendServiceCloseScannerArgs_Params_DEFAULT } - return p.TaskId + return p.Params } -func (p *BackendServiceGetExportStatusArgs) SetTaskId(val *types.TUniqueId) { - p.TaskId = val +func (p *BackendServiceCloseScannerArgs) SetParams(val *dorisexternalservice.TScanCloseParams) { + p.Params = val } -var fieldIDToName_BackendServiceGetExportStatusArgs = map[int16]string{ - 1: "task_id", +var fieldIDToName_BackendServiceCloseScannerArgs = map[int16]string{ + 1: "params", } -func (p *BackendServiceGetExportStatusArgs) IsSetTaskId() bool { - return p.TaskId != nil +func (p *BackendServiceCloseScannerArgs) IsSetParams() bool { + return p.Params != nil } -func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10689,17 +22450,14 @@ func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10714,7 +22472,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10724,17 +22482,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.TaskId = types.NewTUniqueId() - if err := 
p.TaskId.Read(iprot); err != nil { +func (p *BackendServiceCloseScannerArgs) ReadField1(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanCloseParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } -func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_export_status_args"); err != nil { + if err = oprot.WriteStructBegin("close_scanner_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10742,7 +22501,6 @@ func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10761,11 +22519,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceCloseScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.TaskId.Write(oprot); err != nil { + if err := p.Params.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10778,66 +22536,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceGetExportStatusArgs) String() string { +func (p *BackendServiceCloseScannerArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetExportStatusArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceCloseScannerArgs(%+v)", *p) + } -func (p *BackendServiceGetExportStatusArgs) DeepEqual(ano *BackendServiceGetExportStatusArgs) bool { +func (p *BackendServiceCloseScannerArgs) DeepEqual(ano *BackendServiceCloseScannerArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TaskId) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *BackendServiceGetExportStatusArgs) Field1DeepEqual(src *types.TUniqueId) bool { +func (p *BackendServiceCloseScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanCloseParams) bool { - if !p.TaskId.DeepEqual(src) { + if !p.Params.DeepEqual(src) { return false } return true } -type BackendServiceGetExportStatusResult struct { - Success *palointernalservice.TExportStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExportStatusResult_" json:"success,omitempty"` +type BackendServiceCloseScannerResult struct { + Success *dorisexternalservice.TScanCloseResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanCloseResult_" json:"success,omitempty"` } -func NewBackendServiceGetExportStatusResult() *BackendServiceGetExportStatusResult { - return &BackendServiceGetExportStatusResult{} +func NewBackendServiceCloseScannerResult() *BackendServiceCloseScannerResult { + return &BackendServiceCloseScannerResult{} } -func (p *BackendServiceGetExportStatusResult) InitDefault() { - *p = BackendServiceGetExportStatusResult{} +func (p *BackendServiceCloseScannerResult) InitDefault() { } -var BackendServiceGetExportStatusResult_Success_DEFAULT *palointernalservice.TExportStatusResult_ 
+var BackendServiceCloseScannerResult_Success_DEFAULT *dorisexternalservice.TScanCloseResult_ -func (p *BackendServiceGetExportStatusResult) GetSuccess() (v *palointernalservice.TExportStatusResult_) { +func (p *BackendServiceCloseScannerResult) GetSuccess() (v *dorisexternalservice.TScanCloseResult_) { if !p.IsSetSuccess() { - return BackendServiceGetExportStatusResult_Success_DEFAULT + return BackendServiceCloseScannerResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceGetExportStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*palointernalservice.TExportStatusResult_) +func (p *BackendServiceCloseScannerResult) SetSuccess(x interface{}) { + p.Success = x.(*dorisexternalservice.TScanCloseResult_) } -var fieldIDToName_BackendServiceGetExportStatusResult = map[int16]string{ +var fieldIDToName_BackendServiceCloseScannerResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetExportStatusResult) IsSetSuccess() bool { +func (p *BackendServiceCloseScannerResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10861,17 +22619,14 @@ func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10886,7 +22641,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10896,17 +22651,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = palointernalservice.NewTExportStatusResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceCloseScannerResult) ReadField0(iprot thrift.TProtocol) error { + _field := dorisexternalservice.NewTScanCloseResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_export_status_result"); err != nil { + if err = oprot.WriteStructBegin("close_scanner_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10914,7 +22670,6 @@ func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10933,7 +22688,7 @@ 
WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCloseScannerResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -10952,14 +22707,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetExportStatusResult) String() string { +func (p *BackendServiceCloseScannerResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetExportStatusResult(%+v)", *p) + return fmt.Sprintf("BackendServiceCloseScannerResult(%+v)", *p) + } -func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetExportStatusResult) bool { +func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseScannerResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -10971,7 +22727,7 @@ func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetEx return true } -func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernalservice.TExportStatusResult_) bool { +func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanCloseResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -10979,39 +22735,29 @@ func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernals return true } -type BackendServiceEraseExportTaskArgs struct { - TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"` +type BackendServiceGetStreamLoadRecordArgs struct { + LastStreamRecordTime int64 `thrift:"last_stream_record_time,1" frugal:"1,default,i64" json:"last_stream_record_time"` } -func NewBackendServiceEraseExportTaskArgs() *BackendServiceEraseExportTaskArgs { - return &BackendServiceEraseExportTaskArgs{} +func NewBackendServiceGetStreamLoadRecordArgs() *BackendServiceGetStreamLoadRecordArgs { + return &BackendServiceGetStreamLoadRecordArgs{} } -func (p *BackendServiceEraseExportTaskArgs) InitDefault() { - *p = BackendServiceEraseExportTaskArgs{} +func (p *BackendServiceGetStreamLoadRecordArgs) InitDefault() { } -var BackendServiceEraseExportTaskArgs_TaskId_DEFAULT *types.TUniqueId - -func (p *BackendServiceEraseExportTaskArgs) GetTaskId() (v *types.TUniqueId) { - if !p.IsSetTaskId() { - return BackendServiceEraseExportTaskArgs_TaskId_DEFAULT - } - return p.TaskId -} -func (p *BackendServiceEraseExportTaskArgs) SetTaskId(val *types.TUniqueId) { - p.TaskId = val +func (p *BackendServiceGetStreamLoadRecordArgs) GetLastStreamRecordTime() (v int64) { + return p.LastStreamRecordTime } - -var fieldIDToName_BackendServiceEraseExportTaskArgs = map[int16]string{ - 1: "task_id", +func (p *BackendServiceGetStreamLoadRecordArgs) SetLastStreamRecordTime(val int64) { + p.LastStreamRecordTime = val } -func (p *BackendServiceEraseExportTaskArgs) IsSetTaskId() bool { - return p.TaskId != nil +var fieldIDToName_BackendServiceGetStreamLoadRecordArgs = map[int16]string{ + 1: "last_stream_record_time", } -func (p *BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11031,21 +22777,18 @@ func (p 
*BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err er switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11060,7 +22803,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -11070,17 +22813,21 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskArgs) ReadField1(iprot thrift.TProtocol) error { - p.TaskId = types.NewTUniqueId() - if err := p.TaskId.Read(iprot); err != nil { +func (p *BackendServiceGetStreamLoadRecordArgs) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = v } + p.LastStreamRecordTime = _field return nil } -func (p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("erase_export_task_args"); err != nil { + if err = oprot.WriteStructBegin("get_stream_load_record_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11088,7 +22835,6 @@ func (p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11107,11 +22853,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceGetStreamLoadRecordArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("last_stream_record_time", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := p.TaskId.Write(oprot); err != nil { + if err := oprot.WriteI64(p.LastStreamRecordTime); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11124,66 +22870,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceEraseExportTaskArgs) String() string { +func (p *BackendServiceGetStreamLoadRecordArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceEraseExportTaskArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetStreamLoadRecordArgs(%+v)", *p) + } -func (p *BackendServiceEraseExportTaskArgs) DeepEqual(ano *BackendServiceEraseExportTaskArgs) bool { +func (p 
*BackendServiceGetStreamLoadRecordArgs) DeepEqual(ano *BackendServiceGetStreamLoadRecordArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TaskId) { + if !p.Field1DeepEqual(ano.LastStreamRecordTime) { return false } return true } -func (p *BackendServiceEraseExportTaskArgs) Field1DeepEqual(src *types.TUniqueId) bool { +func (p *BackendServiceGetStreamLoadRecordArgs) Field1DeepEqual(src int64) bool { - if !p.TaskId.DeepEqual(src) { + if p.LastStreamRecordTime != src { return false } return true } -type BackendServiceEraseExportTaskResult struct { - Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` +type BackendServiceGetStreamLoadRecordResult struct { + Success *TStreamLoadRecordResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadRecordResult_" json:"success,omitempty"` } -func NewBackendServiceEraseExportTaskResult() *BackendServiceEraseExportTaskResult { - return &BackendServiceEraseExportTaskResult{} +func NewBackendServiceGetStreamLoadRecordResult() *BackendServiceGetStreamLoadRecordResult { + return &BackendServiceGetStreamLoadRecordResult{} } -func (p *BackendServiceEraseExportTaskResult) InitDefault() { - *p = BackendServiceEraseExportTaskResult{} +func (p *BackendServiceGetStreamLoadRecordResult) InitDefault() { } -var BackendServiceEraseExportTaskResult_Success_DEFAULT *status.TStatus +var BackendServiceGetStreamLoadRecordResult_Success_DEFAULT *TStreamLoadRecordResult_ -func (p *BackendServiceEraseExportTaskResult) GetSuccess() (v *status.TStatus) { +func (p *BackendServiceGetStreamLoadRecordResult) GetSuccess() (v *TStreamLoadRecordResult_) { if !p.IsSetSuccess() { - return BackendServiceEraseExportTaskResult_Success_DEFAULT + return BackendServiceGetStreamLoadRecordResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceEraseExportTaskResult) SetSuccess(x interface{}) { - p.Success = x.(*status.TStatus) +func (p *BackendServiceGetStreamLoadRecordResult) SetSuccess(x interface{}) { + p.Success = x.(*TStreamLoadRecordResult_) } -var fieldIDToName_BackendServiceEraseExportTaskResult = map[int16]string{ +var fieldIDToName_BackendServiceGetStreamLoadRecordResult = map[int16]string{ 0: "success", } -func (p *BackendServiceEraseExportTaskResult) IsSetSuccess() bool { +func (p *BackendServiceGetStreamLoadRecordResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11207,17 +22953,14 @@ func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11232,7 +22975,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -11242,17 +22985,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = status.NewTStatus() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetStreamLoadRecordResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTStreamLoadRecordResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("erase_export_task_result"); err != nil { + if err = oprot.WriteStructBegin("get_stream_load_record_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11260,7 +23004,6 @@ func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11279,7 +23022,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetStreamLoadRecordResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -11298,14 +23041,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceEraseExportTaskResult) String() string { +func (p *BackendServiceGetStreamLoadRecordResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceEraseExportTaskResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetStreamLoadRecordResult(%+v)", *p) + } -func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceEraseExportTaskResult) bool { +func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceGetStreamLoadRecordResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -11317,7 +23061,7 @@ func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceErase return true } -func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatus) bool { +func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLoadRecordResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -11325,20 +23069,19 @@ func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatu return true } -type BackendServiceGetTabletStatArgs struct { +type BackendServiceCheckStorageFormatArgs struct { } -func NewBackendServiceGetTabletStatArgs() *BackendServiceGetTabletStatArgs { - return &BackendServiceGetTabletStatArgs{} +func NewBackendServiceCheckStorageFormatArgs() *BackendServiceCheckStorageFormatArgs { + return &BackendServiceCheckStorageFormatArgs{} } -func (p *BackendServiceGetTabletStatArgs) InitDefault() { - *p = BackendServiceGetTabletStatArgs{} +func (p 
*BackendServiceCheckStorageFormatArgs) InitDefault() { } -var fieldIDToName_BackendServiceGetTabletStatArgs = map[int16]string{} +var fieldIDToName_BackendServiceCheckStorageFormatArgs = map[int16]string{} -func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11358,7 +23101,6 @@ func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err erro if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11381,12 +23123,11 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTabletStatArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("get_tablet_stat_args"); err != nil { +func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("check_storage_format_args"); err != nil { goto WriteStructBeginError } if p != nil { - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11403,14 +23144,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetTabletStatArgs) String() string { +func (p *BackendServiceCheckStorageFormatArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetTabletStatArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceCheckStorageFormatArgs(%+v)", *p) + } -func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTabletStatArgs) bool { +func (p *BackendServiceCheckStorageFormatArgs) DeepEqual(ano *BackendServiceCheckStorageFormatArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -11419,39 +23161,38 @@ func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTablet return true } -type BackendServiceGetTabletStatResult struct { - Success *TTabletStatResult_ `thrift:"success,0,optional" frugal:"0,optional,TTabletStatResult_" json:"success,omitempty"` +type BackendServiceCheckStorageFormatResult struct { + Success *TCheckStorageFormatResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckStorageFormatResult_" json:"success,omitempty"` } -func NewBackendServiceGetTabletStatResult() *BackendServiceGetTabletStatResult { - return &BackendServiceGetTabletStatResult{} +func NewBackendServiceCheckStorageFormatResult() *BackendServiceCheckStorageFormatResult { + return &BackendServiceCheckStorageFormatResult{} } -func (p *BackendServiceGetTabletStatResult) InitDefault() { - *p = BackendServiceGetTabletStatResult{} +func (p *BackendServiceCheckStorageFormatResult) InitDefault() { } -var BackendServiceGetTabletStatResult_Success_DEFAULT *TTabletStatResult_ +var BackendServiceCheckStorageFormatResult_Success_DEFAULT *TCheckStorageFormatResult_ -func (p *BackendServiceGetTabletStatResult) GetSuccess() (v *TTabletStatResult_) { +func (p *BackendServiceCheckStorageFormatResult) GetSuccess() (v *TCheckStorageFormatResult_) { if !p.IsSetSuccess() { - return BackendServiceGetTabletStatResult_Success_DEFAULT + return BackendServiceCheckStorageFormatResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceGetTabletStatResult) SetSuccess(x interface{}) { - p.Success = x.(*TTabletStatResult_) +func (p *BackendServiceCheckStorageFormatResult) 
SetSuccess(x interface{}) { + p.Success = x.(*TCheckStorageFormatResult_) } -var fieldIDToName_BackendServiceGetTabletStatResult = map[int16]string{ +var fieldIDToName_BackendServiceCheckStorageFormatResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetTabletStatResult) IsSetSuccess() bool { +func (p *BackendServiceCheckStorageFormatResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11475,17 +23216,14 @@ func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11500,7 +23238,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -11510,17 +23248,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTabletStatResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTTabletStatResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceCheckStorageFormatResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTCheckStorageFormatResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_tablet_stat_result"); err != nil { + if err = oprot.WriteStructBegin("check_storage_format_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11528,7 +23267,6 @@ func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11547,7 +23285,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetTabletStatResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckStorageFormatResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -11566,14 +23304,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetTabletStatResult) String() string { +func (p *BackendServiceCheckStorageFormatResult) 
String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetTabletStatResult(%+v)", *p) + return fmt.Sprintf("BackendServiceCheckStorageFormatResult(%+v)", *p) + } -func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabletStatResult) bool { +func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCheckStorageFormatResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -11585,7 +23324,7 @@ func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabl return true } -func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResult_) bool { +func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStorageFormatResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -11593,20 +23332,38 @@ func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResu return true } -type BackendServiceGetTrashUsedCapacityArgs struct { +type BackendServiceWarmUpCacheAsyncArgs struct { + Request *TWarmUpCacheAsyncRequest `thrift:"request,1" frugal:"1,default,TWarmUpCacheAsyncRequest" json:"request"` } -func NewBackendServiceGetTrashUsedCapacityArgs() *BackendServiceGetTrashUsedCapacityArgs { - return &BackendServiceGetTrashUsedCapacityArgs{} +func NewBackendServiceWarmUpCacheAsyncArgs() *BackendServiceWarmUpCacheAsyncArgs { + return &BackendServiceWarmUpCacheAsyncArgs{} } -func (p *BackendServiceGetTrashUsedCapacityArgs) InitDefault() { - *p = BackendServiceGetTrashUsedCapacityArgs{} +func (p *BackendServiceWarmUpCacheAsyncArgs) InitDefault() { } -var fieldIDToName_BackendServiceGetTrashUsedCapacityArgs = map[int16]string{} +var BackendServiceWarmUpCacheAsyncArgs_Request_DEFAULT *TWarmUpCacheAsyncRequest -func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpCacheAsyncArgs) GetRequest() (v *TWarmUpCacheAsyncRequest) { + if !p.IsSetRequest() { + return BackendServiceWarmUpCacheAsyncArgs_Request_DEFAULT + } + return p.Request +} +func (p *BackendServiceWarmUpCacheAsyncArgs) SetRequest(val *TWarmUpCacheAsyncRequest) { + p.Request = val +} + +var fieldIDToName_BackendServiceWarmUpCacheAsyncArgs = map[int16]string{ + 1: "request", +} + +func (p *BackendServiceWarmUpCacheAsyncArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *BackendServiceWarmUpCacheAsyncArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11623,10 +23380,21 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (e if fieldTypeId == thrift.STOP { break } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11640,8 +23408,10 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d 
'%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -11649,12 +23419,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("get_trash_used_capacity_args"); err != nil { +func (p *BackendServiceWarmUpCacheAsyncArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTWarmUpCacheAsyncRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Request = _field + return nil +} + +func (p *BackendServiceWarmUpCacheAsyncArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("warm_up_cache_async_args"); err != nil { goto WriteStructBeginError } if p != nil { - + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11665,61 +23448,91 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) ( return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityArgs) String() string { +func (p *BackendServiceWarmUpCacheAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Request.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceWarmUpCacheAsyncArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetTrashUsedCapacityArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceWarmUpCacheAsyncArgs(%+v)", *p) + } -func (p *BackendServiceGetTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetTrashUsedCapacityArgs) bool { +func (p *BackendServiceWarmUpCacheAsyncArgs) DeepEqual(ano *BackendServiceWarmUpCacheAsyncArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } + if !p.Field1DeepEqual(ano.Request) { + return false + } return true } -type BackendServiceGetTrashUsedCapacityResult struct { - Success *int64 `thrift:"success,0,optional" frugal:"0,optional,i64" json:"success,omitempty"` +func (p *BackendServiceWarmUpCacheAsyncArgs) Field1DeepEqual(src *TWarmUpCacheAsyncRequest) bool { + + if !p.Request.DeepEqual(src) { + return false + } + return true } -func NewBackendServiceGetTrashUsedCapacityResult() *BackendServiceGetTrashUsedCapacityResult { - return &BackendServiceGetTrashUsedCapacityResult{} +type BackendServiceWarmUpCacheAsyncResult struct { + Success 
*TWarmUpCacheAsyncResponse `thrift:"success,0,optional" frugal:"0,optional,TWarmUpCacheAsyncResponse" json:"success,omitempty"` } -func (p *BackendServiceGetTrashUsedCapacityResult) InitDefault() { - *p = BackendServiceGetTrashUsedCapacityResult{} +func NewBackendServiceWarmUpCacheAsyncResult() *BackendServiceWarmUpCacheAsyncResult { + return &BackendServiceWarmUpCacheAsyncResult{} } -var BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT int64 +func (p *BackendServiceWarmUpCacheAsyncResult) InitDefault() { +} -func (p *BackendServiceGetTrashUsedCapacityResult) GetSuccess() (v int64) { +var BackendServiceWarmUpCacheAsyncResult_Success_DEFAULT *TWarmUpCacheAsyncResponse + +func (p *BackendServiceWarmUpCacheAsyncResult) GetSuccess() (v *TWarmUpCacheAsyncResponse) { if !p.IsSetSuccess() { - return BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT + return BackendServiceWarmUpCacheAsyncResult_Success_DEFAULT } - return *p.Success + return p.Success } -func (p *BackendServiceGetTrashUsedCapacityResult) SetSuccess(x interface{}) { - p.Success = x.(*int64) +func (p *BackendServiceWarmUpCacheAsyncResult) SetSuccess(x interface{}) { + p.Success = x.(*TWarmUpCacheAsyncResponse) } -var fieldIDToName_BackendServiceGetTrashUsedCapacityResult = map[int16]string{ +var fieldIDToName_BackendServiceWarmUpCacheAsyncResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetTrashUsedCapacityResult) IsSetSuccess() bool { +func (p *BackendServiceWarmUpCacheAsyncResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpCacheAsyncResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11739,21 +23552,18 @@ func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol) switch fieldId { case 0: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11768,7 +23578,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -11778,18 +23588,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *BackendServiceWarmUpCacheAsyncResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTWarmUpCacheAsyncResponse() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Success = &v } + p.Success = _field return nil } -func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot 
thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpCacheAsyncResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_trash_used_capacity_result"); err != nil { + if err = oprot.WriteStructBegin("warm_up_cache_async_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11797,7 +23607,6 @@ func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot thrift.TProtocol) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11816,12 +23625,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpCacheAsyncResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err = oprot.WriteFieldBegin("success", thrift.I64, 0); err != nil { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.Success); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11835,14 +23644,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityResult) String() string { +func (p *BackendServiceWarmUpCacheAsyncResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetTrashUsedCapacityResult(%+v)", *p) + return fmt.Sprintf("BackendServiceWarmUpCacheAsyncResult(%+v)", *p) + } -func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetTrashUsedCapacityResult) bool { +func (p *BackendServiceWarmUpCacheAsyncResult) DeepEqual(ano *BackendServiceWarmUpCacheAsyncResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -11854,33 +23664,46 @@ func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendService return true } -func (p *BackendServiceGetTrashUsedCapacityResult) Field0DeepEqual(src *int64) bool { +func (p *BackendServiceWarmUpCacheAsyncResult) Field0DeepEqual(src *TWarmUpCacheAsyncResponse) bool { - if p.Success == src { - return true - } else if p.Success == nil || src == nil { - return false - } - if *p.Success != *src { + if !p.Success.DeepEqual(src) { return false } return true } -type BackendServiceGetDiskTrashUsedCapacityArgs struct { +type BackendServiceCheckWarmUpCacheAsyncArgs struct { + Request *TCheckWarmUpCacheAsyncRequest `thrift:"request,1" frugal:"1,default,TCheckWarmUpCacheAsyncRequest" json:"request"` } -func NewBackendServiceGetDiskTrashUsedCapacityArgs() *BackendServiceGetDiskTrashUsedCapacityArgs { - return &BackendServiceGetDiskTrashUsedCapacityArgs{} +func NewBackendServiceCheckWarmUpCacheAsyncArgs() *BackendServiceCheckWarmUpCacheAsyncArgs { + return &BackendServiceCheckWarmUpCacheAsyncArgs{} } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) InitDefault() { - *p = BackendServiceGetDiskTrashUsedCapacityArgs{} +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) InitDefault() { } -var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityArgs = map[int16]string{} +var BackendServiceCheckWarmUpCacheAsyncArgs_Request_DEFAULT *TCheckWarmUpCacheAsyncRequest -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) GetRequest() (v 
*TCheckWarmUpCacheAsyncRequest) { + if !p.IsSetRequest() { + return BackendServiceCheckWarmUpCacheAsyncArgs_Request_DEFAULT + } + return p.Request +} +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) SetRequest(val *TCheckWarmUpCacheAsyncRequest) { + p.Request = val +} + +var fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs = map[int16]string{ + 1: "request", +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11897,10 +23720,21 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol if fieldTypeId == thrift.STOP { break } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11914,8 +23748,10 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -11923,12 +23759,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_args"); err != nil { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTCheckWarmUpCacheAsyncRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Request = _field + return nil +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("check_warm_up_cache_async_args"); err != nil { goto WriteStructBeginError } if p != nil { - + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11939,61 +23788,91 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtoco return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) String() string { +func (p 
*BackendServiceCheckWarmUpCacheAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Request.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceCheckWarmUpCacheAsyncArgs(%+v)", *p) + } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityArgs) bool { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) DeepEqual(ano *BackendServiceCheckWarmUpCacheAsyncArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } + if !p.Field1DeepEqual(ano.Request) { + return false + } return true } -type BackendServiceGetDiskTrashUsedCapacityResult struct { - Success []*TDiskTrashInfo `thrift:"success,0,optional" frugal:"0,optional,list" json:"success,omitempty"` +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Field1DeepEqual(src *TCheckWarmUpCacheAsyncRequest) bool { + + if !p.Request.DeepEqual(src) { + return false + } + return true } -func NewBackendServiceGetDiskTrashUsedCapacityResult() *BackendServiceGetDiskTrashUsedCapacityResult { - return &BackendServiceGetDiskTrashUsedCapacityResult{} +type BackendServiceCheckWarmUpCacheAsyncResult struct { + Success *TCheckWarmUpCacheAsyncResponse `thrift:"success,0,optional" frugal:"0,optional,TCheckWarmUpCacheAsyncResponse" json:"success,omitempty"` } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) InitDefault() { - *p = BackendServiceGetDiskTrashUsedCapacityResult{} +func NewBackendServiceCheckWarmUpCacheAsyncResult() *BackendServiceCheckWarmUpCacheAsyncResult { + return &BackendServiceCheckWarmUpCacheAsyncResult{} } -var BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT []*TDiskTrashInfo +func (p *BackendServiceCheckWarmUpCacheAsyncResult) InitDefault() { +} -func (p *BackendServiceGetDiskTrashUsedCapacityResult) GetSuccess() (v []*TDiskTrashInfo) { +var BackendServiceCheckWarmUpCacheAsyncResult_Success_DEFAULT *TCheckWarmUpCacheAsyncResponse + +func (p *BackendServiceCheckWarmUpCacheAsyncResult) GetSuccess() (v *TCheckWarmUpCacheAsyncResponse) { if !p.IsSetSuccess() { - return BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT + return BackendServiceCheckWarmUpCacheAsyncResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) SetSuccess(x interface{}) { - p.Success = x.([]*TDiskTrashInfo) +func (p *BackendServiceCheckWarmUpCacheAsyncResult) SetSuccess(x interface{}) { + p.Success = x.(*TCheckWarmUpCacheAsyncResponse) } -var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult = map[int16]string{ +var fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) IsSetSuccess() bool { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) { +func (p 
*BackendServiceCheckWarmUpCacheAsyncResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12013,21 +23892,18 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtoc switch fieldId { case 0: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12042,7 +23918,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12052,29 +23928,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.Success = make([]*TDiskTrashInfo, 0, size) - for i := 0; i < size; i++ { - _elem := NewTDiskTrashInfo() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.Success = append(p.Success, _elem) - } - if err := iprot.ReadListEnd(); err != nil { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTCheckWarmUpCacheAsyncResponse() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_result"); err != nil { + if err = oprot.WriteStructBegin("check_warm_up_cache_async_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12082,7 +23947,6 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProto fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12101,20 +23965,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err = oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return err - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err 
:= p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12128,14 +23984,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) String() string { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityResult(%+v)", *p) + return fmt.Sprintf("BackendServiceCheckWarmUpCacheAsyncResult(%+v)", *p) + } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityResult) bool { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) DeepEqual(ano *BackendServiceCheckWarmUpCacheAsyncResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -12147,44 +24004,46 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendSer return true } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) Field0DeepEqual(src []*TDiskTrashInfo) bool { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) Field0DeepEqual(src *TCheckWarmUpCacheAsyncResponse) bool { - if len(p.Success) != len(src) { + if !p.Success.DeepEqual(src) { return false } - for i, v := range p.Success { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -type BackendServiceSubmitRoutineLoadTaskArgs struct { - Tasks []*TRoutineLoadTask `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"` +type BackendServiceSyncLoadForTabletsArgs struct { + Request *TSyncLoadForTabletsRequest `thrift:"request,1" frugal:"1,default,TSyncLoadForTabletsRequest" json:"request"` } -func NewBackendServiceSubmitRoutineLoadTaskArgs() *BackendServiceSubmitRoutineLoadTaskArgs { - return &BackendServiceSubmitRoutineLoadTaskArgs{} +func NewBackendServiceSyncLoadForTabletsArgs() *BackendServiceSyncLoadForTabletsArgs { + return &BackendServiceSyncLoadForTabletsArgs{} } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) InitDefault() { - *p = BackendServiceSubmitRoutineLoadTaskArgs{} +func (p *BackendServiceSyncLoadForTabletsArgs) InitDefault() { } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) GetTasks() (v []*TRoutineLoadTask) { - return p.Tasks +var BackendServiceSyncLoadForTabletsArgs_Request_DEFAULT *TSyncLoadForTabletsRequest + +func (p *BackendServiceSyncLoadForTabletsArgs) GetRequest() (v *TSyncLoadForTabletsRequest) { + if !p.IsSetRequest() { + return BackendServiceSyncLoadForTabletsArgs_Request_DEFAULT + } + return p.Request } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) SetTasks(val []*TRoutineLoadTask) { - p.Tasks = val +func (p *BackendServiceSyncLoadForTabletsArgs) SetRequest(val *TSyncLoadForTabletsRequest) { + p.Request = val } -var fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs = map[int16]string{ - 1: "tasks", +var fieldIDToName_BackendServiceSyncLoadForTabletsArgs = map[int16]string{ + 1: "request", } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceSyncLoadForTabletsArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *BackendServiceSyncLoadForTabletsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12204,21 +24063,18 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) ( switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto 
ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12233,7 +24089,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12243,29 +24099,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.Tasks = make([]*TRoutineLoadTask, 0, size) - for i := 0; i < size; i++ { - _elem := NewTRoutineLoadTask() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.Tasks = append(p.Tasks, _elem) - } - if err := iprot.ReadListEnd(); err != nil { +func (p *BackendServiceSyncLoadForTabletsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTSyncLoadForTabletsRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSyncLoadForTabletsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("submit_routine_load_task_args"); err != nil { + if err = oprot.WriteStructBegin("sync_load_for_tablets_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12273,7 +24118,6 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12292,19 +24136,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil { +func (p *BackendServiceSyncLoadForTabletsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil { - return err - } - for _, v := range p.Tasks { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12317,72 +24153,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) String() string { +func (p *BackendServiceSyncLoadForTabletsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskArgs(%+v)", *p) + return 
fmt.Sprintf("BackendServiceSyncLoadForTabletsArgs(%+v)", *p) + } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskArgs) bool { +func (p *BackendServiceSyncLoadForTabletsArgs) DeepEqual(ano *BackendServiceSyncLoadForTabletsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Tasks) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) Field1DeepEqual(src []*TRoutineLoadTask) bool { +func (p *BackendServiceSyncLoadForTabletsArgs) Field1DeepEqual(src *TSyncLoadForTabletsRequest) bool { - if len(p.Tasks) != len(src) { + if !p.Request.DeepEqual(src) { return false } - for i, v := range p.Tasks { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -type BackendServiceSubmitRoutineLoadTaskResult struct { - Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` +type BackendServiceSyncLoadForTabletsResult struct { + Success *TSyncLoadForTabletsResponse `thrift:"success,0,optional" frugal:"0,optional,TSyncLoadForTabletsResponse" json:"success,omitempty"` } -func NewBackendServiceSubmitRoutineLoadTaskResult() *BackendServiceSubmitRoutineLoadTaskResult { - return &BackendServiceSubmitRoutineLoadTaskResult{} +func NewBackendServiceSyncLoadForTabletsResult() *BackendServiceSyncLoadForTabletsResult { + return &BackendServiceSyncLoadForTabletsResult{} } -func (p *BackendServiceSubmitRoutineLoadTaskResult) InitDefault() { - *p = BackendServiceSubmitRoutineLoadTaskResult{} +func (p *BackendServiceSyncLoadForTabletsResult) InitDefault() { } -var BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT *status.TStatus +var BackendServiceSyncLoadForTabletsResult_Success_DEFAULT *TSyncLoadForTabletsResponse -func (p *BackendServiceSubmitRoutineLoadTaskResult) GetSuccess() (v *status.TStatus) { +func (p *BackendServiceSyncLoadForTabletsResult) GetSuccess() (v *TSyncLoadForTabletsResponse) { if !p.IsSetSuccess() { - return BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT + return BackendServiceSyncLoadForTabletsResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceSubmitRoutineLoadTaskResult) SetSuccess(x interface{}) { - p.Success = x.(*status.TStatus) +func (p *BackendServiceSyncLoadForTabletsResult) SetSuccess(x interface{}) { + p.Success = x.(*TSyncLoadForTabletsResponse) } -var fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult = map[int16]string{ +var fieldIDToName_BackendServiceSyncLoadForTabletsResult = map[int16]string{ 0: "success", } -func (p *BackendServiceSubmitRoutineLoadTaskResult) IsSetSuccess() bool { +func (p *BackendServiceSyncLoadForTabletsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceSyncLoadForTabletsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12406,17 +24236,14 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto 
ReadFieldEndError } @@ -12431,7 +24258,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12441,17 +24268,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = status.NewTStatus() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceSyncLoadForTabletsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTSyncLoadForTabletsResponse() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSyncLoadForTabletsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("submit_routine_load_task_result"); err != nil { + if err = oprot.WriteStructBegin("sync_load_for_tablets_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12459,7 +24287,6 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12478,7 +24305,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceSyncLoadForTabletsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -12497,14 +24324,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskResult) String() string { +func (p *BackendServiceSyncLoadForTabletsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskResult(%+v)", *p) + return fmt.Sprintf("BackendServiceSyncLoadForTabletsResult(%+v)", *p) + } -func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskResult) bool { +func (p *BackendServiceSyncLoadForTabletsResult) DeepEqual(ano *BackendServiceSyncLoadForTabletsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -12516,7 +24344,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServic return true } -func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status.TStatus) bool { +func (p *BackendServiceSyncLoadForTabletsResult) Field0DeepEqual(src *TSyncLoadForTabletsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -12524,39 +24352,38 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status. 
return true } -type BackendServiceOpenScannerArgs struct { - Params *dorisexternalservice.TScanOpenParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanOpenParams" json:"params"` +type BackendServiceGetTopNHotPartitionsArgs struct { + Request *TGetTopNHotPartitionsRequest `thrift:"request,1" frugal:"1,default,TGetTopNHotPartitionsRequest" json:"request"` } -func NewBackendServiceOpenScannerArgs() *BackendServiceOpenScannerArgs { - return &BackendServiceOpenScannerArgs{} +func NewBackendServiceGetTopNHotPartitionsArgs() *BackendServiceGetTopNHotPartitionsArgs { + return &BackendServiceGetTopNHotPartitionsArgs{} } -func (p *BackendServiceOpenScannerArgs) InitDefault() { - *p = BackendServiceOpenScannerArgs{} +func (p *BackendServiceGetTopNHotPartitionsArgs) InitDefault() { } -var BackendServiceOpenScannerArgs_Params_DEFAULT *dorisexternalservice.TScanOpenParams +var BackendServiceGetTopNHotPartitionsArgs_Request_DEFAULT *TGetTopNHotPartitionsRequest -func (p *BackendServiceOpenScannerArgs) GetParams() (v *dorisexternalservice.TScanOpenParams) { - if !p.IsSetParams() { - return BackendServiceOpenScannerArgs_Params_DEFAULT +func (p *BackendServiceGetTopNHotPartitionsArgs) GetRequest() (v *TGetTopNHotPartitionsRequest) { + if !p.IsSetRequest() { + return BackendServiceGetTopNHotPartitionsArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *BackendServiceOpenScannerArgs) SetParams(val *dorisexternalservice.TScanOpenParams) { - p.Params = val +func (p *BackendServiceGetTopNHotPartitionsArgs) SetRequest(val *TGetTopNHotPartitionsRequest) { + p.Request = val } -var fieldIDToName_BackendServiceOpenScannerArgs = map[int16]string{ - 1: "params", +var fieldIDToName_BackendServiceGetTopNHotPartitionsArgs = map[int16]string{ + 1: "request", } -func (p *BackendServiceOpenScannerArgs) IsSetParams() bool { - return p.Params != nil +func (p *BackendServiceGetTopNHotPartitionsArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTopNHotPartitionsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12580,17 +24407,14 @@ func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12605,7 +24429,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12615,17 +24439,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceOpenScannerArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = 
dorisexternalservice.NewTScanOpenParams() - if err := p.Params.Read(iprot); err != nil { +func (p *BackendServiceGetTopNHotPartitionsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTopNHotPartitionsRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTopNHotPartitionsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("open_scanner_args"); err != nil { + if err = oprot.WriteStructBegin("get_top_n_hot_partitions_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12633,7 +24458,6 @@ func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12652,11 +24476,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceOpenScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceGetTopNHotPartitionsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12669,66 +24493,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceOpenScannerArgs) String() string { +func (p *BackendServiceGetTopNHotPartitionsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceOpenScannerArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTopNHotPartitionsArgs(%+v)", *p) + } -func (p *BackendServiceOpenScannerArgs) DeepEqual(ano *BackendServiceOpenScannerArgs) bool { +func (p *BackendServiceGetTopNHotPartitionsArgs) DeepEqual(ano *BackendServiceGetTopNHotPartitionsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *BackendServiceOpenScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanOpenParams) bool { +func (p *BackendServiceGetTopNHotPartitionsArgs) Field1DeepEqual(src *TGetTopNHotPartitionsRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type BackendServiceOpenScannerResult struct { - Success *dorisexternalservice.TScanOpenResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanOpenResult_" json:"success,omitempty"` +type BackendServiceGetTopNHotPartitionsResult struct { + Success *TGetTopNHotPartitionsResponse `thrift:"success,0,optional" frugal:"0,optional,TGetTopNHotPartitionsResponse" json:"success,omitempty"` } -func NewBackendServiceOpenScannerResult() *BackendServiceOpenScannerResult { - return &BackendServiceOpenScannerResult{} +func NewBackendServiceGetTopNHotPartitionsResult() *BackendServiceGetTopNHotPartitionsResult { + return &BackendServiceGetTopNHotPartitionsResult{} } -func (p *BackendServiceOpenScannerResult) InitDefault() { - *p = BackendServiceOpenScannerResult{} +func (p *BackendServiceGetTopNHotPartitionsResult) InitDefault() { } 
-var BackendServiceOpenScannerResult_Success_DEFAULT *dorisexternalservice.TScanOpenResult_ +var BackendServiceGetTopNHotPartitionsResult_Success_DEFAULT *TGetTopNHotPartitionsResponse -func (p *BackendServiceOpenScannerResult) GetSuccess() (v *dorisexternalservice.TScanOpenResult_) { +func (p *BackendServiceGetTopNHotPartitionsResult) GetSuccess() (v *TGetTopNHotPartitionsResponse) { if !p.IsSetSuccess() { - return BackendServiceOpenScannerResult_Success_DEFAULT + return BackendServiceGetTopNHotPartitionsResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceOpenScannerResult) SetSuccess(x interface{}) { - p.Success = x.(*dorisexternalservice.TScanOpenResult_) +func (p *BackendServiceGetTopNHotPartitionsResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetTopNHotPartitionsResponse) } -var fieldIDToName_BackendServiceOpenScannerResult = map[int16]string{ +var fieldIDToName_BackendServiceGetTopNHotPartitionsResult = map[int16]string{ 0: "success", } -func (p *BackendServiceOpenScannerResult) IsSetSuccess() bool { +func (p *BackendServiceGetTopNHotPartitionsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTopNHotPartitionsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12752,17 +24576,14 @@ func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12777,7 +24598,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12787,17 +24608,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceOpenScannerResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = dorisexternalservice.NewTScanOpenResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetTopNHotPartitionsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetTopNHotPartitionsResponse() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTopNHotPartitionsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("open_scanner_result"); err != nil { + if err = oprot.WriteStructBegin("get_top_n_hot_partitions_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12805,7 +24627,6 @@ func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - 
} if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12824,7 +24645,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceOpenScannerResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetTopNHotPartitionsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -12843,14 +24664,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceOpenScannerResult) String() string { +func (p *BackendServiceGetTopNHotPartitionsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceOpenScannerResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetTopNHotPartitionsResult(%+v)", *p) + } -func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScannerResult) bool { +func (p *BackendServiceGetTopNHotPartitionsResult) DeepEqual(ano *BackendServiceGetTopNHotPartitionsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -12862,7 +24684,7 @@ func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScann return true } -func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanOpenResult_) bool { +func (p *BackendServiceGetTopNHotPartitionsResult) Field0DeepEqual(src *TGetTopNHotPartitionsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -12870,39 +24692,38 @@ func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalserv return true } -type BackendServiceGetNextArgs struct { - Params *dorisexternalservice.TScanNextBatchParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanNextBatchParams" json:"params"` +type BackendServiceWarmUpTabletsArgs struct { + Request *TWarmUpTabletsRequest `thrift:"request,1" frugal:"1,default,TWarmUpTabletsRequest" json:"request"` } -func NewBackendServiceGetNextArgs() *BackendServiceGetNextArgs { - return &BackendServiceGetNextArgs{} +func NewBackendServiceWarmUpTabletsArgs() *BackendServiceWarmUpTabletsArgs { + return &BackendServiceWarmUpTabletsArgs{} } -func (p *BackendServiceGetNextArgs) InitDefault() { - *p = BackendServiceGetNextArgs{} +func (p *BackendServiceWarmUpTabletsArgs) InitDefault() { } -var BackendServiceGetNextArgs_Params_DEFAULT *dorisexternalservice.TScanNextBatchParams +var BackendServiceWarmUpTabletsArgs_Request_DEFAULT *TWarmUpTabletsRequest -func (p *BackendServiceGetNextArgs) GetParams() (v *dorisexternalservice.TScanNextBatchParams) { - if !p.IsSetParams() { - return BackendServiceGetNextArgs_Params_DEFAULT +func (p *BackendServiceWarmUpTabletsArgs) GetRequest() (v *TWarmUpTabletsRequest) { + if !p.IsSetRequest() { + return BackendServiceWarmUpTabletsArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *BackendServiceGetNextArgs) SetParams(val *dorisexternalservice.TScanNextBatchParams) { - p.Params = val +func (p *BackendServiceWarmUpTabletsArgs) SetRequest(val *TWarmUpTabletsRequest) { + p.Request = val } -var fieldIDToName_BackendServiceGetNextArgs = map[int16]string{ - 1: "params", +var fieldIDToName_BackendServiceWarmUpTabletsArgs = map[int16]string{ + 1: "request", } -func (p *BackendServiceGetNextArgs) IsSetParams() bool { - return p.Params != nil +func (p *BackendServiceWarmUpTabletsArgs) IsSetRequest() bool { + return p.Request != nil 
} -func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpTabletsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12926,17 +24747,14 @@ func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12951,7 +24769,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12961,17 +24779,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetNextArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = dorisexternalservice.NewTScanNextBatchParams() - if err := p.Params.Read(iprot); err != nil { +func (p *BackendServiceWarmUpTabletsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTWarmUpTabletsRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpTabletsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_next_args"); err != nil { + if err = oprot.WriteStructBegin("warm_up_tablets_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12979,7 +24798,6 @@ func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12998,11 +24816,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetNextArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceWarmUpTabletsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13015,66 +24833,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceGetNextArgs) String() string { +func (p *BackendServiceWarmUpTabletsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetNextArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceWarmUpTabletsArgs(%+v)", *p) + } -func (p *BackendServiceGetNextArgs) DeepEqual(ano *BackendServiceGetNextArgs) bool { +func (p 
*BackendServiceWarmUpTabletsArgs) DeepEqual(ano *BackendServiceWarmUpTabletsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *BackendServiceGetNextArgs) Field1DeepEqual(src *dorisexternalservice.TScanNextBatchParams) bool { +func (p *BackendServiceWarmUpTabletsArgs) Field1DeepEqual(src *TWarmUpTabletsRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type BackendServiceGetNextResult struct { - Success *dorisexternalservice.TScanBatchResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanBatchResult_" json:"success,omitempty"` +type BackendServiceWarmUpTabletsResult struct { + Success *TWarmUpTabletsResponse `thrift:"success,0,optional" frugal:"0,optional,TWarmUpTabletsResponse" json:"success,omitempty"` } -func NewBackendServiceGetNextResult() *BackendServiceGetNextResult { - return &BackendServiceGetNextResult{} +func NewBackendServiceWarmUpTabletsResult() *BackendServiceWarmUpTabletsResult { + return &BackendServiceWarmUpTabletsResult{} } -func (p *BackendServiceGetNextResult) InitDefault() { - *p = BackendServiceGetNextResult{} +func (p *BackendServiceWarmUpTabletsResult) InitDefault() { } -var BackendServiceGetNextResult_Success_DEFAULT *dorisexternalservice.TScanBatchResult_ +var BackendServiceWarmUpTabletsResult_Success_DEFAULT *TWarmUpTabletsResponse -func (p *BackendServiceGetNextResult) GetSuccess() (v *dorisexternalservice.TScanBatchResult_) { +func (p *BackendServiceWarmUpTabletsResult) GetSuccess() (v *TWarmUpTabletsResponse) { if !p.IsSetSuccess() { - return BackendServiceGetNextResult_Success_DEFAULT + return BackendServiceWarmUpTabletsResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceGetNextResult) SetSuccess(x interface{}) { - p.Success = x.(*dorisexternalservice.TScanBatchResult_) +func (p *BackendServiceWarmUpTabletsResult) SetSuccess(x interface{}) { + p.Success = x.(*TWarmUpTabletsResponse) } -var fieldIDToName_BackendServiceGetNextResult = map[int16]string{ +var fieldIDToName_BackendServiceWarmUpTabletsResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetNextResult) IsSetSuccess() bool { +func (p *BackendServiceWarmUpTabletsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpTabletsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13098,17 +24916,14 @@ func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13123,7 +24938,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_BackendServiceWarmUpTabletsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13133,17 +24948,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetNextResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = dorisexternalservice.NewTScanBatchResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceWarmUpTabletsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTWarmUpTabletsResponse() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpTabletsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_next_result"); err != nil { + if err = oprot.WriteStructBegin("warm_up_tablets_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13151,7 +24967,6 @@ func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13170,7 +24985,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetNextResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceWarmUpTabletsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -13189,14 +25004,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetNextResult) String() string { +func (p *BackendServiceWarmUpTabletsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetNextResult(%+v)", *p) + return fmt.Sprintf("BackendServiceWarmUpTabletsResult(%+v)", *p) + } -func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult) bool { +func (p *BackendServiceWarmUpTabletsResult) DeepEqual(ano *BackendServiceWarmUpTabletsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -13208,7 +25024,7 @@ func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult return true } -func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice.TScanBatchResult_) bool { +func (p *BackendServiceWarmUpTabletsResult) Field0DeepEqual(src *TWarmUpTabletsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -13216,39 +25032,38 @@ func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice. 
return true } -type BackendServiceCloseScannerArgs struct { - Params *dorisexternalservice.TScanCloseParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanCloseParams" json:"params"` +type BackendServiceIngestBinlogArgs struct { + IngestBinlogRequest *TIngestBinlogRequest `thrift:"ingest_binlog_request,1" frugal:"1,default,TIngestBinlogRequest" json:"ingest_binlog_request"` } -func NewBackendServiceCloseScannerArgs() *BackendServiceCloseScannerArgs { - return &BackendServiceCloseScannerArgs{} +func NewBackendServiceIngestBinlogArgs() *BackendServiceIngestBinlogArgs { + return &BackendServiceIngestBinlogArgs{} } -func (p *BackendServiceCloseScannerArgs) InitDefault() { - *p = BackendServiceCloseScannerArgs{} +func (p *BackendServiceIngestBinlogArgs) InitDefault() { } -var BackendServiceCloseScannerArgs_Params_DEFAULT *dorisexternalservice.TScanCloseParams +var BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT *TIngestBinlogRequest -func (p *BackendServiceCloseScannerArgs) GetParams() (v *dorisexternalservice.TScanCloseParams) { - if !p.IsSetParams() { - return BackendServiceCloseScannerArgs_Params_DEFAULT +func (p *BackendServiceIngestBinlogArgs) GetIngestBinlogRequest() (v *TIngestBinlogRequest) { + if !p.IsSetIngestBinlogRequest() { + return BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT } - return p.Params + return p.IngestBinlogRequest } -func (p *BackendServiceCloseScannerArgs) SetParams(val *dorisexternalservice.TScanCloseParams) { - p.Params = val +func (p *BackendServiceIngestBinlogArgs) SetIngestBinlogRequest(val *TIngestBinlogRequest) { + p.IngestBinlogRequest = val } -var fieldIDToName_BackendServiceCloseScannerArgs = map[int16]string{ - 1: "params", +var fieldIDToName_BackendServiceIngestBinlogArgs = map[int16]string{ + 1: "ingest_binlog_request", } -func (p *BackendServiceCloseScannerArgs) IsSetParams() bool { - return p.Params != nil +func (p *BackendServiceIngestBinlogArgs) IsSetIngestBinlogRequest() bool { + return p.IngestBinlogRequest != nil } -func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13272,17 +25087,14 @@ func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13297,7 +25109,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13307,17 +25119,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCloseScannerArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = 
dorisexternalservice.NewTScanCloseParams() - if err := p.Params.Read(iprot); err != nil { +func (p *BackendServiceIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTIngestBinlogRequest() + if err := _field.Read(iprot); err != nil { return err } + p.IngestBinlogRequest = _field return nil } -func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("close_scanner_args"); err != nil { + if err = oprot.WriteStructBegin("ingest_binlog_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13325,7 +25138,6 @@ func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err erro fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13344,11 +25156,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceCloseScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("ingest_binlog_request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.IngestBinlogRequest.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13361,66 +25173,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceCloseScannerArgs) String() string { +func (p *BackendServiceIngestBinlogArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCloseScannerArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceIngestBinlogArgs(%+v)", *p) + } -func (p *BackendServiceCloseScannerArgs) DeepEqual(ano *BackendServiceCloseScannerArgs) bool { +func (p *BackendServiceIngestBinlogArgs) DeepEqual(ano *BackendServiceIngestBinlogArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.IngestBinlogRequest) { return false } return true } -func (p *BackendServiceCloseScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanCloseParams) bool { +func (p *BackendServiceIngestBinlogArgs) Field1DeepEqual(src *TIngestBinlogRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.IngestBinlogRequest.DeepEqual(src) { return false } return true } -type BackendServiceCloseScannerResult struct { - Success *dorisexternalservice.TScanCloseResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanCloseResult_" json:"success,omitempty"` +type BackendServiceIngestBinlogResult struct { + Success *TIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TIngestBinlogResult_" json:"success,omitempty"` } -func NewBackendServiceCloseScannerResult() *BackendServiceCloseScannerResult { - return &BackendServiceCloseScannerResult{} +func NewBackendServiceIngestBinlogResult() *BackendServiceIngestBinlogResult { + return &BackendServiceIngestBinlogResult{} } -func (p *BackendServiceCloseScannerResult) InitDefault() { - *p = BackendServiceCloseScannerResult{} +func (p *BackendServiceIngestBinlogResult) InitDefault() { } -var BackendServiceCloseScannerResult_Success_DEFAULT 
*dorisexternalservice.TScanCloseResult_ +var BackendServiceIngestBinlogResult_Success_DEFAULT *TIngestBinlogResult_ -func (p *BackendServiceCloseScannerResult) GetSuccess() (v *dorisexternalservice.TScanCloseResult_) { +func (p *BackendServiceIngestBinlogResult) GetSuccess() (v *TIngestBinlogResult_) { if !p.IsSetSuccess() { - return BackendServiceCloseScannerResult_Success_DEFAULT + return BackendServiceIngestBinlogResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceCloseScannerResult) SetSuccess(x interface{}) { - p.Success = x.(*dorisexternalservice.TScanCloseResult_) +func (p *BackendServiceIngestBinlogResult) SetSuccess(x interface{}) { + p.Success = x.(*TIngestBinlogResult_) } -var fieldIDToName_BackendServiceCloseScannerResult = map[int16]string{ +var fieldIDToName_BackendServiceIngestBinlogResult = map[int16]string{ 0: "success", } -func (p *BackendServiceCloseScannerResult) IsSetSuccess() bool { +func (p *BackendServiceIngestBinlogResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13444,17 +25256,14 @@ func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13469,7 +25278,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13479,17 +25288,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCloseScannerResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = dorisexternalservice.NewTScanCloseResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTIngestBinlogResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("close_scanner_result"); err != nil { + if err = oprot.WriteStructBegin("ingest_binlog_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13497,7 +25307,6 @@ func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13516,7 +25325,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T 
write struct end error: ", p), err) } -func (p *BackendServiceCloseScannerResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -13535,14 +25344,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceCloseScannerResult) String() string { +func (p *BackendServiceIngestBinlogResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCloseScannerResult(%+v)", *p) + return fmt.Sprintf("BackendServiceIngestBinlogResult(%+v)", *p) + } -func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseScannerResult) bool { +func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBinlogResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -13554,7 +25364,7 @@ func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseSca return true } -func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanCloseResult_) bool { +func (p *BackendServiceIngestBinlogResult) Field0DeepEqual(src *TIngestBinlogResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -13562,30 +25372,38 @@ func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalser return true } -type BackendServiceGetStreamLoadRecordArgs struct { - LastStreamRecordTime int64 `thrift:"last_stream_record_time,1" frugal:"1,default,i64" json:"last_stream_record_time"` +type BackendServiceQueryIngestBinlogArgs struct { + QueryIngestBinlogRequest *TQueryIngestBinlogRequest `thrift:"query_ingest_binlog_request,1" frugal:"1,default,TQueryIngestBinlogRequest" json:"query_ingest_binlog_request"` } -func NewBackendServiceGetStreamLoadRecordArgs() *BackendServiceGetStreamLoadRecordArgs { - return &BackendServiceGetStreamLoadRecordArgs{} +func NewBackendServiceQueryIngestBinlogArgs() *BackendServiceQueryIngestBinlogArgs { + return &BackendServiceQueryIngestBinlogArgs{} } -func (p *BackendServiceGetStreamLoadRecordArgs) InitDefault() { - *p = BackendServiceGetStreamLoadRecordArgs{} +func (p *BackendServiceQueryIngestBinlogArgs) InitDefault() { } -func (p *BackendServiceGetStreamLoadRecordArgs) GetLastStreamRecordTime() (v int64) { - return p.LastStreamRecordTime +var BackendServiceQueryIngestBinlogArgs_QueryIngestBinlogRequest_DEFAULT *TQueryIngestBinlogRequest + +func (p *BackendServiceQueryIngestBinlogArgs) GetQueryIngestBinlogRequest() (v *TQueryIngestBinlogRequest) { + if !p.IsSetQueryIngestBinlogRequest() { + return BackendServiceQueryIngestBinlogArgs_QueryIngestBinlogRequest_DEFAULT + } + return p.QueryIngestBinlogRequest } -func (p *BackendServiceGetStreamLoadRecordArgs) SetLastStreamRecordTime(val int64) { - p.LastStreamRecordTime = val +func (p *BackendServiceQueryIngestBinlogArgs) SetQueryIngestBinlogRequest(val *TQueryIngestBinlogRequest) { + p.QueryIngestBinlogRequest = val } -var fieldIDToName_BackendServiceGetStreamLoadRecordArgs = map[int16]string{ - 1: "last_stream_record_time", +var fieldIDToName_BackendServiceQueryIngestBinlogArgs = map[int16]string{ + 1: "query_ingest_binlog_request", } -func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceQueryIngestBinlogArgs) IsSetQueryIngestBinlogRequest() bool { + return 
p.QueryIngestBinlogRequest != nil +} + +func (p *BackendServiceQueryIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13605,21 +25423,18 @@ func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (er switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13634,7 +25449,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13644,18 +25459,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *BackendServiceQueryIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTQueryIngestBinlogRequest() + if err := _field.Read(iprot); err != nil { return err - } else { - p.LastStreamRecordTime = v } + p.QueryIngestBinlogRequest = _field return nil } -func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceQueryIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_stream_load_record_args"); err != nil { + if err = oprot.WriteStructBegin("query_ingest_binlog_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13663,7 +25478,6 @@ func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13682,11 +25496,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("last_stream_record_time", thrift.I64, 1); err != nil { +func (p *BackendServiceQueryIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("query_ingest_binlog_request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.LastStreamRecordTime); err != nil { + if err := p.QueryIngestBinlogRequest.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13699,66 +25513,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordArgs) String() string { +func (p *BackendServiceQueryIngestBinlogArgs) String() string { if p == nil { return "" } - return 
fmt.Sprintf("BackendServiceGetStreamLoadRecordArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceQueryIngestBinlogArgs(%+v)", *p) + } -func (p *BackendServiceGetStreamLoadRecordArgs) DeepEqual(ano *BackendServiceGetStreamLoadRecordArgs) bool { +func (p *BackendServiceQueryIngestBinlogArgs) DeepEqual(ano *BackendServiceQueryIngestBinlogArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.LastStreamRecordTime) { + if !p.Field1DeepEqual(ano.QueryIngestBinlogRequest) { return false } return true } -func (p *BackendServiceGetStreamLoadRecordArgs) Field1DeepEqual(src int64) bool { +func (p *BackendServiceQueryIngestBinlogArgs) Field1DeepEqual(src *TQueryIngestBinlogRequest) bool { - if p.LastStreamRecordTime != src { + if !p.QueryIngestBinlogRequest.DeepEqual(src) { return false } return true } -type BackendServiceGetStreamLoadRecordResult struct { - Success *TStreamLoadRecordResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadRecordResult_" json:"success,omitempty"` +type BackendServiceQueryIngestBinlogResult struct { + Success *TQueryIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TQueryIngestBinlogResult_" json:"success,omitempty"` } -func NewBackendServiceGetStreamLoadRecordResult() *BackendServiceGetStreamLoadRecordResult { - return &BackendServiceGetStreamLoadRecordResult{} +func NewBackendServiceQueryIngestBinlogResult() *BackendServiceQueryIngestBinlogResult { + return &BackendServiceQueryIngestBinlogResult{} } -func (p *BackendServiceGetStreamLoadRecordResult) InitDefault() { - *p = BackendServiceGetStreamLoadRecordResult{} +func (p *BackendServiceQueryIngestBinlogResult) InitDefault() { } -var BackendServiceGetStreamLoadRecordResult_Success_DEFAULT *TStreamLoadRecordResult_ +var BackendServiceQueryIngestBinlogResult_Success_DEFAULT *TQueryIngestBinlogResult_ -func (p *BackendServiceGetStreamLoadRecordResult) GetSuccess() (v *TStreamLoadRecordResult_) { +func (p *BackendServiceQueryIngestBinlogResult) GetSuccess() (v *TQueryIngestBinlogResult_) { if !p.IsSetSuccess() { - return BackendServiceGetStreamLoadRecordResult_Success_DEFAULT + return BackendServiceQueryIngestBinlogResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceGetStreamLoadRecordResult) SetSuccess(x interface{}) { - p.Success = x.(*TStreamLoadRecordResult_) +func (p *BackendServiceQueryIngestBinlogResult) SetSuccess(x interface{}) { + p.Success = x.(*TQueryIngestBinlogResult_) } -var fieldIDToName_BackendServiceGetStreamLoadRecordResult = map[int16]string{ +var fieldIDToName_BackendServiceQueryIngestBinlogResult = map[int16]string{ 0: "success", } -func (p *BackendServiceGetStreamLoadRecordResult) IsSetSuccess() bool { +func (p *BackendServiceQueryIngestBinlogResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceQueryIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13782,17 +25596,14 @@ func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) ( if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto 
ReadFieldEndError } @@ -13807,7 +25618,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13817,17 +25628,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTStreamLoadRecordResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceQueryIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTQueryIngestBinlogResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceQueryIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("get_stream_load_record_result"); err != nil { + if err = oprot.WriteStructBegin("query_ingest_binlog_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13835,7 +25647,6 @@ func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13854,7 +25665,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceQueryIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -13873,14 +25684,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordResult) String() string { +func (p *BackendServiceQueryIngestBinlogResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceGetStreamLoadRecordResult(%+v)", *p) + return fmt.Sprintf("BackendServiceQueryIngestBinlogResult(%+v)", *p) + } -func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceGetStreamLoadRecordResult) bool { +func (p *BackendServiceQueryIngestBinlogResult) DeepEqual(ano *BackendServiceQueryIngestBinlogResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -13892,7 +25704,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceG return true } -func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLoadRecordResult_) bool { +func (p *BackendServiceQueryIngestBinlogResult) Field0DeepEqual(src *TQueryIngestBinlogResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -13900,114 +25712,38 @@ func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLo return true } -type BackendServiceCleanTrashArgs struct { +type BackendServicePublishTopicInfoArgs struct { + 
TopicRequest *TPublishTopicRequest `thrift:"topic_request,1" frugal:"1,default,TPublishTopicRequest" json:"topic_request"` } -func NewBackendServiceCleanTrashArgs() *BackendServiceCleanTrashArgs { - return &BackendServiceCleanTrashArgs{} -} - -func (p *BackendServiceCleanTrashArgs) InitDefault() { - *p = BackendServiceCleanTrashArgs{} -} - -var fieldIDToName_BackendServiceCleanTrashArgs = map[int16]string{} - -func (p *BackendServiceCleanTrashArgs) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func NewBackendServicePublishTopicInfoArgs() *BackendServicePublishTopicInfoArgs { + return &BackendServicePublishTopicInfoArgs{} } -func (p *BackendServiceCleanTrashArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("clean_trash_args"); err != nil { - goto WriteStructBeginError - } - if p != nil { - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +func (p *BackendServicePublishTopicInfoArgs) InitDefault() { } -func (p *BackendServiceCleanTrashArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BackendServiceCleanTrashArgs(%+v)", *p) -} +var BackendServicePublishTopicInfoArgs_TopicRequest_DEFAULT *TPublishTopicRequest -func (p *BackendServiceCleanTrashArgs) DeepEqual(ano *BackendServiceCleanTrashArgs) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false +func (p *BackendServicePublishTopicInfoArgs) GetTopicRequest() (v *TPublishTopicRequest) { + if !p.IsSetTopicRequest() { + return BackendServicePublishTopicInfoArgs_TopicRequest_DEFAULT } - return true + return p.TopicRequest } - -type BackendServiceCheckStorageFormatArgs struct { +func (p *BackendServicePublishTopicInfoArgs) SetTopicRequest(val *TPublishTopicRequest) { + p.TopicRequest = val } -func NewBackendServiceCheckStorageFormatArgs() *BackendServiceCheckStorageFormatArgs { - return &BackendServiceCheckStorageFormatArgs{} +var fieldIDToName_BackendServicePublishTopicInfoArgs = map[int16]string{ + 1: "topic_request", } -func (p 
*BackendServiceCheckStorageFormatArgs) InitDefault() { - *p = BackendServiceCheckStorageFormatArgs{} +func (p *BackendServicePublishTopicInfoArgs) IsSetTopicRequest() bool { + return p.TopicRequest != nil } -var fieldIDToName_BackendServiceCheckStorageFormatArgs = map[int16]string{} - -func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServicePublishTopicInfoArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14024,10 +25760,21 @@ func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err if fieldTypeId == thrift.STOP { break } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14041,8 +25788,10 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -14050,12 +25799,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("check_storage_format_args"); err != nil { +func (p *BackendServicePublishTopicInfoArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTPublishTopicRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.TopicRequest = _field + return nil +} + +func (p *BackendServicePublishTopicInfoArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("publish_topic_info_args"); err != nil { goto WriteStructBeginError } if p != nil { - + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14066,61 +25828,91 @@ func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (er return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatArgs) String() string { +func (p *BackendServicePublishTopicInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("topic_request", thrift.STRUCT, 1); err != nil { + 
goto WriteFieldBeginError + } + if err := p.TopicRequest.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *BackendServicePublishTopicInfoArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCheckStorageFormatArgs(%+v)", *p) + return fmt.Sprintf("BackendServicePublishTopicInfoArgs(%+v)", *p) + } -func (p *BackendServiceCheckStorageFormatArgs) DeepEqual(ano *BackendServiceCheckStorageFormatArgs) bool { +func (p *BackendServicePublishTopicInfoArgs) DeepEqual(ano *BackendServicePublishTopicInfoArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } + if !p.Field1DeepEqual(ano.TopicRequest) { + return false + } return true } -type BackendServiceCheckStorageFormatResult struct { - Success *TCheckStorageFormatResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckStorageFormatResult_" json:"success,omitempty"` +func (p *BackendServicePublishTopicInfoArgs) Field1DeepEqual(src *TPublishTopicRequest) bool { + + if !p.TopicRequest.DeepEqual(src) { + return false + } + return true } -func NewBackendServiceCheckStorageFormatResult() *BackendServiceCheckStorageFormatResult { - return &BackendServiceCheckStorageFormatResult{} +type BackendServicePublishTopicInfoResult struct { + Success *TPublishTopicResult_ `thrift:"success,0,optional" frugal:"0,optional,TPublishTopicResult_" json:"success,omitempty"` } -func (p *BackendServiceCheckStorageFormatResult) InitDefault() { - *p = BackendServiceCheckStorageFormatResult{} +func NewBackendServicePublishTopicInfoResult() *BackendServicePublishTopicInfoResult { + return &BackendServicePublishTopicInfoResult{} } -var BackendServiceCheckStorageFormatResult_Success_DEFAULT *TCheckStorageFormatResult_ +func (p *BackendServicePublishTopicInfoResult) InitDefault() { +} -func (p *BackendServiceCheckStorageFormatResult) GetSuccess() (v *TCheckStorageFormatResult_) { +var BackendServicePublishTopicInfoResult_Success_DEFAULT *TPublishTopicResult_ + +func (p *BackendServicePublishTopicInfoResult) GetSuccess() (v *TPublishTopicResult_) { if !p.IsSetSuccess() { - return BackendServiceCheckStorageFormatResult_Success_DEFAULT + return BackendServicePublishTopicInfoResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceCheckStorageFormatResult) SetSuccess(x interface{}) { - p.Success = x.(*TCheckStorageFormatResult_) +func (p *BackendServicePublishTopicInfoResult) SetSuccess(x interface{}) { + p.Success = x.(*TPublishTopicResult_) } -var fieldIDToName_BackendServiceCheckStorageFormatResult = map[int16]string{ +var fieldIDToName_BackendServicePublishTopicInfoResult = map[int16]string{ 0: "success", } -func (p *BackendServiceCheckStorageFormatResult) IsSetSuccess() bool { +func (p *BackendServicePublishTopicInfoResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServicePublishTopicInfoResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14144,17 +25936,14 @@ func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14169,7 +25958,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14179,17 +25968,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTCheckStorageFormatResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServicePublishTopicInfoResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTPublishTopicResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServicePublishTopicInfoResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("check_storage_format_result"); err != nil { + if err = oprot.WriteStructBegin("publish_topic_info_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14197,7 +25987,6 @@ func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) ( fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14216,7 +26005,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServicePublishTopicInfoResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -14235,14 +26024,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatResult) String() string { +func (p *BackendServicePublishTopicInfoResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceCheckStorageFormatResult(%+v)", *p) + return fmt.Sprintf("BackendServicePublishTopicInfoResult(%+v)", *p) + } -func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCheckStorageFormatResult) bool { +func (p *BackendServicePublishTopicInfoResult) DeepEqual(ano *BackendServicePublishTopicInfoResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -14254,7 +26044,7 @@ func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCh return true } -func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStorageFormatResult_) bool { +func (p *BackendServicePublishTopicInfoResult) Field0DeepEqual(src *TPublishTopicResult_) bool { if 
!p.Success.DeepEqual(src) { return false @@ -14262,39 +26052,38 @@ func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStor return true } -type BackendServiceIngestBinlogArgs struct { - IngestBinlogRequest *TIngestBinlogRequest `thrift:"ingest_binlog_request,1" frugal:"1,default,TIngestBinlogRequest" json:"ingest_binlog_request"` +type BackendServiceGetRealtimeExecStatusArgs struct { + Request *TGetRealtimeExecStatusRequest `thrift:"request,1" frugal:"1,default,TGetRealtimeExecStatusRequest" json:"request"` } -func NewBackendServiceIngestBinlogArgs() *BackendServiceIngestBinlogArgs { - return &BackendServiceIngestBinlogArgs{} +func NewBackendServiceGetRealtimeExecStatusArgs() *BackendServiceGetRealtimeExecStatusArgs { + return &BackendServiceGetRealtimeExecStatusArgs{} } -func (p *BackendServiceIngestBinlogArgs) InitDefault() { - *p = BackendServiceIngestBinlogArgs{} +func (p *BackendServiceGetRealtimeExecStatusArgs) InitDefault() { } -var BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT *TIngestBinlogRequest +var BackendServiceGetRealtimeExecStatusArgs_Request_DEFAULT *TGetRealtimeExecStatusRequest -func (p *BackendServiceIngestBinlogArgs) GetIngestBinlogRequest() (v *TIngestBinlogRequest) { - if !p.IsSetIngestBinlogRequest() { - return BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT +func (p *BackendServiceGetRealtimeExecStatusArgs) GetRequest() (v *TGetRealtimeExecStatusRequest) { + if !p.IsSetRequest() { + return BackendServiceGetRealtimeExecStatusArgs_Request_DEFAULT } - return p.IngestBinlogRequest + return p.Request } -func (p *BackendServiceIngestBinlogArgs) SetIngestBinlogRequest(val *TIngestBinlogRequest) { - p.IngestBinlogRequest = val +func (p *BackendServiceGetRealtimeExecStatusArgs) SetRequest(val *TGetRealtimeExecStatusRequest) { + p.Request = val } -var fieldIDToName_BackendServiceIngestBinlogArgs = map[int16]string{ - 1: "ingest_binlog_request", +var fieldIDToName_BackendServiceGetRealtimeExecStatusArgs = map[int16]string{ + 1: "request", } -func (p *BackendServiceIngestBinlogArgs) IsSetIngestBinlogRequest() bool { - return p.IngestBinlogRequest != nil +func (p *BackendServiceGetRealtimeExecStatusArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetRealtimeExecStatusArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14318,17 +26107,14 @@ func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14343,7 +26129,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) @@ -14353,17 +26139,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error { - p.IngestBinlogRequest = NewTIngestBinlogRequest() - if err := p.IngestBinlogRequest.Read(iprot); err != nil { +func (p *BackendServiceGetRealtimeExecStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetRealtimeExecStatusRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetRealtimeExecStatusArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ingest_binlog_args"); err != nil { + if err = oprot.WriteStructBegin("get_realtime_exec_status_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14371,7 +26158,6 @@ func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err erro fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14390,11 +26176,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("ingest_binlog_request", thrift.STRUCT, 1); err != nil { +func (p *BackendServiceGetRealtimeExecStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.IngestBinlogRequest.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14407,66 +26193,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *BackendServiceIngestBinlogArgs) String() string { +func (p *BackendServiceGetRealtimeExecStatusArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceIngestBinlogArgs(%+v)", *p) + return fmt.Sprintf("BackendServiceGetRealtimeExecStatusArgs(%+v)", *p) + } -func (p *BackendServiceIngestBinlogArgs) DeepEqual(ano *BackendServiceIngestBinlogArgs) bool { +func (p *BackendServiceGetRealtimeExecStatusArgs) DeepEqual(ano *BackendServiceGetRealtimeExecStatusArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.IngestBinlogRequest) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *BackendServiceIngestBinlogArgs) Field1DeepEqual(src *TIngestBinlogRequest) bool { +func (p *BackendServiceGetRealtimeExecStatusArgs) Field1DeepEqual(src *TGetRealtimeExecStatusRequest) bool { - if !p.IngestBinlogRequest.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type BackendServiceIngestBinlogResult struct { - Success *TIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TIngestBinlogResult_" json:"success,omitempty"` +type BackendServiceGetRealtimeExecStatusResult struct { + Success *TGetRealtimeExecStatusResponse `thrift:"success,0,optional" frugal:"0,optional,TGetRealtimeExecStatusResponse" json:"success,omitempty"` } -func NewBackendServiceIngestBinlogResult() *BackendServiceIngestBinlogResult { - return &BackendServiceIngestBinlogResult{} +func 
NewBackendServiceGetRealtimeExecStatusResult() *BackendServiceGetRealtimeExecStatusResult { + return &BackendServiceGetRealtimeExecStatusResult{} } -func (p *BackendServiceIngestBinlogResult) InitDefault() { - *p = BackendServiceIngestBinlogResult{} +func (p *BackendServiceGetRealtimeExecStatusResult) InitDefault() { } -var BackendServiceIngestBinlogResult_Success_DEFAULT *TIngestBinlogResult_ +var BackendServiceGetRealtimeExecStatusResult_Success_DEFAULT *TGetRealtimeExecStatusResponse -func (p *BackendServiceIngestBinlogResult) GetSuccess() (v *TIngestBinlogResult_) { +func (p *BackendServiceGetRealtimeExecStatusResult) GetSuccess() (v *TGetRealtimeExecStatusResponse) { if !p.IsSetSuccess() { - return BackendServiceIngestBinlogResult_Success_DEFAULT + return BackendServiceGetRealtimeExecStatusResult_Success_DEFAULT } return p.Success } -func (p *BackendServiceIngestBinlogResult) SetSuccess(x interface{}) { - p.Success = x.(*TIngestBinlogResult_) +func (p *BackendServiceGetRealtimeExecStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetRealtimeExecStatusResponse) } -var fieldIDToName_BackendServiceIngestBinlogResult = map[int16]string{ +var fieldIDToName_BackendServiceGetRealtimeExecStatusResult = map[int16]string{ 0: "success", } -func (p *BackendServiceIngestBinlogResult) IsSetSuccess() bool { +func (p *BackendServiceGetRealtimeExecStatusResult) IsSetSuccess() bool { return p.Success != nil } -func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetRealtimeExecStatusResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14490,17 +26276,14 @@ func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14515,7 +26298,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14525,17 +26308,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTIngestBinlogResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *BackendServiceGetRealtimeExecStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetRealtimeExecStatusResponse() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetRealtimeExecStatusResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ingest_binlog_result"); 
err != nil { + if err = oprot.WriteStructBegin("get_realtime_exec_status_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14543,7 +26327,6 @@ func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14562,7 +26345,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *BackendServiceGetRealtimeExecStatusResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -14581,14 +26364,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *BackendServiceIngestBinlogResult) String() string { +func (p *BackendServiceGetRealtimeExecStatusResult) String() string { if p == nil { return "" } - return fmt.Sprintf("BackendServiceIngestBinlogResult(%+v)", *p) + return fmt.Sprintf("BackendServiceGetRealtimeExecStatusResult(%+v)", *p) + } -func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBinlogResult) bool { +func (p *BackendServiceGetRealtimeExecStatusResult) DeepEqual(ano *BackendServiceGetRealtimeExecStatusResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -14600,7 +26384,7 @@ func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBi return true } -func (p *BackendServiceIngestBinlogResult) Field0DeepEqual(src *TIngestBinlogResult_) bool { +func (p *BackendServiceGetRealtimeExecStatusResult) Field0DeepEqual(src *TGetRealtimeExecStatusResponse) bool { if !p.Success.DeepEqual(src) { return false diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go b/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go index 434f0d38..65089a73 100644 --- a/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go +++ b/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package backendservice @@ -42,19 +42,27 @@ func NewServiceInfo() *kitex.ServiceInfo { "get_next": kitex.NewMethodInfo(getNextHandler, newBackendServiceGetNextArgs, newBackendServiceGetNextResult, false), "close_scanner": kitex.NewMethodInfo(closeScannerHandler, newBackendServiceCloseScannerArgs, newBackendServiceCloseScannerResult, false), "get_stream_load_record": kitex.NewMethodInfo(getStreamLoadRecordHandler, newBackendServiceGetStreamLoadRecordArgs, newBackendServiceGetStreamLoadRecordResult, false), - "clean_trash": kitex.NewMethodInfo(cleanTrashHandler, newBackendServiceCleanTrashArgs, nil, true), "check_storage_format": kitex.NewMethodInfo(checkStorageFormatHandler, newBackendServiceCheckStorageFormatArgs, newBackendServiceCheckStorageFormatResult, false), + "warm_up_cache_async": kitex.NewMethodInfo(warmUpCacheAsyncHandler, newBackendServiceWarmUpCacheAsyncArgs, newBackendServiceWarmUpCacheAsyncResult, false), + "check_warm_up_cache_async": kitex.NewMethodInfo(checkWarmUpCacheAsyncHandler, newBackendServiceCheckWarmUpCacheAsyncArgs, newBackendServiceCheckWarmUpCacheAsyncResult, false), + "sync_load_for_tablets": kitex.NewMethodInfo(syncLoadForTabletsHandler, newBackendServiceSyncLoadForTabletsArgs, newBackendServiceSyncLoadForTabletsResult, false), + "get_top_n_hot_partitions": kitex.NewMethodInfo(getTopNHotPartitionsHandler, newBackendServiceGetTopNHotPartitionsArgs, newBackendServiceGetTopNHotPartitionsResult, false), + "warm_up_tablets": kitex.NewMethodInfo(warmUpTabletsHandler, newBackendServiceWarmUpTabletsArgs, newBackendServiceWarmUpTabletsResult, false), "ingest_binlog": kitex.NewMethodInfo(ingestBinlogHandler, newBackendServiceIngestBinlogArgs, newBackendServiceIngestBinlogResult, false), + "query_ingest_binlog": kitex.NewMethodInfo(queryIngestBinlogHandler, newBackendServiceQueryIngestBinlogArgs, newBackendServiceQueryIngestBinlogResult, false), + "publish_topic_info": kitex.NewMethodInfo(publishTopicInfoHandler, newBackendServicePublishTopicInfoArgs, newBackendServicePublishTopicInfoResult, false), + "get_realtime_exec_status": kitex.NewMethodInfo(getRealtimeExecStatusHandler, newBackendServiceGetRealtimeExecStatusArgs, newBackendServiceGetRealtimeExecStatusResult, false), } extra := map[string]interface{}{ - "PackageName": "backendservice", + "PackageName": "backendservice", + "ServiceFilePath": `thrift/BackendService.thrift`, } svcInfo := &kitex.ServiceInfo{ ServiceName: serviceName, HandlerType: handlerType, Methods: methods, PayloadCodec: kitex.Thrift, - KiteXGenVersion: "v0.4.4", + KiteXGenVersion: "v0.8.0", Extra: extra, } return svcInfo @@ -384,35 +392,112 @@ func newBackendServiceGetStreamLoadRecordResult() interface{} { return backendservice.NewBackendServiceGetStreamLoadRecordResult() } -func cleanTrashHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { +func checkStorageFormatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { - err := handler.(backendservice.BackendService).CleanTrash(ctx) + realResult := result.(*backendservice.BackendServiceCheckStorageFormatResult) + success, err := handler.(backendservice.BackendService).CheckStorageFormat(ctx) if err != nil { return err } + realResult.Success = success + return nil +} +func newBackendServiceCheckStorageFormatArgs() interface{} { + return backendservice.NewBackendServiceCheckStorageFormatArgs() +} +func newBackendServiceCheckStorageFormatResult() interface{} { + return backendservice.NewBackendServiceCheckStorageFormatResult() +} + +func 
warmUpCacheAsyncHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceWarmUpCacheAsyncArgs) + realResult := result.(*backendservice.BackendServiceWarmUpCacheAsyncResult) + success, err := handler.(backendservice.BackendService).WarmUpCacheAsync(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success return nil } -func newBackendServiceCleanTrashArgs() interface{} { - return backendservice.NewBackendServiceCleanTrashArgs() +func newBackendServiceWarmUpCacheAsyncArgs() interface{} { + return backendservice.NewBackendServiceWarmUpCacheAsyncArgs() } -func checkStorageFormatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { +func newBackendServiceWarmUpCacheAsyncResult() interface{} { + return backendservice.NewBackendServiceWarmUpCacheAsyncResult() +} - realResult := result.(*backendservice.BackendServiceCheckStorageFormatResult) - success, err := handler.(backendservice.BackendService).CheckStorageFormat(ctx) +func checkWarmUpCacheAsyncHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceCheckWarmUpCacheAsyncArgs) + realResult := result.(*backendservice.BackendServiceCheckWarmUpCacheAsyncResult) + success, err := handler.(backendservice.BackendService).CheckWarmUpCacheAsync(ctx, realArg.Request) if err != nil { return err } realResult.Success = success return nil } -func newBackendServiceCheckStorageFormatArgs() interface{} { - return backendservice.NewBackendServiceCheckStorageFormatArgs() +func newBackendServiceCheckWarmUpCacheAsyncArgs() interface{} { + return backendservice.NewBackendServiceCheckWarmUpCacheAsyncArgs() } -func newBackendServiceCheckStorageFormatResult() interface{} { - return backendservice.NewBackendServiceCheckStorageFormatResult() +func newBackendServiceCheckWarmUpCacheAsyncResult() interface{} { + return backendservice.NewBackendServiceCheckWarmUpCacheAsyncResult() +} + +func syncLoadForTabletsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceSyncLoadForTabletsArgs) + realResult := result.(*backendservice.BackendServiceSyncLoadForTabletsResult) + success, err := handler.(backendservice.BackendService).SyncLoadForTablets(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServiceSyncLoadForTabletsArgs() interface{} { + return backendservice.NewBackendServiceSyncLoadForTabletsArgs() +} + +func newBackendServiceSyncLoadForTabletsResult() interface{} { + return backendservice.NewBackendServiceSyncLoadForTabletsResult() +} + +func getTopNHotPartitionsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceGetTopNHotPartitionsArgs) + realResult := result.(*backendservice.BackendServiceGetTopNHotPartitionsResult) + success, err := handler.(backendservice.BackendService).GetTopNHotPartitions(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServiceGetTopNHotPartitionsArgs() interface{} { + return backendservice.NewBackendServiceGetTopNHotPartitionsArgs() +} + +func newBackendServiceGetTopNHotPartitionsResult() interface{} { + return backendservice.NewBackendServiceGetTopNHotPartitionsResult() +} + +func warmUpTabletsHandler(ctx context.Context, handler interface{}, arg, 
result interface{}) error { + realArg := arg.(*backendservice.BackendServiceWarmUpTabletsArgs) + realResult := result.(*backendservice.BackendServiceWarmUpTabletsResult) + success, err := handler.(backendservice.BackendService).WarmUpTablets(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServiceWarmUpTabletsArgs() interface{} { + return backendservice.NewBackendServiceWarmUpTabletsArgs() +} + +func newBackendServiceWarmUpTabletsResult() interface{} { + return backendservice.NewBackendServiceWarmUpTabletsResult() } func ingestBinlogHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { @@ -433,6 +518,60 @@ func newBackendServiceIngestBinlogResult() interface{} { return backendservice.NewBackendServiceIngestBinlogResult() } +func queryIngestBinlogHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceQueryIngestBinlogArgs) + realResult := result.(*backendservice.BackendServiceQueryIngestBinlogResult) + success, err := handler.(backendservice.BackendService).QueryIngestBinlog(ctx, realArg.QueryIngestBinlogRequest) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServiceQueryIngestBinlogArgs() interface{} { + return backendservice.NewBackendServiceQueryIngestBinlogArgs() +} + +func newBackendServiceQueryIngestBinlogResult() interface{} { + return backendservice.NewBackendServiceQueryIngestBinlogResult() +} + +func publishTopicInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServicePublishTopicInfoArgs) + realResult := result.(*backendservice.BackendServicePublishTopicInfoResult) + success, err := handler.(backendservice.BackendService).PublishTopicInfo(ctx, realArg.TopicRequest) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServicePublishTopicInfoArgs() interface{} { + return backendservice.NewBackendServicePublishTopicInfoArgs() +} + +func newBackendServicePublishTopicInfoResult() interface{} { + return backendservice.NewBackendServicePublishTopicInfoResult() +} + +func getRealtimeExecStatusHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*backendservice.BackendServiceGetRealtimeExecStatusArgs) + realResult := result.(*backendservice.BackendServiceGetRealtimeExecStatusResult) + success, err := handler.(backendservice.BackendService).GetRealtimeExecStatus(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newBackendServiceGetRealtimeExecStatusArgs() interface{} { + return backendservice.NewBackendServiceGetRealtimeExecStatusArgs() +} + +func newBackendServiceGetRealtimeExecStatusResult() interface{} { + return backendservice.NewBackendServiceGetRealtimeExecStatusResult() +} + type kClient struct { c client.Client } @@ -620,14 +759,6 @@ func (p *kClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime return _result.GetSuccess(), nil } -func (p *kClient) CleanTrash(ctx context.Context) (err error) { - var _args backendservice.BackendServiceCleanTrashArgs - if err = p.c.Call(ctx, "clean_trash", &_args, nil); err != nil { - return - } - return nil -} - func (p *kClient) CheckStorageFormat(ctx context.Context) (r *backendservice.TCheckStorageFormatResult_, err error) { var _args 
backendservice.BackendServiceCheckStorageFormatArgs var _result backendservice.BackendServiceCheckStorageFormatResult @@ -637,6 +768,56 @@ func (p *kClient) CheckStorageFormat(ctx context.Context) (r *backendservice.TCh return _result.GetSuccess(), nil } +func (p *kClient) WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest) (r *backendservice.TWarmUpCacheAsyncResponse, err error) { + var _args backendservice.BackendServiceWarmUpCacheAsyncArgs + _args.Request = request + var _result backendservice.BackendServiceWarmUpCacheAsyncResult + if err = p.c.Call(ctx, "warm_up_cache_async", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error) { + var _args backendservice.BackendServiceCheckWarmUpCacheAsyncArgs + _args.Request = request + var _result backendservice.BackendServiceCheckWarmUpCacheAsyncResult + if err = p.c.Call(ctx, "check_warm_up_cache_async", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest) (r *backendservice.TSyncLoadForTabletsResponse, err error) { + var _args backendservice.BackendServiceSyncLoadForTabletsArgs + _args.Request = request + var _result backendservice.BackendServiceSyncLoadForTabletsResult + if err = p.c.Call(ctx, "sync_load_for_tablets", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest) (r *backendservice.TGetTopNHotPartitionsResponse, err error) { + var _args backendservice.BackendServiceGetTopNHotPartitionsArgs + _args.Request = request + var _result backendservice.BackendServiceGetTopNHotPartitionsResult + if err = p.c.Call(ctx, "get_top_n_hot_partitions", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest) (r *backendservice.TWarmUpTabletsResponse, err error) { + var _args backendservice.BackendServiceWarmUpTabletsArgs + _args.Request = request + var _result backendservice.BackendServiceWarmUpTabletsResult + if err = p.c.Call(ctx, "warm_up_tablets", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + func (p *kClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest) (r *backendservice.TIngestBinlogResult_, err error) { var _args backendservice.BackendServiceIngestBinlogArgs _args.IngestBinlogRequest = ingestBinlogRequest @@ -646,3 +827,33 @@ func (p *kClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backend } return _result.GetSuccess(), nil } + +func (p *kClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest) (r *backendservice.TQueryIngestBinlogResult_, err error) { + var _args backendservice.BackendServiceQueryIngestBinlogArgs + _args.QueryIngestBinlogRequest = queryIngestBinlogRequest + var _result backendservice.BackendServiceQueryIngestBinlogResult + if err = p.c.Call(ctx, "query_ingest_binlog", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) PublishTopicInfo(ctx context.Context, 
topicRequest *backendservice.TPublishTopicRequest) (r *backendservice.TPublishTopicResult_, err error) { + var _args backendservice.BackendServicePublishTopicInfoArgs + _args.TopicRequest = topicRequest + var _result backendservice.BackendServicePublishTopicInfoResult + if err = p.c.Call(ctx, "publish_topic_info", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetRealtimeExecStatus(ctx context.Context, request *backendservice.TGetRealtimeExecStatusRequest) (r *backendservice.TGetRealtimeExecStatusResponse, err error) { + var _args backendservice.BackendServiceGetRealtimeExecStatusArgs + _args.Request = request + var _result backendservice.BackendServiceGetRealtimeExecStatusResult + if err = p.c.Call(ctx, "get_realtime_exec_status", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/client.go b/pkg/rpc/kitex_gen/backendservice/backendservice/client.go index 1481b215..e001d2bd 100644 --- a/pkg/rpc/kitex_gen/backendservice/backendservice/client.go +++ b/pkg/rpc/kitex_gen/backendservice/backendservice/client.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package backendservice @@ -34,9 +34,16 @@ type Client interface { GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams, callOptions ...callopt.Option) (r *dorisexternalservice.TScanBatchResult_, err error) CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams, callOptions ...callopt.Option) (r *dorisexternalservice.TScanCloseResult_, err error) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64, callOptions ...callopt.Option) (r *backendservice.TStreamLoadRecordResult_, err error) - CleanTrash(ctx context.Context, callOptions ...callopt.Option) (err error) CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error) + WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpCacheAsyncResponse, err error) + CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error) + SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TSyncLoadForTabletsResponse, err error) + GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest, callOptions ...callopt.Option) (r *backendservice.TGetTopNHotPartitionsResponse, err error) + WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpTabletsResponse, err error) IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TIngestBinlogResult_, err error) + QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TQueryIngestBinlogResult_, err error) + PublishTopicInfo(ctx context.Context, topicRequest *backendservice.TPublishTopicRequest, callOptions ...callopt.Option) (r *backendservice.TPublishTopicResult_, err error) + GetRealtimeExecStatus(ctx context.Context, 
request *backendservice.TGetRealtimeExecStatusRequest, callOptions ...callopt.Option) (r *backendservice.TGetRealtimeExecStatusResponse, err error) } // NewClient creates a client for the service defined in IDL. @@ -158,17 +165,52 @@ func (p *kBackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStr return p.kClient.GetStreamLoadRecord(ctx, lastStreamRecordTime) } -func (p *kBackendServiceClient) CleanTrash(ctx context.Context, callOptions ...callopt.Option) (err error) { +func (p *kBackendServiceClient) CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) - return p.kClient.CleanTrash(ctx) + return p.kClient.CheckStorageFormat(ctx) } -func (p *kBackendServiceClient) CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error) { +func (p *kBackendServiceClient) WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpCacheAsyncResponse, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) - return p.kClient.CheckStorageFormat(ctx) + return p.kClient.WarmUpCacheAsync(ctx, request) +} + +func (p *kBackendServiceClient) CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CheckWarmUpCacheAsync(ctx, request) +} + +func (p *kBackendServiceClient) SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TSyncLoadForTabletsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.SyncLoadForTablets(ctx, request) +} + +func (p *kBackendServiceClient) GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest, callOptions ...callopt.Option) (r *backendservice.TGetTopNHotPartitionsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetTopNHotPartitions(ctx, request) +} + +func (p *kBackendServiceClient) WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpTabletsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.WarmUpTablets(ctx, request) } func (p *kBackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TIngestBinlogResult_, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.IngestBinlog(ctx, ingestBinlogRequest) } + +func (p *kBackendServiceClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TQueryIngestBinlogResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.QueryIngestBinlog(ctx, queryIngestBinlogRequest) +} + +func (p *kBackendServiceClient) PublishTopicInfo(ctx context.Context, topicRequest *backendservice.TPublishTopicRequest, callOptions ...callopt.Option) (r *backendservice.TPublishTopicResult_, err error) { + ctx = 
client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.PublishTopicInfo(ctx, topicRequest) +} + +func (p *kBackendServiceClient) GetRealtimeExecStatus(ctx context.Context, request *backendservice.TGetRealtimeExecStatusRequest, callOptions ...callopt.Option) (r *backendservice.TGetRealtimeExecStatusResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetRealtimeExecStatus(ctx, request) +} diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go b/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go index bc7108dc..e38cd4f8 100644 --- a/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go +++ b/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package backendservice diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/server.go b/pkg/rpc/kitex_gen/backendservice/backendservice/server.go index 228b2335..c10bd073 100644 --- a/pkg/rpc/kitex_gen/backendservice/backendservice/server.go +++ b/pkg/rpc/kitex_gen/backendservice/backendservice/server.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package backendservice import ( diff --git a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go index 3029f170..08b0b5d9 100644 --- a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go +++ b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package backendservice @@ -11,8 +11,10 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" @@ -29,6 +31,7 @@ var ( _ = bthrift.BinaryWriter(nil) _ = agentservice.KitexUnusedProtection _ = dorisexternalservice.KitexUnusedProtection + _ = frontendservice.KitexUnusedProtection _ = palointernalservice.KitexUnusedProtection _ = plannodes.KitexUnusedProtection _ = status.KitexUnusedProtection @@ -264,6 +267,34 @@ func (p *TTabletStat) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -339,7 +370,7 @@ func (p *TTabletStat) FastReadField3(buf []byte) (int, error) { return offset, err } else { offset += l - p.RowNum = &v + p.RowCount = &v } return offset, nil @@ -352,7 +383,7 @@ func (p *TTabletStat) FastReadField4(buf []byte) (int, error) { return offset, err } else 
{ offset += l - p.VersionCount = &v + p.TotalVersionCount = &v } return offset, nil @@ -371,6 +402,32 @@ func (p *TTabletStat) FastReadField5(buf []byte) (int, error) { return offset, nil } +func (p *TTabletStat) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.VisibleVersionCount = &v + + } + return offset, nil +} + +func (p *TTabletStat) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.VisibleVersion = &v + + } + return offset, nil +} + // for compatibility func (p *TTabletStat) FastWrite(buf []byte) int { return 0 @@ -385,6 +442,8 @@ func (p *TTabletStat) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWri offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -400,6 +459,8 @@ func (p *TTabletStat) BLength() int { l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -428,9 +489,9 @@ func (p *TTabletStat) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWri func (p *TTabletStat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRowNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_num", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowNum) + if p.IsSetRowCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_count", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowCount) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } @@ -439,9 +500,9 @@ func (p *TTabletStat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWri func (p *TTabletStat) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetVersionCount() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version_count", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.VersionCount) + if p.IsSetTotalVersionCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_version_count", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalVersionCount) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } @@ -459,6 +520,28 @@ func (p *TTabletStat) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWri return offset } +func (p *TTabletStat) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVisibleVersionCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_count", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersionCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletStat) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVisibleVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version", thrift.I64, 7) + offset += 
bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTabletStat) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ -481,9 +564,9 @@ func (p *TTabletStat) field2Length() int { func (p *TTabletStat) field3Length() int { l := 0 - if p.IsSetRowNum() { - l += bthrift.Binary.FieldBeginLength("row_num", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.RowNum) + if p.IsSetRowCount() { + l += bthrift.Binary.FieldBeginLength("row_count", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.RowCount) l += bthrift.Binary.FieldEndLength() } @@ -492,9 +575,9 @@ func (p *TTabletStat) field3Length() int { func (p *TTabletStat) field4Length() int { l := 0 - if p.IsSetVersionCount() { - l += bthrift.Binary.FieldBeginLength("version_count", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.VersionCount) + if p.IsSetTotalVersionCount() { + l += bthrift.Binary.FieldBeginLength("total_version_count", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.TotalVersionCount) l += bthrift.Binary.FieldEndLength() } @@ -512,6 +595,28 @@ func (p *TTabletStat) field5Length() int { return l } +func (p *TTabletStat) field6Length() int { + l := 0 + if p.IsSetVisibleVersionCount() { + l += bthrift.Binary.FieldBeginLength("visible_version_count", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.VisibleVersionCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletStat) field7Length() int { + l := 0 + if p.IsSetVisibleVersion() { + l += bthrift.Binary.FieldBeginLength("visible_version", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.VisibleVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTabletStatResult_) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1403,6 +1508,48 @@ func (p *TRoutineLoadTask) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 17: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1678,6 +1825,45 @@ func (p *TRoutineLoadTask) FastReadField16(buf []byte) (int, error) { return offset, nil } +func (p *TRoutineLoadTask) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MemtableOnSinkNode = &v + + } + return offset, nil +} + +func (p *TRoutineLoadTask) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.QualifiedUser = &v + + } + return offset, nil +} + +func (p *TRoutineLoadTask) 
FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CloudCluster = &v + + } + return offset, nil +} + // for compatibility func (p *TRoutineLoadTask) FastWrite(buf []byte) int { return 0 @@ -1694,6 +1880,7 @@ func (p *TRoutineLoadTask) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) @@ -1703,6 +1890,8 @@ func (p *TRoutineLoadTask) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1729,6 +1918,9 @@ func (p *TRoutineLoadTask) BLength() int { l += p.field14Length() l += p.field15Length() l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1897,6 +2089,39 @@ func (p *TRoutineLoadTask) fastWriteField16(buf []byte, binaryWriter bthrift.Bin return offset } +func (p *TRoutineLoadTask) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemtableOnSinkNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memtable_on_sink_node", thrift.BOOL, 17) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.MemtableOnSinkNode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRoutineLoadTask) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQualifiedUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "qualified_user", thrift.STRING, 18) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.QualifiedUser) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRoutineLoadTask) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCloudCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_cluster", thrift.STRING, 19) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudCluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TRoutineLoadTask) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) @@ -2059,6 +2284,39 @@ func (p *TRoutineLoadTask) field16Length() int { return l } +func (p *TRoutineLoadTask) field17Length() int { + l := 0 + if p.IsSetMemtableOnSinkNode() { + l += bthrift.Binary.FieldBeginLength("memtable_on_sink_node", thrift.BOOL, 17) + l += bthrift.Binary.BoolLength(*p.MemtableOnSinkNode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRoutineLoadTask) field18Length() int { + l := 0 + if p.IsSetQualifiedUser() { + l += 
bthrift.Binary.FieldBeginLength("qualified_user", thrift.STRING, 18) + l += bthrift.Binary.StringLengthNocopy(*p.QualifiedUser) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRoutineLoadTask) field19Length() int { + l := 0 + if p.IsSetCloudCluster() { + l += bthrift.Binary.FieldBeginLength("cloud_cluster", thrift.STRING, 19) + l += bthrift.Binary.StringLengthNocopy(*p.CloudCluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TKafkaMetaProxyRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -4435,12 +4693,15 @@ func (p *TCheckStorageFormatResult_) field2Length() int { return l } -func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { +func (p *TWarmUpCacheAsyncRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetHost bool = false + var issetBrpcPort bool = false + var issetTabletIds bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -4458,12 +4719,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetHost = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4472,12 +4734,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetBrpcPort = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4486,12 +4749,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetTabletIds = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4499,81 +4763,11 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } } @@ -4589,341 +4783,8607 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetHost { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBrpcPort { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTabletIds { + fieldId = 3 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncRequest[fieldId])) } -func (p *TIngestBinlogRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TxnId = &v - - } - return offset, nil -} - -func (p *TIngestBinlogRequest) FastReadField2(buf []byte) (int, error) { +func (p *TWarmUpCacheAsyncRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.RemoteTabletId = &v - - } - return offset, nil -} - -func (p *TIngestBinlogRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.BinlogVersion = &v + p.Host = v } return offset, nil } -func (p *TIngestBinlogRequest) FastReadField4(buf []byte) (int, error) { +func (p *TWarmUpCacheAsyncRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.RemoteHost = &v - - } - return offset, nil -} - -func (p *TIngestBinlogRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.RemotePort = &v + p.BrpcPort = v } return offset, nil } -func (p *TIngestBinlogRequest) FastReadField6(buf []byte) (int, error) { +func (p *TWarmUpCacheAsyncRequest) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := 
bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - p.PartitionId = &v - } - return offset, nil -} + p.TabletIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TIngestBinlogRequest) FastReadField7(buf []byte) (int, error) { - offset := 0 + _elem = v - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LocalTabletId = &v + } + p.TabletIds = append(p.TabletIds, _elem) } - return offset, nil -} - -func (p *TIngestBinlogRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.LoadId = tmp return offset, nil } // for compatibility -func (p *TIngestBinlogRequest) FastWrite(buf []byte) int { +func (p *TWarmUpCacheAsyncRequest) FastWrite(buf []byte) int { return 0 } -func (p *TIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TWarmUpCacheAsyncRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpCacheAsyncRequest") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TIngestBinlogRequest) BLength() int { +func (p *TWarmUpCacheAsyncRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TIngestBinlogRequest") + l += bthrift.Binary.StructBeginLength("TWarmUpCacheAsyncRequest") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRemoteTabletId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_tablet_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteTabletId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset 
-} - -func (p *TIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBinlogVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlog_version", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.BinlogVersion) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRemoteHost() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_host", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemoteHost) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIngestBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TWarmUpCacheAsyncRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRemotePort() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_port", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemotePort) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "host", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Host) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TIngestBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TWarmUpCacheAsyncRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartitionId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], p.BrpcPort) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TIngestBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TWarmUpCacheAsyncRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLocalTabletId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_tablet_id", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LocalTabletId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) -func (p *TIngestBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 8) - offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TIngestBinlogRequest) field1Length() 
int { +func (p *TWarmUpCacheAsyncRequest) field1Length() int { l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.TxnId) + l += bthrift.Binary.FieldBeginLength("host", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.Host) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TIngestBinlogRequest) field2Length() int { +func (p *TWarmUpCacheAsyncRequest) field2Length() int { l := 0 - if p.IsSetRemoteTabletId() { - l += bthrift.Binary.FieldBeginLength("remote_tablet_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.RemoteTabletId) + l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 2) + l += bthrift.Binary.I32Length(p.BrpcPort) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TIngestBinlogRequest) field3Length() int { +func (p *TWarmUpCacheAsyncRequest) field3Length() int { l := 0 - if p.IsSetBinlogVersion() { - l += bthrift.Binary.FieldBeginLength("binlog_version", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.BinlogVersion) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TIngestBinlogRequest) field4Length() int { - l := 0 - if p.IsSetRemoteHost() { - l += bthrift.Binary.FieldBeginLength("remote_host", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.RemoteHost) +func (p *TWarmUpCacheAsyncResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) 
+ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncResponse[fieldId])) +} + +func (p *TWarmUpCacheAsyncResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TWarmUpCacheAsyncResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWarmUpCacheAsyncResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpCacheAsyncResponse") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpCacheAsyncResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWarmUpCacheAsyncResponse") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWarmUpCacheAsyncResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpCacheAsyncResponse) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCheckWarmUpCacheAsyncRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, 
fieldId, fieldIDToName_TCheckWarmUpCacheAsyncRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCheckWarmUpCacheAsyncRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tablets = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.Tablets = append(p.Tablets, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TCheckWarmUpCacheAsyncRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCheckWarmUpCacheAsyncRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWarmUpCacheAsyncRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCheckWarmUpCacheAsyncRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCheckWarmUpCacheAsyncRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCheckWarmUpCacheAsyncRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.Tablets { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckWarmUpCacheAsyncRequest) field1Length() int { + l := 0 + if p.IsSetTablets() { + l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.Tablets)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.Tablets) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckWarmUpCacheAsyncResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + 
if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId])) +} + +func (p *TCheckWarmUpCacheAsyncResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TCheckWarmUpCacheAsyncResponse) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TaskDone = make(map[int64]bool, size) + for i := 0; i < size; i++ { + var _key int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val bool + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.TaskDone[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TCheckWarmUpCacheAsyncResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCheckWarmUpCacheAsyncResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWarmUpCacheAsyncResponse") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p 
*TCheckWarmUpCacheAsyncResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCheckWarmUpCacheAsyncResponse") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCheckWarmUpCacheAsyncResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCheckWarmUpCacheAsyncResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTaskDone() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_done", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.BOOL, 0) + var length int + for k, v := range p.TaskDone { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteBool(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.BOOL, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckWarmUpCacheAsyncResponse) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCheckWarmUpCacheAsyncResponse) field2Length() int { + l := 0 + if p.IsSetTaskDone() { + l += bthrift.Binary.FieldBeginLength("task_done", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.BOOL, len(p.TaskDone)) + var tmpK int64 + var tmpV bool + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.BoolLength(bool(tmpV))) * len(p.TaskDone) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSyncLoadForTabletsRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTabletIds bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTabletIds = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetTabletIds { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncLoadForTabletsRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSyncLoadForTabletsRequest[fieldId])) +} + +func (p *TSyncLoadForTabletsRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TabletIds = append(p.TabletIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TSyncLoadForTabletsRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSyncLoadForTabletsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSyncLoadForTabletsRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSyncLoadForTabletsRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSyncLoadForTabletsRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TSyncLoadForTabletsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TSyncLoadForTabletsRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSyncLoadForTabletsResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, 
l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +// for compatibility +func (p *TSyncLoadForTabletsResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSyncLoadForTabletsResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSyncLoadForTabletsResponse") + if p != nil { + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSyncLoadForTabletsResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSyncLoadForTabletsResponse") + if p != nil { + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THotPartition) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionId bool = false + var issetLastAccessTime bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetLastAccessTime = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + 
} + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetPartitionId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetLastAccessTime { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotPartition[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotPartition[fieldId])) +} + +func (p *THotPartition) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionId = v + + } + return offset, nil +} + +func (p *THotPartition) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.LastAccessTime = v + + } + return offset, nil +} + +func (p *THotPartition) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.QueryPerDay = &v + + } + return offset, nil +} + +func (p *THotPartition) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.QueryPerWeek = &v + + } + return offset, nil +} + +// for compatibility +func (p *THotPartition) FastWrite(buf []byte) int { + return 0 +} + +func (p *THotPartition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THotPartition") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THotPartition) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THotPartition") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THotPartition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) 
int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.PartitionId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THotPartition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_access_time", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.LastAccessTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THotPartition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryPerDay() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_per_day", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryPerDay) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THotPartition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryPerWeek() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_per_week", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryPerWeek) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THotPartition) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.PartitionId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *THotPartition) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("last_access_time", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.LastAccessTime) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *THotPartition) field3Length() int { + l := 0 + if p.IsSetQueryPerDay() { + l += bthrift.Binary.FieldBeginLength("query_per_day", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.QueryPerDay) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THotPartition) field4Length() int { + l := 0 + if p.IsSetQueryPerWeek() { + l += bthrift.Binary.FieldBeginLength("query_per_week", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.QueryPerWeek) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THotTableMessage) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTableId bool = false + var issetIndexId bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTableId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetIndexId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = 
p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetTableId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetIndexId { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotTableMessage[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotTableMessage[fieldId])) +} + +func (p *THotTableMessage) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TableId = v + + } + return offset, nil +} + +func (p *THotTableMessage) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IndexId = v + + } + return offset, nil +} + +func (p *THotTableMessage) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HotPartitions = make([]*THotPartition, 0, size) + for i := 0; i < size; i++ { + _elem := NewTHotPartition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.HotPartitions = append(p.HotPartitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *THotTableMessage) FastWrite(buf []byte) int { + return 0 +} + +func (p *THotTableMessage) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THotTableMessage") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THotTableMessage) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THotTableMessage") + if p != 
nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THotTableMessage) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THotTableMessage) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "index_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.IndexId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THotTableMessage) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHotPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hot_partitions", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.HotPartitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THotTableMessage) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.TableId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *THotTableMessage) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("index_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.IndexId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *THotTableMessage) field3Length() int { + l := 0 + if p.IsSetHotPartitions() { + l += bthrift.Binary.FieldBeginLength("hot_partitions", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HotPartitions)) + for _, v := range p.HotPartitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetTopNHotPartitionsRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, 
fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +// for compatibility +func (p *TGetTopNHotPartitionsRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetTopNHotPartitionsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTopNHotPartitionsRequest") + if p != nil { + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetTopNHotPartitionsRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetTopNHotPartitionsRequest") + if p != nil { + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetTopNHotPartitionsResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetFileCacheSize bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFileCacheSize = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetFileCacheSize { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTopNHotPartitionsResponse[fieldId])) +} + +func (p 
*TGetTopNHotPartitionsResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FileCacheSize = v + + } + return offset, nil +} + +func (p *TGetTopNHotPartitionsResponse) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HotTables = make([]*THotTableMessage, 0, size) + for i := 0; i < size; i++ { + _elem := NewTHotTableMessage() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.HotTables = append(p.HotTables, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TGetTopNHotPartitionsResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetTopNHotPartitionsResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTopNHotPartitionsResponse") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetTopNHotPartitionsResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetTopNHotPartitionsResponse") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetTopNHotPartitionsResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_cache_size", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.FileCacheSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TGetTopNHotPartitionsResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHotTables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hot_tables", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.HotTables { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetTopNHotPartitionsResponse) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("file_cache_size", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.FileCacheSize) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TGetTopNHotPartitionsResponse) field2Length() int { + l := 0 + if p.IsSetHotTables() { + l += bthrift.Binary.FieldBeginLength("hot_tables", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HotTables)) + for _, v := range p.HotTables { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJobMeta) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l 
int + var fieldTypeId thrift.TType + var fieldId int16 + var issetDownloadType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDownloadType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetDownloadType { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobMeta[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TJobMeta[fieldId])) +} + +func (p *TJobMeta) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DownloadType = TDownloadType(v) + + } + return offset, nil +} + +func (p *TJobMeta) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeIp = &v + + } + return offset, nil +} + +func (p *TJobMeta) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := 
bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BrpcPort = &v + + } + return offset, nil +} + +func (p *TJobMeta) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TabletIds = append(p.TabletIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TJobMeta) FastWrite(buf []byte) int { + return 0 +} + +func (p *TJobMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJobMeta") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TJobMeta) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TJobMeta") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TJobMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "download_type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.DownloadType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TJobMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_ip", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BeIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJobMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBrpcPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BrpcPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJobMeta) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJobMeta) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("download_type", thrift.I32, 1) + l += 
bthrift.Binary.I32Length(int32(p.DownloadType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TJobMeta) field2Length() int { + l := 0 + if p.IsSetBeIp() { + l += bthrift.Binary.FieldBeginLength("be_ip", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.BeIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJobMeta) field3Length() int { + l := 0 + if p.IsSetBrpcPort() { + l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.BrpcPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJobMeta) field4Length() int { + l := 0 + if p.IsSetTabletIds() { + l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWarmUpTabletsRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetJobId bool = false + var issetBatchId bool = false + var issetType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetJobId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetBatchId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetJobId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBatchId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetType { + fieldId = 4 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: 
", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsRequest[fieldId])) +} + +func (p *TWarmUpTabletsRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.JobId = v + + } + return offset, nil +} + +func (p *TWarmUpTabletsRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BatchId = v + + } + return offset, nil +} + +func (p *TWarmUpTabletsRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.JobMetas = make([]*TJobMeta, 0, size) + for i := 0; i < size; i++ { + _elem := NewTJobMeta() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.JobMetas = append(p.JobMetas, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TWarmUpTabletsRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Type = TWarmUpTabletsRequestType(v) + + } + return offset, nil +} + +// for compatibility +func (p *TWarmUpTabletsRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWarmUpTabletsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpTabletsRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWarmUpTabletsRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWarmUpTabletsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsRequest) fastWriteField2(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.BatchId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobMetas() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_metas", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.JobMetas { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWarmUpTabletsRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Type)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.JobId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TWarmUpTabletsRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("batch_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.BatchId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TWarmUpTabletsRequest) field3Length() int { + l := 0 + if p.IsSetJobMetas() { + l += bthrift.Binary.FieldBeginLength("job_metas", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.JobMetas)) + for _, v := range p.JobMetas { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWarmUpTabletsRequest) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 4) + l += bthrift.Binary.I32Length(int32(p.Type)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TWarmUpTabletsResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsResponse[fieldId])) +} + +func (p *TWarmUpTabletsResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TWarmUpTabletsResponse) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.JobId = &v + + } + return offset, nil +} + +func (p *TWarmUpTabletsResponse) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BatchId = &v + + } + return offset, nil +} + +func (p *TWarmUpTabletsResponse) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PendingJobSize = &v + + } + return offset, nil +} + +func (p *TWarmUpTabletsResponse) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FinishJobSize = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWarmUpTabletsResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWarmUpTabletsResponse) 
FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpTabletsResponse") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWarmUpTabletsResponse") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWarmUpTabletsResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TWarmUpTabletsResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWarmUpTabletsResponse) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBatchId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_id", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BatchId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWarmUpTabletsResponse) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPendingJobSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pending_job_size", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.PendingJobSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWarmUpTabletsResponse) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFinishJobSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finish_job_size", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FinishJobSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWarmUpTabletsResponse) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TWarmUpTabletsResponse) field2Length() int { + l := 0 + if p.IsSetJobId() { + l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.JobId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWarmUpTabletsResponse) field3Length() int { + l := 0 + if p.IsSetBatchId() { + l += bthrift.Binary.FieldBeginLength("batch_id", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.BatchId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func 
(p *TWarmUpTabletsResponse) field4Length() int { + l := 0 + if p.IsSetPendingJobSize() { + l += bthrift.Binary.FieldBeginLength("pending_job_size", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.PendingJobSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWarmUpTabletsResponse) field5Length() int { + l := 0 + if p.IsSetFinishJobSize() { + l += bthrift.Binary.FieldBeginLength("finish_job_size", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.FinishJobSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) 
+ offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIngestBinlogRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemoteTabletId = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BinlogVersion = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemoteHost = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemotePort = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PartitionId = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LocalTabletId = &v + + } + return offset, nil +} + +func (p *TIngestBinlogRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadId = tmp + return offset, nil +} + +// for compatibility +func (p *TIngestBinlogRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += 
p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIngestBinlogRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIngestBinlogRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRemoteTabletId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_tablet_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteTabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBinlogVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlog_version", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BinlogVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRemoteHost() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_host", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemoteHost) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRemotePort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_port", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemotePort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLocalTabletId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_tablet_id", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LocalTabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetLoadId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 8) + offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogRequest) field1Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field2Length() int { + l := 0 + if p.IsSetRemoteTabletId() { + l += bthrift.Binary.FieldBeginLength("remote_tablet_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.RemoteTabletId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field3Length() int { + l := 0 + if p.IsSetBinlogVersion() { + l += bthrift.Binary.FieldBeginLength("binlog_version", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.BinlogVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field4Length() int { + l := 0 + if p.IsSetRemoteHost() { + l += bthrift.Binary.FieldBeginLength("remote_host", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.RemoteHost) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field5Length() int { + l := 0 + if p.IsSetRemotePort() { + l += bthrift.Binary.FieldBeginLength("remote_port", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.RemotePort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field6Length() int { + l := 0 + if p.IsSetPartitionId() { + l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.PartitionId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field7Length() int { + l := 0 + if p.IsSetLocalTabletId() { + l += bthrift.Binary.FieldBeginLength("local_tablet_id", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.LocalTabletId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogRequest) field8Length() int { + l := 0 + if p.IsSetLoadId() { + l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 8) + l += p.LoadId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TIngestBinlogResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsAsync = &v + + } + return offset, nil +} + +// for compatibility +func (p *TIngestBinlogResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIngestBinlogResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIngestBinlogResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsAsync() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_async", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsAsync) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIngestBinlogResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIngestBinlogResult_) field2Length() int { + l := 0 + if p.IsSetIsAsync() { + l += bthrift.Binary.FieldBeginLength("is_async", thrift.BOOL, 2) + l += 
bthrift.Binary.BoolLength(*p.IsAsync) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TQueryIngestBinlogRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PartitionId = &v + + } + return offset, nil +} + +func (p *TQueryIngestBinlogRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } 
else { + offset += l + p.TabletId = &v + + } + return offset, nil +} + +func (p *TQueryIngestBinlogRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadId = tmp + return offset, nil +} + +// for compatibility +func (p *TQueryIngestBinlogRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TQueryIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryIngestBinlogRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TQueryIngestBinlogRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryIngestBinlogRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TQueryIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 4) + offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogRequest) field1Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogRequest) field2Length() int { + l := 0 + if p.IsSetPartitionId() { + l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.PartitionId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogRequest) field3Length() int { + l := 0 + if p.IsSetTabletId() { + l += 
bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TabletId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogRequest) field4Length() int { + l := 0 + if p.IsSetLoadId() { + l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 4) + l += p.LoadId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryIngestBinlogResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TIngestBinlogStatus(v) + p.Status = &tmp + + } + return offset, nil +} + +func (p *TQueryIngestBinlogResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ErrMsg = &v + + } + return offset, nil +} + +// for compatibility +func (p *TQueryIngestBinlogResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TQueryIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryIngestBinlogResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset 
+= p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TQueryIngestBinlogResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryIngestBinlogResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TQueryIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Status)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetErrMsg() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "err_msg", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ErrMsg) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryIngestBinlogResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.Status)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryIngestBinlogResult_) field2Length() int { + l := 0 + if p.IsSetErrMsg() { + l += bthrift.Binary.FieldBeginLength("err_msg", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.ErrMsg) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil 
+ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadGroupInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Id = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Version = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CpuShare = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CpuHardLimit = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MemLimit = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableMemoryOvercommit = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableCpuHardLimit = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ScanThreadNum = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxRemoteScanThreadNum = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MinRemoteScanThreadNum 
= &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MemoryLowWatermark = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MemoryHighWatermark = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReadBytesPerSecond = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemoteReadBytesPerSecond = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Tag = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWorkloadGroupInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWorkloadGroupInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadGroupInfo") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWorkloadGroupInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWorkloadGroupInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWorkloadGroupInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", 
thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCpuShare() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cpu_share", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CpuShare) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCpuHardLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cpu_hard_limit", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.CpuHardLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mem_limit", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.MemLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableMemoryOvercommit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_memory_overcommit", thrift.BOOL, 7) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableMemoryOvercommit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableCpuHardLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_cpu_hard_limit", thrift.BOOL, 8) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableCpuHardLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetScanThreadNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_thread_num", thrift.I32, 9) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ScanThreadNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxRemoteScanThreadNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_remote_scan_thread_num", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], 
*p.MaxRemoteScanThreadNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMinRemoteScanThreadNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_remote_scan_thread_num", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MinRemoteScanThreadNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemoryLowWatermark() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memory_low_watermark", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MemoryLowWatermark) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemoryHighWatermark() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memory_high_watermark", thrift.I32, 13) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MemoryHighWatermark) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReadBytesPerSecond() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_bytes_per_second", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReadBytesPerSecond) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRemoteReadBytesPerSecond() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_read_bytes_per_second", thrift.I64, 15) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteReadBytesPerSecond) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTag() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tag", thrift.STRING, 16) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tag) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) field1Length() int { + l := 0 + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field2Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field3Length() int { + l := 0 + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Version) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field4Length() int { + l := 0 + if p.IsSetCpuShare() { + l += bthrift.Binary.FieldBeginLength("cpu_share", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.CpuShare) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p 
*TWorkloadGroupInfo) field5Length() int { + l := 0 + if p.IsSetCpuHardLimit() { + l += bthrift.Binary.FieldBeginLength("cpu_hard_limit", thrift.I32, 5) + l += bthrift.Binary.I32Length(*p.CpuHardLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field6Length() int { + l := 0 + if p.IsSetMemLimit() { + l += bthrift.Binary.FieldBeginLength("mem_limit", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.MemLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field7Length() int { + l := 0 + if p.IsSetEnableMemoryOvercommit() { + l += bthrift.Binary.FieldBeginLength("enable_memory_overcommit", thrift.BOOL, 7) + l += bthrift.Binary.BoolLength(*p.EnableMemoryOvercommit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field8Length() int { + l := 0 + if p.IsSetEnableCpuHardLimit() { + l += bthrift.Binary.FieldBeginLength("enable_cpu_hard_limit", thrift.BOOL, 8) + l += bthrift.Binary.BoolLength(*p.EnableCpuHardLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field9Length() int { + l := 0 + if p.IsSetScanThreadNum() { + l += bthrift.Binary.FieldBeginLength("scan_thread_num", thrift.I32, 9) + l += bthrift.Binary.I32Length(*p.ScanThreadNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field10Length() int { + l := 0 + if p.IsSetMaxRemoteScanThreadNum() { + l += bthrift.Binary.FieldBeginLength("max_remote_scan_thread_num", thrift.I32, 10) + l += bthrift.Binary.I32Length(*p.MaxRemoteScanThreadNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field11Length() int { + l := 0 + if p.IsSetMinRemoteScanThreadNum() { + l += bthrift.Binary.FieldBeginLength("min_remote_scan_thread_num", thrift.I32, 11) + l += bthrift.Binary.I32Length(*p.MinRemoteScanThreadNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field12Length() int { + l := 0 + if p.IsSetMemoryLowWatermark() { + l += bthrift.Binary.FieldBeginLength("memory_low_watermark", thrift.I32, 12) + l += bthrift.Binary.I32Length(*p.MemoryLowWatermark) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field13Length() int { + l := 0 + if p.IsSetMemoryHighWatermark() { + l += bthrift.Binary.FieldBeginLength("memory_high_watermark", thrift.I32, 13) + l += bthrift.Binary.I32Length(*p.MemoryHighWatermark) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field14Length() int { + l := 0 + if p.IsSetReadBytesPerSecond() { + l += bthrift.Binary.FieldBeginLength("read_bytes_per_second", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.ReadBytesPerSecond) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field15Length() int { + l := 0 + if p.IsSetRemoteReadBytesPerSecond() { + l += bthrift.Binary.FieldBeginLength("remote_read_bytes_per_second", thrift.I64, 15) + l += bthrift.Binary.I64Length(*p.RemoteReadBytesPerSecond) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field16Length() int { + l := 0 + if p.IsSetTag() { + l += bthrift.Binary.FieldBeginLength("tag", thrift.STRING, 16) + l += bthrift.Binary.StringLengthNocopy(*p.Tag) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadCondition) FastRead(buf []byte) (int, error) { + var err error + var offset int 
+ var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadCondition[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadCondition) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TWorkloadMetricType(v) + p.MetricName = &tmp + + } + return offset, nil +} + +func (p *TWorkloadCondition) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TCompareOperator(v) + p.Op = &tmp + + } + return offset, nil +} + +func (p *TWorkloadCondition) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Value = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWorkloadCondition) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWorkloadCondition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadCondition") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset 
+= p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWorkloadCondition) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWorkloadCondition") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWorkloadCondition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetricName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metric_name", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MetricName)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadCondition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "op", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Op)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadCondition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Value) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadCondition) field1Length() int { + l := 0 + if p.IsSetMetricName() { + l += bthrift.Binary.FieldBeginLength("metric_name", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.MetricName)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadCondition) field2Length() int { + l := 0 + if p.IsSetOp() { + l += bthrift.Binary.FieldBeginLength("op", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(*p.Op)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadCondition) field3Length() int { + l := 0 + if p.IsSetValue() { + l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Value) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadAction) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadAction[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadAction) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TWorkloadActionType(v) + p.Action = &tmp + + } + return offset, nil +} + +func (p *TWorkloadAction) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ActionArgs_ = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWorkloadAction) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWorkloadAction) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadAction") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWorkloadAction) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWorkloadAction") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWorkloadAction) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAction() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Action)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadAction) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetActionArgs_() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action_args", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ActionArgs_) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadAction) field1Length() int { + l := 0 + if p.IsSetAction() { + l += bthrift.Binary.FieldBeginLength("action", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.Action)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadAction) field2Length() int { + l := 0 + if p.IsSetActionArgs_() { + l += 
bthrift.Binary.FieldBeginLength("action_args", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.ActionArgs_) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadSchedPolicy[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWorkloadSchedPolicy) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Id = &v + + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v + + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Version = &v + + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Priority = &v + + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Enabled = &v + + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ConditionList = make([]*TWorkloadCondition, 0, size) + for i := 0; i < size; i++ { + _elem := NewTWorkloadCondition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.ConditionList = append(p.ConditionList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ActionList = make([]*TWorkloadAction, 0, size) + for i := 0; i < size; i++ { + _elem := NewTWorkloadAction() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.ActionList = append(p.ActionList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TWorkloadSchedPolicy) FastReadField8(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.WgIdList = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.WgIdList = append(p.WgIdList, _elem) + } + if l, err := 
bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TWorkloadSchedPolicy) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWorkloadSchedPolicy) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadSchedPolicy") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWorkloadSchedPolicy) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWorkloadSchedPolicy") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWorkloadSchedPolicy) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.Version) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPriority() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priority", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.Priority) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnabled() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enabled", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Enabled) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConditionList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_list", thrift.LIST, 6) + 
listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.ConditionList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetActionList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action_list", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.ActionList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWgIdList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wg_id_list", thrift.LIST, 8) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.WgIdList { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadSchedPolicy) field1Length() int { + l := 0 + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field2Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field3Length() int { + l := 0 + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.Version) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field4Length() int { + l := 0 + if p.IsSetPriority() { + l += bthrift.Binary.FieldBeginLength("priority", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.Priority) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field5Length() int { + l := 0 + if p.IsSetEnabled() { + l += bthrift.Binary.FieldBeginLength("enabled", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.Enabled) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field6Length() int { + l := 0 + if p.IsSetConditionList() { + l += bthrift.Binary.FieldBeginLength("condition_list", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ConditionList)) + for _, v := range p.ConditionList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field7Length() int { + l := 0 + if p.IsSetActionList() { + l += bthrift.Binary.FieldBeginLength("action_list", thrift.LIST, 7) 
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ActionList)) + for _, v := range p.ActionList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadSchedPolicy) field8Length() int { + l := 0 + if p.IsSetWgIdList() { + l += bthrift.Binary.FieldBeginLength("wg_id_list", thrift.LIST, 8) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.WgIdList)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.WgIdList) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TopicInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TopicInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TopicInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTWorkloadGroupInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.WorkloadGroupInfo = tmp + return offset, nil +} + +func (p *TopicInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTWorkloadSchedPolicy() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.WorkloadSchedPolicy = tmp + return offset, nil +} + +// for compatibility +func (p *TopicInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TopicInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TopicInfo") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TopicInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TopicInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TopicInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadGroupInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_group_info", thrift.STRUCT, 1) + offset += p.WorkloadGroupInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TopicInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadSchedPolicy() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_sched_policy", thrift.STRUCT, 2) + offset += p.WorkloadSchedPolicy.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TopicInfo) field1Length() int { + l := 0 + if p.IsSetWorkloadGroupInfo() { + l += bthrift.Binary.FieldBeginLength("workload_group_info", thrift.STRUCT, 1) + l += p.WorkloadGroupInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TopicInfo) field2Length() int { + l := 0 + if p.IsSetWorkloadSchedPolicy() { + l += bthrift.Binary.FieldBeginLength("workload_sched_policy", thrift.STRUCT, 2) + l += p.WorkloadSchedPolicy.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPublishTopicRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTopicMap bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTopicMap = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetTopicMap { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TPublishTopicRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicRequest[fieldId])) +} + +func (p *TPublishTopicRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopicMap = make(map[TTopicInfoType][]*TopicInfo, size) + for i := 0; i < size; i++ { + var _key TTopicInfoType + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = TTopicInfoType(v) + + } + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _val := make([]*TopicInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTopicInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _val = append(_val, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TopicMap[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TPublishTopicRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPublishTopicRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishTopicRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPublishTopicRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPublishTopicRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPublishTopicRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topic_map", thrift.MAP, 1) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, 0) + var length int + for k, v := range p.TopicMap { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], int32(k)) + + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.LIST, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPublishTopicRequest) field1Length() int { + l := 0 + l += 
bthrift.Binary.FieldBeginLength("topic_map", thrift.MAP, 1) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, len(p.TopicMap)) + for k, v := range p.TopicMap { + + l += bthrift.Binary.I32Length(int32(k)) + + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPublishTopicResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicResult_[fieldId])) +} + +func (p *TPublishTopicResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TPublishTopicResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPublishTopicResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishTopicResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPublishTopicResult_) BLength() int { + l := 0 + l += 
bthrift.Binary.StructBeginLength("TPublishTopicResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPublishTopicResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPublishTopicResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TGetRealtimeExecStatusRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Id = tmp + return offset, nil +} + +// for compatibility +func (p *TGetRealtimeExecStatusRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetRealtimeExecStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetRealtimeExecStatusRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetRealtimeExecStatusRequest) BLength() int { + l := 0 + l 
+= bthrift.Binary.StructBeginLength("TGetRealtimeExecStatusRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetRealtimeExecStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.STRUCT, 1) + offset += p.Id.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetRealtimeExecStatusRequest) field1Length() int { + l := 0 + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.STRUCT, 1) + l += p.Id.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetRealtimeExecStatusResponse) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetRealtimeExecStatusResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TGetRealtimeExecStatusResponse) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := frontendservice.NewTReportExecStatusParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + 
} + p.ReportExecStatusParams = tmp + return offset, nil +} + +// for compatibility +func (p *TGetRealtimeExecStatusResponse) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetRealtimeExecStatusResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetRealtimeExecStatusResponse") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetRealtimeExecStatusResponse) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetRealtimeExecStatusResponse") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetRealtimeExecStatusResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetRealtimeExecStatusResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReportExecStatusParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "report_exec_status_params", thrift.STRUCT, 2) + offset += p.ReportExecStatusParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetRealtimeExecStatusResponse) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetRealtimeExecStatusResponse) field2Length() int { + l := 0 + if p.IsSetReportExecStatusParams() { + l += bthrift.Binary.FieldBeginLength("report_exec_status_params", thrift.STRUCT, 2) + l += p.ReportExecStatusParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceExecPlanFragmentArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTExecPlanFragmentParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceExecPlanFragmentArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceExecPlanFragmentArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("exec_plan_fragment_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceExecPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceExecPlanFragmentArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceExecPlanFragmentResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { 
+ goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTExecPlanFragmentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceExecPlanFragmentResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceExecPlanFragmentResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("exec_plan_fragment_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceExecPlanFragmentResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceCancelPlanFragmentArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = 
bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTCancelPlanFragmentParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceCancelPlanFragmentArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceCancelPlanFragmentArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceCancelPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceCancelPlanFragmentArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceCancelPlanFragmentResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTCancelPlanFragmentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceCancelPlanFragmentResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceCancelPlanFragmentResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceCancelPlanFragmentResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err 
!= nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTTransmitDataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceTransmitDataArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceTransmitDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceTransmitDataArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("transmit_data_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceTransmitDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceTransmitDataArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceTransmitDataResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = 
p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTTransmitDataResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceTransmitDataResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceTransmitDataResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("transmit_data_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceTransmitDataResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + 
} + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size) + for i := 0; i < size; i++ { + _elem := agentservice.NewTAgentTaskRequest() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Tasks = append(p.Tasks, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *BackendServiceSubmitTasksArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceSubmitTasksArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceSubmitTasksArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("submit_tasks_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceSubmitTasksArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tasks { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + 
+func (p *BackendServiceSubmitTasksArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks)) + for _, v := range p.Tasks { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceSubmitTasksResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTAgentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceSubmitTasksResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceSubmitTasksResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("submit_tasks_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceSubmitTasksResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceSubmitTasksResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTSnapshotRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SnapshotRequest = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceMakeSnapshotArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceMakeSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceMakeSnapshotArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("make_snapshot_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceMakeSnapshotArgs) 
fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_request", thrift.STRUCT, 1) + offset += p.SnapshotRequest.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceMakeSnapshotArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("snapshot_request", thrift.STRUCT, 1) + l += p.SnapshotRequest.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceMakeSnapshotResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTAgentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceMakeSnapshotResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceMakeSnapshotResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("make_snapshot_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += 
bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceMakeSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceMakeSnapshotResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceReleaseSnapshotArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SnapshotPath = v + + } + return offset, nil +} + +// for compatibility +func (p *BackendServiceReleaseSnapshotArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceReleaseSnapshotArgs) BLength() int { + l := 0 + l += 
bthrift.Binary.StructBeginLength("release_snapshot_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceReleaseSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_path", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.SnapshotPath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceReleaseSnapshotArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("snapshot_path", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.SnapshotPath) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceReleaseSnapshotResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTAgentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServiceReleaseSnapshotResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + 
offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceReleaseSnapshotResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("release_snapshot_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServiceReleaseSnapshotResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *BackendServicePublishClusterStateArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServicePublishClusterStateArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTAgentPublishRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServicePublishClusterStateArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], 
"publish_cluster_state_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServicePublishClusterStateArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("publish_cluster_state_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *BackendServicePublishClusterStateArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServicePublishClusterStateArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServicePublishClusterStateResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := agentservice.NewTAgentResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *BackendServicePublishClusterStateResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *BackendServicePublishClusterStateResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("publish_cluster_state_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} +func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *BackendServicePublishClusterStateResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } - return l + return l +} + +func (p *BackendServiceSubmitExportTaskArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackendServiceSubmitExportTaskArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTExportTaskRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil } -func (p 
*TIngestBinlogRequest) field5Length() int { - l := 0 - if p.IsSetRemotePort() { - l += bthrift.Binary.FieldBeginLength("remote_port", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.RemotePort) +// for compatibility +func (p *BackendServiceSubmitExportTaskArgs) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TIngestBinlogRequest) field6Length() int { +func (p *BackendServiceSubmitExportTaskArgs) BLength() int { l := 0 - if p.IsSetPartitionId() { - l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.PartitionId) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("submit_export_task_args") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TIngestBinlogRequest) field7Length() int { - l := 0 - if p.IsSetLocalTabletId() { - l += bthrift.Binary.FieldBeginLength("local_tablet_id", thrift.I64, 7) - l += bthrift.Binary.I64Length(*p.LocalTabletId) - - l += bthrift.Binary.FieldEndLength() - } - return l +func (p *BackendServiceSubmitExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TIngestBinlogRequest) field8Length() int { +func (p *BackendServiceSubmitExportTaskArgs) field1Length() int { l := 0 - if p.IsSetLoadId() { - l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 8) - l += p.LoadId.BLength() - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) { +func (p *BackendServiceSubmitExportTaskResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -4945,9 +13405,9 @@ func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -4985,7 +13445,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -4994,7 +13454,7 @@ ReadStructEndError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -5003,58 +13463,58 @@ func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) { } else { offset += l } - p.Status = tmp + p.Success = tmp return offset, nil } // for compatibility -func (p *TIngestBinlogResult_) FastWrite(buf []byte) int { +func (p *BackendServiceSubmitExportTaskResult) FastWrite(buf []byte) int { return 0 } -func (p *TIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TIngestBinlogResult_) BLength() int { +func (p *BackendServiceSubmitExportTaskResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TIngestBinlogResult") + l += bthrift.Binary.StructBeginLength("submit_export_task_result") if p != nil { - l += p.field1Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TIngestBinlogResult_) field1Length() int { +func (p *BackendServiceSubmitExportTaskResult) field0Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *BackendServiceExecPlanFragmentArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetExportStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5116,7 +13576,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) ReadFieldEndError: @@ -5125,27 +13585,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceGetExportStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTExecPlanFragmentParams() + tmp := types.NewTUniqueId() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.TaskId = tmp return offset, nil } // for compatibility -func (p *BackendServiceExecPlanFragmentArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetExportStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -5154,9 +13614,9 @@ func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *BackendServiceExecPlanFragmentArgs) BLength() int { +func (p *BackendServiceGetExportStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("exec_plan_fragment_args") + l += bthrift.Binary.StructBeginLength("get_export_status_args") if p != nil { l += p.field1Length() } @@ -5165,23 +13625,23 @@ func (p *BackendServiceExecPlanFragmentArgs) BLength() int { return l } -func (p *BackendServiceExecPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetExportStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1) + offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceExecPlanFragmentArgs) field1Length() int { +func (p *BackendServiceGetExportStatusArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1) + l += p.TaskId.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceExecPlanFragmentResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetExportStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5243,7 +13703,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip 
type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -5252,10 +13712,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTExecPlanFragmentResult_() + tmp := palointernalservice.NewTExportStatusResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -5266,13 +13726,13 @@ func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *BackendServiceExecPlanFragmentResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetExportStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -5281,9 +13741,9 @@ func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *BackendServiceExecPlanFragmentResult) BLength() int { +func (p *BackendServiceGetExportStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("exec_plan_fragment_result") + l += bthrift.Binary.StructBeginLength("get_export_status_result") if p != nil { l += p.field0Length() } @@ -5292,7 +13752,7 @@ func (p *BackendServiceExecPlanFragmentResult) BLength() int { return l } -func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -5302,7 +13762,7 @@ func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binar return offset } -func (p *BackendServiceExecPlanFragmentResult) field0Length() int { +func (p *BackendServiceGetExportStatusResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -5312,7 +13772,7 @@ func (p *BackendServiceExecPlanFragmentResult) field0Length() int { return l } -func (p *BackendServiceCancelPlanFragmentArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5374,7 +13834,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ 
-5383,27 +13843,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceEraseExportTaskArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTCancelPlanFragmentParams() + tmp := types.NewTUniqueId() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.TaskId = tmp return offset, nil } // for compatibility -func (p *BackendServiceCancelPlanFragmentArgs) FastWrite(buf []byte) int { +func (p *BackendServiceEraseExportTaskArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -5412,9 +13872,9 @@ func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binar return offset } -func (p *BackendServiceCancelPlanFragmentArgs) BLength() int { +func (p *BackendServiceEraseExportTaskArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_args") + l += bthrift.Binary.StructBeginLength("erase_export_task_args") if p != nil { l += p.field1Length() } @@ -5423,23 +13883,23 @@ func (p *BackendServiceCancelPlanFragmentArgs) BLength() int { return l } -func (p *BackendServiceCancelPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceEraseExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1) + offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceCancelPlanFragmentArgs) field1Length() int { +func (p *BackendServiceEraseExportTaskArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1) + l += p.TaskId.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceCancelPlanFragmentResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceEraseExportTaskResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5501,7 +13961,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, 
fieldId, fieldTypeId), err) ReadFieldEndError: @@ -5510,10 +13970,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTCancelPlanFragmentResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -5524,13 +13984,13 @@ func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int } // for compatibility -func (p *BackendServiceCancelPlanFragmentResult) FastWrite(buf []byte) int { +func (p *BackendServiceEraseExportTaskResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -5539,9 +13999,9 @@ func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, bin return offset } -func (p *BackendServiceCancelPlanFragmentResult) BLength() int { +func (p *BackendServiceEraseExportTaskResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_result") + l += bthrift.Binary.StructBeginLength("erase_export_task_result") if p != nil { l += p.field0Length() } @@ -5550,7 +14010,7 @@ func (p *BackendServiceCancelPlanFragmentResult) BLength() int { return l } -func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -5560,7 +14020,7 @@ func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, bin return offset } -func (p *BackendServiceCancelPlanFragmentResult) field0Length() int { +func (p *BackendServiceEraseExportTaskResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -5570,7 +14030,7 @@ func (p *BackendServiceCancelPlanFragmentResult) field0Length() int { return l } -func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5591,27 +14051,10 @@ func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } l, 
err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -5631,73 +14074,40 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *BackendServiceTransmitDataArgs) FastReadField1(buf []byte) (int, error) { - offset := 0 - - tmp := palointernalservice.NewTTransmitDataParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.Params = tmp - return offset, nil +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } // for compatibility -func (p *BackendServiceTransmitDataArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetTabletStatArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceTransmitDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_args") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceTransmitDataArgs) BLength() int { +func (p *BackendServiceGetTabletStatArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("transmit_data_args") + l += bthrift.Binary.StructBeginLength("get_tablet_stat_args") if p != nil { - l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceTransmitDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *BackendServiceTransmitDataArgs) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *BackendServiceTransmitDataResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTabletStatResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5759,7 +14169,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -5768,10 +14178,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetTabletStatResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTTransmitDataResult_() + tmp := NewTTabletStatResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -5782,13 +14192,13 @@ func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, erro } // for compatibility -func (p *BackendServiceTransmitDataResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetTabletStatResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -5797,9 +14207,9 @@ func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *BackendServiceTransmitDataResult) BLength() int { +func (p *BackendServiceGetTabletStatResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("transmit_data_result") + l += bthrift.Binary.StructBeginLength("get_tablet_stat_result") if p != nil { l += p.field0Length() } @@ -5808,7 +14218,7 @@ func (p *BackendServiceTransmitDataResult) BLength() int { return l } -func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -5818,7 +14228,7 @@ func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWri return offset } -func (p *BackendServiceTransmitDataResult) field0Length() int { +func (p *BackendServiceGetTabletStatResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -5828,7 +14238,7 @@ func (p *BackendServiceTransmitDataResult) field0Length() int { return l } -func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -5849,27 +14259,10 @@ func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } 
- default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -5889,8 +14282,6 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -5899,89 +14290,32 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksArgs) FastReadField1(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size) - for i := 0; i < size; i++ { - _elem := agentservice.NewTAgentTaskRequest() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Tasks = append(p.Tasks, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - // for compatibility -func (p *BackendServiceSubmitTasksArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetTrashUsedCapacityArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitTasksArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_args") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceSubmitTasksArgs) BLength() int { +func (p *BackendServiceGetTrashUsedCapacityArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_tasks_args") + l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_args") if p != nil { - l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceSubmitTasksArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Tasks { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *BackendServiceSubmitTasksArgs) field1Length() int { - l := 0 - l += 
bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks)) - for _, v := range p.Tasks { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6004,7 +14338,7 @@ func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) { } switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { @@ -6043,7 +14377,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6052,27 +14386,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitTasksResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := agentservice.NewTAgentResult_() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Success = &v + } - p.Success = tmp return offset, nil } // for compatibility -func (p *BackendServiceSubmitTasksResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetTrashUsedCapacityResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -6081,9 +14415,9 @@ func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *BackendServiceSubmitTasksResult) BLength() int { +func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_tasks_result") + l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_result") if p != nil { l += p.field0Length() } @@ -6092,27 +14426,29 @@ func (p *BackendServiceSubmitTasksResult) BLength() int { return l } -func (p *BackendServiceSubmitTasksResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) - offset += 
p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.I64, 0) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Success) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *BackendServiceSubmitTasksResult) field0Length() int { +func (p *BackendServiceGetTrashUsedCapacityResult) field0Length() int { l := 0 if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) - l += p.Success.BLength() + l += bthrift.Binary.FieldBeginLength("success", thrift.I64, 0) + l += bthrift.Binary.I64Length(*p.Success) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6133,27 +14469,10 @@ func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -6173,8 +14492,6 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6183,63 +14500,32 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotArgs) FastReadField1(buf []byte) (int, error) { - offset := 0 - - tmp := agentservice.NewTSnapshotRequest() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.SnapshotRequest = tmp - return offset, nil -} - // for compatibility -func (p *BackendServiceMakeSnapshotArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceMakeSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_args") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceMakeSnapshotArgs) BLength() int { +func (p *BackendServiceGetDiskTrashUsedCapacityArgs) 
BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("make_snapshot_args") + l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_args") if p != nil { - l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceMakeSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_request", thrift.STRUCT, 1) - offset += p.SnapshotRequest.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *BackendServiceMakeSnapshotArgs) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("snapshot_request", thrift.STRUCT, 1) - l += p.SnapshotRequest.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6262,7 +14548,7 @@ func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) { } switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { @@ -6301,7 +14587,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6310,27 +14596,41 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceMakeSnapshotResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := agentservice.NewTAgentResult_() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Success = make([]*TDiskTrashInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTDiskTrashInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Success = append(p.Success, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Success = tmp return offset, nil } // for compatibility -func (p *BackendServiceMakeSnapshotResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_result") + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -6339,9 +14639,9 @@ func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *BackendServiceMakeSnapshotResult) BLength() int { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("make_snapshot_result") + l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_result") if p != nil { l += p.field0Length() } @@ -6350,27 +14650,39 @@ func (p *BackendServiceMakeSnapshotResult) BLength() int { return l } -func (p *BackendServiceMakeSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) - offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.LIST, 0) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Success { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *BackendServiceMakeSnapshotResult) field0Length() int { +func (p *BackendServiceGetDiskTrashUsedCapacityResult) field0Length() int { l := 0 if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) - l += p.Success.BLength() + l += bthrift.Binary.FieldBeginLength("success", thrift.LIST, 0) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Success)) + for _, v := range p.Success { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6393,7 +14705,7 @@ func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -6432,7 +14744,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6441,28 +14753,41 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotArgs) FastReadField1(buf []byte) (int, error) { +func (p 
*BackendServiceSubmitRoutineLoadTaskArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tasks = make([]*TRoutineLoadTask, 0, size) + for i := 0; i < size; i++ { + _elem := NewTRoutineLoadTask() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Tasks = append(p.Tasks, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.SnapshotPath = v - } return offset, nil } // for compatibility -func (p *BackendServiceReleaseSnapshotArgs) FastWrite(buf []byte) int { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -6471,9 +14796,9 @@ func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *BackendServiceReleaseSnapshotArgs) BLength() int { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("release_snapshot_args") + l += bthrift.Binary.StructBeginLength("submit_routine_load_task_args") if p != nil { l += p.field1Length() } @@ -6482,25 +14807,35 @@ func (p *BackendServiceReleaseSnapshotArgs) BLength() int { return l } -func (p *BackendServiceReleaseSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_path", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.SnapshotPath) - + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tasks { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceReleaseSnapshotArgs) field1Length() int { +func (p *BackendServiceSubmitRoutineLoadTaskArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("snapshot_path", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.SnapshotPath) - + l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks)) + for _, v := range p.Tasks { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceReleaseSnapshotResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) FastRead(buf []byte) (int, error) { var 
err error var offset int var l int @@ -6562,7 +14897,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6571,10 +14906,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := agentservice.NewTAgentResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -6585,13 +14920,13 @@ func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, e } // for compatibility -func (p *BackendServiceReleaseSnapshotResult) FastWrite(buf []byte) int { +func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -6600,9 +14935,9 @@ func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binary return offset } -func (p *BackendServiceReleaseSnapshotResult) BLength() int { +func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("release_snapshot_result") + l += bthrift.Binary.StructBeginLength("submit_routine_load_task_result") if p != nil { l += p.field0Length() } @@ -6611,7 +14946,7 @@ func (p *BackendServiceReleaseSnapshotResult) BLength() int { return l } -func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -6621,7 +14956,7 @@ func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binary return offset } -func (p *BackendServiceReleaseSnapshotResult) field0Length() int { +func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -6631,7 +14966,7 @@ func (p *BackendServiceReleaseSnapshotResult) field0Length() int { return l } -func (p *BackendServicePublishClusterStateArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceOpenScannerArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6693,7 +15028,7 @@ 
ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6702,27 +15037,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceOpenScannerArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := agentservice.NewTAgentPublishRequest() + tmp := dorisexternalservice.NewTScanOpenParams() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Request = tmp + p.Params = tmp return offset, nil } // for compatibility -func (p *BackendServicePublishClusterStateArgs) FastWrite(buf []byte) int { +func (p *BackendServiceOpenScannerArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -6731,9 +15066,9 @@ func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, bina return offset } -func (p *BackendServicePublishClusterStateArgs) BLength() int { +func (p *BackendServiceOpenScannerArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("publish_cluster_state_args") + l += bthrift.Binary.StructBeginLength("open_scanner_args") if p != nil { l += p.field1Length() } @@ -6742,23 +15077,23 @@ func (p *BackendServicePublishClusterStateArgs) BLength() int { return l } -func (p *BackendServicePublishClusterStateArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceOpenScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) - offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServicePublishClusterStateArgs) field1Length() int { +func (p *BackendServiceOpenScannerArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) - l += p.Request.BLength() + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServicePublishClusterStateResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceOpenScannerResult) FastRead(buf []byte) (int, error) { var err error var offset int var l 
int @@ -6820,7 +15155,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6829,10 +15164,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := agentservice.NewTAgentResult_() + tmp := dorisexternalservice.NewTScanOpenResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -6843,13 +15178,13 @@ func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (in } // for compatibility -func (p *BackendServicePublishClusterStateResult) FastWrite(buf []byte) int { +func (p *BackendServiceOpenScannerResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -6858,9 +15193,9 @@ func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf []byte, bi return offset } -func (p *BackendServicePublishClusterStateResult) BLength() int { +func (p *BackendServiceOpenScannerResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("publish_cluster_state_result") + l += bthrift.Binary.StructBeginLength("open_scanner_result") if p != nil { l += p.field0Length() } @@ -6869,7 +15204,7 @@ func (p *BackendServicePublishClusterStateResult) BLength() int { return l } -func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -6879,7 +15214,7 @@ func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, bi return offset } -func (p *BackendServicePublishClusterStateResult) field0Length() int { +func (p *BackendServiceOpenScannerResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -6889,7 +15224,7 @@ func (p *BackendServicePublishClusterStateResult) field0Length() int { return l } -func (p *BackendServiceSubmitExportTaskArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetNextArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6951,7 +15286,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6960,27 +15295,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceGetNextArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTExportTaskRequest() + tmp := dorisexternalservice.NewTScanNextBatchParams() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Request = tmp + p.Params = tmp return offset, nil } // for compatibility -func (p *BackendServiceSubmitExportTaskArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetNextArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -6989,9 +15324,9 @@ func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *BackendServiceSubmitExportTaskArgs) BLength() int { +func (p *BackendServiceGetNextArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_export_task_args") + l += bthrift.Binary.StructBeginLength("get_next_args") if p != nil { l += p.field1Length() } @@ -7000,23 +15335,23 @@ func (p *BackendServiceSubmitExportTaskArgs) BLength() int { return l } -func (p *BackendServiceSubmitExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetNextArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) - offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceSubmitExportTaskArgs) field1Length() int { +func (p *BackendServiceGetNextArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) - l += p.Request.BLength() + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceSubmitExportTaskResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetNextResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7078,7 +15413,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin 
error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7087,10 +15422,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := dorisexternalservice.NewTScanBatchResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -7101,13 +15436,13 @@ func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *BackendServiceSubmitExportTaskResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetNextResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -7116,9 +15451,9 @@ func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *BackendServiceSubmitExportTaskResult) BLength() int { +func (p *BackendServiceGetNextResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_export_task_result") + l += bthrift.Binary.StructBeginLength("get_next_result") if p != nil { l += p.field0Length() } @@ -7127,7 +15462,7 @@ func (p *BackendServiceSubmitExportTaskResult) BLength() int { return l } -func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -7137,7 +15472,7 @@ func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binar return offset } -func (p *BackendServiceSubmitExportTaskResult) field0Length() int { +func (p *BackendServiceGetNextResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -7147,7 +15482,7 @@ func (p *BackendServiceSubmitExportTaskResult) field0Length() int { return l } -func (p *BackendServiceGetExportStatusArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCloseScannerArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7209,7 +15544,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7218,27 +15553,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceCloseScannerArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() + tmp := dorisexternalservice.NewTScanCloseParams() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.TaskId = tmp + p.Params = tmp return offset, nil } // for compatibility -func (p *BackendServiceGetExportStatusArgs) FastWrite(buf []byte) int { +func (p *BackendServiceCloseScannerArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -7247,9 +15582,9 @@ func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *BackendServiceGetExportStatusArgs) BLength() int { +func (p *BackendServiceCloseScannerArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_export_status_args") + l += bthrift.Binary.StructBeginLength("close_scanner_args") if p != nil { l += p.field1Length() } @@ -7258,23 +15593,23 @@ func (p *BackendServiceGetExportStatusArgs) BLength() int { return l } -func (p *BackendServiceGetExportStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCloseScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1) - offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceGetExportStatusArgs) field1Length() int { +func (p *BackendServiceCloseScannerArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1) - l += p.TaskId.BLength() + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceGetExportStatusResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCloseScannerResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7336,7 +15671,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7345,10 +15680,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTExportStatusResult_() + tmp := dorisexternalservice.NewTScanCloseResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -7359,13 +15694,13 @@ func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, e } // for compatibility -func (p *BackendServiceGetExportStatusResult) FastWrite(buf []byte) int { +func (p *BackendServiceCloseScannerResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -7374,9 +15709,9 @@ func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binary return offset } -func (p *BackendServiceGetExportStatusResult) BLength() int { +func (p *BackendServiceCloseScannerResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_export_status_result") + l += bthrift.Binary.StructBeginLength("close_scanner_result") if p != nil { l += p.field0Length() } @@ -7385,7 +15720,7 @@ func (p *BackendServiceGetExportStatusResult) BLength() int { return l } -func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -7395,7 +15730,7 @@ func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binary return offset } -func (p *BackendServiceGetExportStatusResult) field0Length() int { +func (p *BackendServiceCloseScannerResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -7405,7 +15740,7 @@ func (p *BackendServiceGetExportStatusResult) field0Length() int { return l } -func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7428,7 +15763,7 @@ func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -7467,7 +15802,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T 
read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7476,27 +15811,28 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceGetStreamLoadRecordArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.LastStreamRecordTime = v + } - p.TaskId = tmp return offset, nil } // for compatibility -func (p *BackendServiceEraseExportTaskArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetStreamLoadRecordArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -7505,9 +15841,9 @@ func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *BackendServiceEraseExportTaskArgs) BLength() int { +func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("erase_export_task_args") + l += bthrift.Binary.StructBeginLength("get_stream_load_record_args") if p != nil { l += p.field1Length() } @@ -7516,23 +15852,25 @@ func (p *BackendServiceEraseExportTaskArgs) BLength() int { return l } -func (p *BackendServiceEraseExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetStreamLoadRecordArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1) - offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_stream_record_time", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.LastStreamRecordTime) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceEraseExportTaskArgs) field1Length() int { +func (p *BackendServiceGetStreamLoadRecordArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1) - l += p.TaskId.BLength() + l += bthrift.Binary.FieldBeginLength("last_stream_record_time", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.LastStreamRecordTime) + l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceEraseExportTaskResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetStreamLoadRecordResult) FastRead(buf []byte) (int, error) { 
var err error var offset int var l int @@ -7594,7 +15932,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7603,10 +15941,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTStreamLoadRecordResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -7617,13 +15955,13 @@ func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, e } // for compatibility -func (p *BackendServiceEraseExportTaskResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetStreamLoadRecordResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -7632,9 +15970,9 @@ func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binary return offset } -func (p *BackendServiceEraseExportTaskResult) BLength() int { +func (p *BackendServiceGetStreamLoadRecordResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("erase_export_task_result") + l += bthrift.Binary.StructBeginLength("get_stream_load_record_result") if p != nil { l += p.field0Length() } @@ -7643,7 +15981,7 @@ func (p *BackendServiceEraseExportTaskResult) BLength() int { return l } -func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -7653,7 +15991,7 @@ func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binary return offset } -func (p *BackendServiceEraseExportTaskResult) field0Length() int { +func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -7663,7 +16001,7 @@ func (p *BackendServiceEraseExportTaskResult) field0Length() int { return l } -func (p *BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCheckStorageFormatArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7687,7 +16025,7 @@ func (p 
*BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l if err != nil { - goto SkipFieldTypeError + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -7707,9 +16045,8 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: @@ -7717,13 +16054,13 @@ ReadStructEndError: } // for compatibility -func (p *BackendServiceGetTabletStatArgs) FastWrite(buf []byte) int { +func (p *BackendServiceCheckStorageFormatArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckStorageFormatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_args") if p != nil { } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -7731,9 +16068,9 @@ func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *BackendServiceGetTabletStatArgs) BLength() int { +func (p *BackendServiceCheckStorageFormatArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_tablet_stat_args") + l += bthrift.Binary.StructBeginLength("check_storage_format_args") if p != nil { } l += bthrift.Binary.FieldStopLength() @@ -7741,7 +16078,7 @@ func (p *BackendServiceGetTabletStatArgs) BLength() int { return l } -func (p *BackendServiceGetTabletStatResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCheckStorageFormatResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7803,7 +16140,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7812,10 +16149,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTabletStatResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTTabletStatResult_() + tmp := NewTCheckStorageFormatResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -7826,13 +16163,13 @@ func (p *BackendServiceGetTabletStatResult) FastReadField0(buf 
[]byte) (int, err } // for compatibility -func (p *BackendServiceGetTabletStatResult) FastWrite(buf []byte) int { +func (p *BackendServiceCheckStorageFormatResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -7841,9 +16178,9 @@ func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *BackendServiceGetTabletStatResult) BLength() int { +func (p *BackendServiceCheckStorageFormatResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_tablet_stat_result") + l += bthrift.Binary.StructBeginLength("check_storage_format_result") if p != nil { l += p.field0Length() } @@ -7852,7 +16189,7 @@ func (p *BackendServiceGetTabletStatResult) BLength() int { return l } -func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -7862,7 +16199,7 @@ func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWr return offset } -func (p *BackendServiceGetTabletStatResult) field0Length() int { +func (p *BackendServiceCheckStorageFormatResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -7872,7 +16209,7 @@ func (p *BackendServiceGetTabletStatResult) field0Length() int { return l } -func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceWarmUpCacheAsyncArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7893,10 +16230,27 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, erro if fieldTypeId == thrift.STOP { break } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -7916,41 +16270,73 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncArgs[fieldId]), err) +SkipFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } +func (p *BackendServiceWarmUpCacheAsyncArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTWarmUpCacheAsyncRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil +} + // for compatibility -func (p *BackendServiceGetTrashUsedCapacityArgs) FastWrite(buf []byte) int { +func (p *BackendServiceWarmUpCacheAsyncArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpCacheAsyncArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_cache_async_args") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceGetTrashUsedCapacityArgs) BLength() int { +func (p *BackendServiceWarmUpCacheAsyncArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_args") + l += bthrift.Binary.StructBeginLength("warm_up_cache_async_args") if p != nil { + l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceWarmUpCacheAsyncArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceWarmUpCacheAsyncArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceWarmUpCacheAsyncResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7973,7 +16359,7 @@ func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, er } switch fieldId { case 0: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { @@ -8012,7 +16398,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8021,27 +16407,27 @@ 
ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceWarmUpCacheAsyncResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTWarmUpCacheAsyncResponse() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Success = &v - } + p.Success = tmp return offset, nil } // for compatibility -func (p *BackendServiceGetTrashUsedCapacityResult) FastWrite(buf []byte) int { +func (p *BackendServiceWarmUpCacheAsyncResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpCacheAsyncResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_cache_async_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -8050,9 +16436,9 @@ func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, b return offset } -func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int { +func (p *BackendServiceWarmUpCacheAsyncResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_result") + l += bthrift.Binary.StructBeginLength("warm_up_cache_async_result") if p != nil { l += p.field0Length() } @@ -8061,29 +16447,27 @@ func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int { return l } -func (p *BackendServiceGetTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpCacheAsyncResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.I64, 0) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Success) - + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *BackendServiceGetTrashUsedCapacityResult) field0Length() int { +func (p *BackendServiceWarmUpCacheAsyncResult) field0Length() int { l := 0 if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.I64, 0) - l += bthrift.Binary.I64Length(*p.Success) - + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8104,10 +16488,27 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int, if fieldTypeId == thrift.STOP { break } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -8127,41 +16528,73 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTCheckWarmUpCacheAsyncRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil +} + // for compatibility -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWrite(buf []byte) int { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_warm_up_cache_async_args") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceGetDiskTrashUsedCapacityArgs) BLength() int { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_args") + l += bthrift.Binary.StructBeginLength("check_warm_up_cache_async_args") if p != nil { + l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastRead(buf []byte) (int, error) { var err error var offset int 
var l int @@ -8184,7 +16617,7 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int } switch fieldId { case 0: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { @@ -8223,7 +16656,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8232,41 +16665,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Success = make([]*TDiskTrashInfo, 0, size) - for i := 0; i < size; i++ { - _elem := NewTDiskTrashInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Success = append(p.Success, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := NewTCheckWarmUpCacheAsyncResponse() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.Success = tmp return offset, nil } // for compatibility -func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWrite(buf []byte) int { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_warm_up_cache_async_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -8275,9 +16694,9 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byt return offset } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_result") + l += bthrift.Binary.StructBeginLength("check_warm_up_cache_async_result") if p != nil { l += p.field0Length() } @@ -8286,39 +16705,27 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int { return l } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", 
thrift.LIST, 0) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Success { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *BackendServiceGetDiskTrashUsedCapacityResult) field0Length() int { +func (p *BackendServiceCheckWarmUpCacheAsyncResult) field0Length() int { l := 0 if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.LIST, 0) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Success)) - for _, v := range p.Success { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceSyncLoadForTabletsArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8341,7 +16748,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, err } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -8380,7 +16787,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8389,41 +16796,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceSyncLoadForTabletsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Tasks = make([]*TRoutineLoadTask, 0, size) - for i := 0; i < size; i++ { - _elem := NewTRoutineLoadTask() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Tasks = append(p.Tasks, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := NewTSyncLoadForTabletsRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.Request = tmp return offset, nil } // for compatibility -func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWrite(buf []byte) int { +func (p *BackendServiceSyncLoadForTabletsArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p 
*BackendServiceSyncLoadForTabletsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "sync_load_for_tablets_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -8432,9 +16825,9 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, bi return offset } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int { +func (p *BackendServiceSyncLoadForTabletsArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_routine_load_task_args") + l += bthrift.Binary.StructBeginLength("sync_load_for_tablets_args") if p != nil { l += p.field1Length() } @@ -8443,35 +16836,23 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int { return l } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSyncLoadForTabletsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Tasks { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceSubmitRoutineLoadTaskArgs) field1Length() int { +func (p *BackendServiceSyncLoadForTabletsArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks)) - for _, v := range p.Tasks { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceSubmitRoutineLoadTaskResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceSyncLoadForTabletsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8533,7 +16914,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8542,10 +16923,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceSyncLoadForTabletsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + 
tmp := NewTSyncLoadForTabletsResponse() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -8556,13 +16937,13 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) ( } // for compatibility -func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWrite(buf []byte) int { +func (p *BackendServiceSyncLoadForTabletsResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSyncLoadForTabletsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "sync_load_for_tablets_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -8571,9 +16952,9 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte, return offset } -func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int { +func (p *BackendServiceSyncLoadForTabletsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("submit_routine_load_task_result") + l += bthrift.Binary.StructBeginLength("sync_load_for_tablets_result") if p != nil { l += p.field0Length() } @@ -8582,7 +16963,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int { return l } -func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceSyncLoadForTabletsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -8592,7 +16973,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte, return offset } -func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int { +func (p *BackendServiceSyncLoadForTabletsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -8602,7 +16983,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int { return l } -func (p *BackendServiceOpenScannerArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTopNHotPartitionsArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8664,7 +17045,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8673,27 +17054,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceOpenScannerArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceGetTopNHotPartitionsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := dorisexternalservice.NewTScanOpenParams() + tmp := 
NewTGetTopNHotPartitionsRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *BackendServiceOpenScannerArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetTopNHotPartitionsArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTopNHotPartitionsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_top_n_hot_partitions_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -8702,9 +17083,9 @@ func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *BackendServiceOpenScannerArgs) BLength() int { +func (p *BackendServiceGetTopNHotPartitionsArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("open_scanner_args") + l += bthrift.Binary.StructBeginLength("get_top_n_hot_partitions_args") if p != nil { l += p.field1Length() } @@ -8713,23 +17094,23 @@ func (p *BackendServiceOpenScannerArgs) BLength() int { return l } -func (p *BackendServiceOpenScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTopNHotPartitionsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceOpenScannerArgs) field1Length() int { +func (p *BackendServiceGetTopNHotPartitionsArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceOpenScannerResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetTopNHotPartitionsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8791,7 +17172,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8800,10 +17181,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetTopNHotPartitionsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := 
dorisexternalservice.NewTScanOpenResult_() + tmp := NewTGetTopNHotPartitionsResponse() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -8814,13 +17195,13 @@ func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error } // for compatibility -func (p *BackendServiceOpenScannerResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetTopNHotPartitionsResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTopNHotPartitionsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_top_n_hot_partitions_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -8829,9 +17210,9 @@ func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *BackendServiceOpenScannerResult) BLength() int { +func (p *BackendServiceGetTopNHotPartitionsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("open_scanner_result") + l += bthrift.Binary.StructBeginLength("get_top_n_hot_partitions_result") if p != nil { l += p.field0Length() } @@ -8840,7 +17221,7 @@ func (p *BackendServiceOpenScannerResult) BLength() int { return l } -func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetTopNHotPartitionsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -8850,7 +17231,7 @@ func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWrit return offset } -func (p *BackendServiceOpenScannerResult) field0Length() int { +func (p *BackendServiceGetTopNHotPartitionsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -8860,7 +17241,7 @@ func (p *BackendServiceOpenScannerResult) field0Length() int { return l } -func (p *BackendServiceGetNextArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceWarmUpTabletsArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8922,7 +17303,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8931,27 +17312,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetNextArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceWarmUpTabletsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := dorisexternalservice.NewTScanNextBatchParams() + tmp := NewTWarmUpTabletsRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { 
return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *BackendServiceGetNextArgs) FastWrite(buf []byte) int { +func (p *BackendServiceWarmUpTabletsArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpTabletsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_tablets_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -8960,9 +17341,9 @@ func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bth return offset } -func (p *BackendServiceGetNextArgs) BLength() int { +func (p *BackendServiceWarmUpTabletsArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_next_args") + l += bthrift.Binary.StructBeginLength("warm_up_tablets_args") if p != nil { l += p.field1Length() } @@ -8971,23 +17352,23 @@ func (p *BackendServiceGetNextArgs) BLength() int { return l } -func (p *BackendServiceGetNextArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpTabletsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceGetNextArgs) field1Length() int { +func (p *BackendServiceWarmUpTabletsArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceGetNextResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceWarmUpTabletsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9049,7 +17430,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9058,10 +17439,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceWarmUpTabletsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := dorisexternalservice.NewTScanBatchResult_() + tmp := NewTWarmUpTabletsResponse() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -9072,13 +17453,13 @@ func (p 
*BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) { } // for compatibility -func (p *BackendServiceGetNextResult) FastWrite(buf []byte) int { +func (p *BackendServiceWarmUpTabletsResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpTabletsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_tablets_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -9087,9 +17468,9 @@ func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter b return offset } -func (p *BackendServiceGetNextResult) BLength() int { +func (p *BackendServiceWarmUpTabletsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_next_result") + l += bthrift.Binary.StructBeginLength("warm_up_tablets_result") if p != nil { l += p.field0Length() } @@ -9098,7 +17479,7 @@ func (p *BackendServiceGetNextResult) BLength() int { return l } -func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceWarmUpTabletsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -9108,7 +17489,7 @@ func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter b return offset } -func (p *BackendServiceGetNextResult) field0Length() int { +func (p *BackendServiceWarmUpTabletsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -9118,7 +17499,7 @@ func (p *BackendServiceGetNextResult) field0Length() int { return l } -func (p *BackendServiceCloseScannerArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceIngestBinlogArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9180,7 +17561,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9189,27 +17570,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCloseScannerArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceIngestBinlogArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := dorisexternalservice.NewTScanCloseParams() + tmp := NewTIngestBinlogRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.IngestBinlogRequest = tmp return offset, nil } // for compatibility -func (p *BackendServiceCloseScannerArgs) FastWrite(buf []byte) int { +func (p *BackendServiceIngestBinlogArgs) FastWrite(buf []byte) int { return 0 } 
-func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -9218,9 +17599,9 @@ func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *BackendServiceCloseScannerArgs) BLength() int { +func (p *BackendServiceIngestBinlogArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("close_scanner_args") + l += bthrift.Binary.StructBeginLength("ingest_binlog_args") if p != nil { l += p.field1Length() } @@ -9229,23 +17610,23 @@ func (p *BackendServiceCloseScannerArgs) BLength() int { return l } -func (p *BackendServiceCloseScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ingest_binlog_request", thrift.STRUCT, 1) + offset += p.IngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceCloseScannerArgs) field1Length() int { +func (p *BackendServiceIngestBinlogArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("ingest_binlog_request", thrift.STRUCT, 1) + l += p.IngestBinlogRequest.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceCloseScannerResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceIngestBinlogResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9307,7 +17688,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9316,10 +17697,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := dorisexternalservice.NewTScanCloseResult_() + tmp := NewTIngestBinlogResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -9330,13 +17711,13 @@ func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, erro } // for compatibility -func (p *BackendServiceCloseScannerResult) FastWrite(buf []byte) int { +func (p 
*BackendServiceIngestBinlogResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -9345,9 +17726,9 @@ func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *BackendServiceCloseScannerResult) BLength() int { +func (p *BackendServiceIngestBinlogResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("close_scanner_result") + l += bthrift.Binary.StructBeginLength("ingest_binlog_result") if p != nil { l += p.field0Length() } @@ -9356,7 +17737,7 @@ func (p *BackendServiceCloseScannerResult) BLength() int { return l } -func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -9366,7 +17747,7 @@ func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWri return offset } -func (p *BackendServiceCloseScannerResult) field0Length() int { +func (p *BackendServiceIngestBinlogResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -9376,7 +17757,7 @@ func (p *BackendServiceCloseScannerResult) field0Length() int { return l } -func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceQueryIngestBinlogArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9399,7 +17780,7 @@ func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -9438,7 +17819,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9447,28 +17828,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceQueryIngestBinlogArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTQueryIngestBinlogRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.LastStreamRecordTime = v - } + 
p.QueryIngestBinlogRequest = tmp return offset, nil } // for compatibility -func (p *BackendServiceGetStreamLoadRecordArgs) FastWrite(buf []byte) int { +func (p *BackendServiceQueryIngestBinlogArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceQueryIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "query_ingest_binlog_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -9477,9 +17857,9 @@ func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, bina return offset } -func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int { +func (p *BackendServiceQueryIngestBinlogArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_stream_load_record_args") + l += bthrift.Binary.StructBeginLength("query_ingest_binlog_args") if p != nil { l += p.field1Length() } @@ -9488,25 +17868,23 @@ func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int { return l } -func (p *BackendServiceGetStreamLoadRecordArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceQueryIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_stream_record_time", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], p.LastStreamRecordTime) - + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_ingest_binlog_request", thrift.STRUCT, 1) + offset += p.QueryIngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceGetStreamLoadRecordArgs) field1Length() int { +func (p *BackendServiceQueryIngestBinlogArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("last_stream_record_time", thrift.I64, 1) - l += bthrift.Binary.I64Length(p.LastStreamRecordTime) - + l += bthrift.Binary.FieldBeginLength("query_ingest_binlog_request", thrift.STRUCT, 1) + l += p.QueryIngestBinlogRequest.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceGetStreamLoadRecordResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceQueryIngestBinlogResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9568,7 +17946,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9577,10 +17955,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceQueryIngestBinlogResult) 
FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTStreamLoadRecordResult_() + tmp := NewTQueryIngestBinlogResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -9591,13 +17969,13 @@ func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (in } // for compatibility -func (p *BackendServiceGetStreamLoadRecordResult) FastWrite(buf []byte) int { +func (p *BackendServiceQueryIngestBinlogResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceQueryIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "query_ingest_binlog_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -9606,9 +17984,9 @@ func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, bi return offset } -func (p *BackendServiceGetStreamLoadRecordResult) BLength() int { +func (p *BackendServiceQueryIngestBinlogResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("get_stream_load_record_result") + l += bthrift.Binary.StructBeginLength("query_ingest_binlog_result") if p != nil { l += p.field0Length() } @@ -9617,7 +17995,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) BLength() int { return l } -func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceQueryIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -9627,7 +18005,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, bi return offset } -func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int { +func (p *BackendServiceQueryIngestBinlogResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -9637,7 +18015,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int { return l } -func (p *BackendServiceCleanTrashArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServicePublishTopicInfoArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9658,10 +18036,27 @@ func (p *BackendServiceCleanTrashArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -9681,119 +18076,73 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) 
- -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } +func (p *BackendServicePublishTopicInfoArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTPublishTopicRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TopicRequest = tmp + return offset, nil +} + // for compatibility -func (p *BackendServiceCleanTrashArgs) FastWrite(buf []byte) int { +func (p *BackendServicePublishTopicInfoArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceCleanTrashArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServicePublishTopicInfoArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "clean_trash_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_topic_info_args") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *BackendServiceCleanTrashArgs) BLength() int { +func (p *BackendServicePublishTopicInfoArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("clean_trash_args") + l += bthrift.Binary.StructBeginLength("publish_topic_info_args") if p != nil { + l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *BackendServiceCheckStorageFormatArgs) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -// for 
compatibility -func (p *BackendServiceCheckStorageFormatArgs) FastWrite(buf []byte) int { - return 0 -} - -func (p *BackendServiceCheckStorageFormatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServicePublishTopicInfoArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_args") - if p != nil { - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topic_request", thrift.STRUCT, 1) + offset += p.TopicRequest.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceCheckStorageFormatArgs) BLength() int { +func (p *BackendServicePublishTopicInfoArgs) field1Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("check_storage_format_args") - if p != nil { - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() + l += bthrift.Binary.FieldBeginLength("topic_request", thrift.STRUCT, 1) + l += p.TopicRequest.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceCheckStorageFormatResult) FastRead(buf []byte) (int, error) { +func (p *BackendServicePublishTopicInfoResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9855,7 +18204,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9864,10 +18213,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServicePublishTopicInfoResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTCheckStorageFormatResult_() + tmp := NewTPublishTopicResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -9878,13 +18227,13 @@ func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int } // for compatibility -func (p *BackendServiceCheckStorageFormatResult) FastWrite(buf []byte) int { +func (p *BackendServicePublishTopicInfoResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServicePublishTopicInfoResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_topic_info_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -9893,9 +18242,9 @@ func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, bin return offset } -func (p 
*BackendServiceCheckStorageFormatResult) BLength() int { +func (p *BackendServicePublishTopicInfoResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("check_storage_format_result") + l += bthrift.Binary.StructBeginLength("publish_topic_info_result") if p != nil { l += p.field0Length() } @@ -9904,7 +18253,7 @@ func (p *BackendServiceCheckStorageFormatResult) BLength() int { return l } -func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServicePublishTopicInfoResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -9914,7 +18263,7 @@ func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, bin return offset } -func (p *BackendServiceCheckStorageFormatResult) field0Length() int { +func (p *BackendServicePublishTopicInfoResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -9924,7 +18273,7 @@ func (p *BackendServiceCheckStorageFormatResult) field0Length() int { return l } -func (p *BackendServiceIngestBinlogArgs) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetRealtimeExecStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9986,7 +18335,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9995,27 +18344,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogArgs) FastReadField1(buf []byte) (int, error) { +func (p *BackendServiceGetRealtimeExecStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTIngestBinlogRequest() + tmp := NewTGetRealtimeExecStatusRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.IngestBinlogRequest = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *BackendServiceIngestBinlogArgs) FastWrite(buf []byte) int { +func (p *BackendServiceGetRealtimeExecStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetRealtimeExecStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_realtime_exec_status_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -10024,9 +18373,9 @@ func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *BackendServiceIngestBinlogArgs) BLength() int { +func (p *BackendServiceGetRealtimeExecStatusArgs) BLength() int { l := 
0 - l += bthrift.Binary.StructBeginLength("ingest_binlog_args") + l += bthrift.Binary.StructBeginLength("get_realtime_exec_status_args") if p != nil { l += p.field1Length() } @@ -10035,23 +18384,23 @@ func (p *BackendServiceIngestBinlogArgs) BLength() int { return l } -func (p *BackendServiceIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetRealtimeExecStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ingest_binlog_request", thrift.STRUCT, 1) - offset += p.IngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *BackendServiceIngestBinlogArgs) field1Length() int { +func (p *BackendServiceGetRealtimeExecStatusArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("ingest_binlog_request", thrift.STRUCT, 1) - l += p.IngestBinlogRequest.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *BackendServiceIngestBinlogResult) FastRead(buf []byte) (int, error) { +func (p *BackendServiceGetRealtimeExecStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10113,7 +18462,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -10122,10 +18471,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, error) { +func (p *BackendServiceGetRealtimeExecStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTIngestBinlogResult_() + tmp := NewTGetRealtimeExecStatusResponse() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -10136,13 +18485,13 @@ func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, erro } // for compatibility -func (p *BackendServiceIngestBinlogResult) FastWrite(buf []byte) int { +func (p *BackendServiceGetRealtimeExecStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetRealtimeExecStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_realtime_exec_status_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -10151,9 +18500,9 @@ func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, 
binaryWri return offset } -func (p *BackendServiceIngestBinlogResult) BLength() int { +func (p *BackendServiceGetRealtimeExecStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("ingest_binlog_result") + l += bthrift.Binary.StructBeginLength("get_realtime_exec_status_result") if p != nil { l += p.field0Length() } @@ -10162,7 +18511,7 @@ func (p *BackendServiceIngestBinlogResult) BLength() int { return l } -func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *BackendServiceGetRealtimeExecStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -10172,7 +18521,7 @@ func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWri return offset } -func (p *BackendServiceIngestBinlogResult) field0Length() int { +func (p *BackendServiceGetRealtimeExecStatusResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -10326,10 +18675,6 @@ func (p *BackendServiceGetStreamLoadRecordResult) GetResult() interface{} { return p.Success } -func (p *BackendServiceCleanTrashArgs) GetFirstArgument() interface{} { - return nil -} - func (p *BackendServiceCheckStorageFormatArgs) GetFirstArgument() interface{} { return nil } @@ -10338,6 +18683,46 @@ func (p *BackendServiceCheckStorageFormatResult) GetResult() interface{} { return p.Success } +func (p *BackendServiceWarmUpCacheAsyncArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceWarmUpCacheAsyncResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServiceCheckWarmUpCacheAsyncArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceCheckWarmUpCacheAsyncResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServiceSyncLoadForTabletsArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceSyncLoadForTabletsResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServiceGetTopNHotPartitionsArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceGetTopNHotPartitionsResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServiceWarmUpTabletsArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceWarmUpTabletsResult) GetResult() interface{} { + return p.Success +} + func (p *BackendServiceIngestBinlogArgs) GetFirstArgument() interface{} { return p.IngestBinlogRequest } @@ -10345,3 +18730,27 @@ func (p *BackendServiceIngestBinlogArgs) GetFirstArgument() interface{} { func (p *BackendServiceIngestBinlogResult) GetResult() interface{} { return p.Success } + +func (p *BackendServiceQueryIngestBinlogArgs) GetFirstArgument() interface{} { + return p.QueryIngestBinlogRequest +} + +func (p *BackendServiceQueryIngestBinlogResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServicePublishTopicInfoArgs) GetFirstArgument() interface{} { + return p.TopicRequest +} + +func (p *BackendServicePublishTopicInfoResult) GetResult() interface{} { + return p.Success +} + +func (p *BackendServiceGetRealtimeExecStatusArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *BackendServiceGetRealtimeExecStatusResult) GetResult() interface{} { + return p.Success +} diff --git 
a/pkg/rpc/kitex_gen/data/Data.go b/pkg/rpc/kitex_gen/data/Data.go index 8494d4cc..780e5545 100644 --- a/pkg/rpc/kitex_gen/data/Data.go +++ b/pkg/rpc/kitex_gen/data/Data.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package data @@ -25,7 +25,6 @@ func NewTRowBatch() *TRowBatch { } func (p *TRowBatch) InitDefault() { - *p = TRowBatch{} } func (p *TRowBatch) GetNumRows() (v int32) { @@ -114,10 +113,8 @@ func (p *TRowBatch) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -125,67 +122,54 @@ func (p *TRowBatch) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRowTuples = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -222,21 +206,24 @@ RequiredFieldNotSetError: } func (p *TRowBatch) ReadField1(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumRows = v + _field = v } + p.NumRows = _field return nil } - func (p *TRowBatch) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RowTuples = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -244,21 +231,22 @@ func (p *TRowBatch) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.RowTuples = append(p.RowTuples, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RowTuples = _field return nil } - func (p *TRowBatch) ReadField3(iprot thrift.TProtocol) error { _, size, err := 
iprot.ReadListBegin() if err != nil { return err } - p.TupleOffsets = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -266,47 +254,56 @@ func (p *TRowBatch) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.TupleOffsets = append(p.TupleOffsets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TupleOffsets = _field return nil } - func (p *TRowBatch) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TupleData = v + _field = v } + p.TupleData = _field return nil } - func (p *TRowBatch) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsCompressed = v + _field = v } + p.IsCompressed = _field return nil } - func (p *TRowBatch) ReadField6(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BeNumber = v + _field = v } + p.BeNumber = _field return nil } - func (p *TRowBatch) ReadField7(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PacketSeq = v + _field = v } + p.PacketSeq = _field return nil } @@ -344,7 +341,6 @@ func (p *TRowBatch) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -503,6 +499,7 @@ func (p *TRowBatch) String() string { return "" } return fmt.Sprintf("TRowBatch(%+v)", *p) + } func (p *TRowBatch) DeepEqual(ano *TRowBatch) bool { @@ -603,6 +600,7 @@ type TCell struct { LongVal *int64 `thrift:"longVal,3,optional" frugal:"3,optional,i64" json:"longVal,omitempty"` DoubleVal *float64 `thrift:"doubleVal,4,optional" frugal:"4,optional,double" json:"doubleVal,omitempty"` StringVal *string `thrift:"stringVal,5,optional" frugal:"5,optional,string" json:"stringVal,omitempty"` + IsNull *bool `thrift:"isNull,6,optional" frugal:"6,optional,bool" json:"isNull,omitempty"` } func NewTCell() *TCell { @@ -610,7 +608,6 @@ func NewTCell() *TCell { } func (p *TCell) InitDefault() { - *p = TCell{} } var TCell_BoolVal_DEFAULT bool @@ -657,6 +654,15 @@ func (p *TCell) GetStringVal() (v string) { } return *p.StringVal } + +var TCell_IsNull_DEFAULT bool + +func (p *TCell) GetIsNull() (v bool) { + if !p.IsSetIsNull() { + return TCell_IsNull_DEFAULT + } + return *p.IsNull +} func (p *TCell) SetBoolVal(val *bool) { p.BoolVal = val } @@ -672,6 +678,9 @@ func (p *TCell) SetDoubleVal(val *float64) { func (p *TCell) SetStringVal(val *string) { p.StringVal = val } +func (p *TCell) SetIsNull(val *bool) { + p.IsNull = val +} var fieldIDToName_TCell = map[int16]string{ 1: "boolVal", @@ -679,6 +688,7 @@ var fieldIDToName_TCell = map[int16]string{ 3: "longVal", 4: "doubleVal", 5: "stringVal", + 6: "isNull", } func (p *TCell) IsSetBoolVal() bool { @@ -701,6 +711,10 @@ func (p *TCell) IsSetStringVal() bool { return p.StringVal != nil } +func (p *TCell) IsSetIsNull() bool { + return p.IsNull != nil +} + func (p *TCell) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -725,57 +739,54 @@ func (p *TCell) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.DOUBLE { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -801,47 +812,69 @@ ReadStructEndError: } func (p *TCell) ReadField1(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.BoolVal = &v + _field = &v } + p.BoolVal = _field return nil } - func (p *TCell) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.IntVal = &v + _field = &v } + p.IntVal = _field return nil } - func (p *TCell) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LongVal = &v + _field = &v } + p.LongVal = _field return nil } - func (p *TCell) ReadField4(iprot thrift.TProtocol) error { + + var _field *float64 if v, err := iprot.ReadDouble(); err != nil { return err } else { - p.DoubleVal = &v + _field = &v } + p.DoubleVal = _field return nil } - func (p *TCell) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.StringVal = &v + _field = &v } + p.StringVal = _field + return nil +} +func (p *TCell) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsNull = _field return nil } @@ -871,7 +904,10 @@ func (p *TCell) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -985,11 +1021,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *TCell) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNull() { + if err = oprot.WriteFieldBegin("isNull", thrift.BOOL, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsNull); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + func (p *TCell) String() string { if p == nil { return "" } return fmt.Sprintf("TCell(%+v)", *p) + } func (p *TCell) DeepEqual(ano *TCell) bool { @@ -1013,6 +1069,9 @@ func (p *TCell) DeepEqual(ano *TCell) bool { if !p.Field5DeepEqual(ano.StringVal) { return false } + if !p.Field6DeepEqual(ano.IsNull) { + return false + } return true } @@ -1076,6 +1135,18 @@ func (p *TCell) Field5DeepEqual(src *string) bool { } return true } +func (p *TCell) Field6DeepEqual(src *bool) bool { + + if p.IsNull == src { + return true + } else if p.IsNull == nil || src == nil { + return false + } + if *p.IsNull != *src { + return false + } + return true +} type TResultRow struct { ColVals []*TCell `thrift:"colVals,1" frugal:"1,default,list" json:"colVals"` @@ -1086,7 +1157,6 @@ func NewTResultRow() *TResultRow { } func (p *TResultRow) InitDefault() { - *p = TResultRow{} } func (p *TResultRow) GetColVals() (v []*TCell) { @@ -1124,17 +1194,14 @@ func (p *TResultRow) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1164,18 +1231,22 @@ func (p *TResultRow) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.ColVals = make([]*TCell, 0, size) + _field := make([]*TCell, 0, size) + values := make([]TCell, size) for i := 0; i < size; i++ { - _elem := NewTCell() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColVals = append(p.ColVals, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColVals = _field return nil } @@ -1189,7 +1260,6 @@ func (p *TResultRow) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1238,6 +1308,7 @@ func (p *TResultRow) String() string { return "" } return fmt.Sprintf("TResultRow(%+v)", *p) + } func (p *TResultRow) DeepEqual(ano *TResultRow) bool { @@ -1275,7 +1346,6 @@ func NewTRow() *TRow { } func (p *TRow) InitDefault() { - *p = TRow{} } var TRow_ColumnValue_DEFAULT []*TCell @@ -1322,17 +1392,14 @@ func (p *TRow) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1362,18 +1429,22 @@ func (p *TRow) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.ColumnValue = make([]*TCell, 0, size) + _field := make([]*TCell, 0, size) + values := make([]TCell, size) for i := 0; i < size; i++ { - _elem := NewTCell() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnValue = append(p.ColumnValue, 
_elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnValue = _field return nil } @@ -1387,7 +1458,6 @@ func (p *TRow) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1438,6 +1508,7 @@ func (p *TRow) String() string { return "" } return fmt.Sprintf("TRow(%+v)", *p) + } func (p *TRow) DeepEqual(ano *TRow) bool { @@ -1478,7 +1549,6 @@ func NewTResultBatch() *TResultBatch { } func (p *TResultBatch) InitDefault() { - *p = TResultBatch{} } func (p *TResultBatch) GetRows() (v [][]byte) { @@ -1553,10 +1623,8 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { @@ -1564,10 +1632,8 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsCompressed = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -1575,27 +1641,22 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPacketSeq = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1641,8 +1702,9 @@ func (p *TResultBatch) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Rows = make([][]byte, 0, size) + _field := make([][]byte, 0, size) for i := 0; i < size; i++ { + var _elem []byte if v, err := iprot.ReadBinary(); err != nil { return err @@ -1650,38 +1712,42 @@ func (p *TResultBatch) ReadField1(iprot thrift.TProtocol) error { _elem = []byte(v) } - p.Rows = append(p.Rows, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Rows = _field return nil } - func (p *TResultBatch) ReadField2(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsCompressed = v + _field = v } + p.IsCompressed = _field return nil } - func (p *TResultBatch) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PacketSeq = v + _field = v } + p.PacketSeq = _field return nil } - func (p *TResultBatch) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.AttachedInfos = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -1697,11 +1763,12 @@ func (p *TResultBatch) ReadField4(iprot thrift.TProtocol) error { _val = v } - p.AttachedInfos[_key] = _val + _field[_key] = _val } if err := 
iprot.ReadMapEnd(); err != nil { return err } + p.AttachedInfos = _field return nil } @@ -1727,7 +1794,6 @@ func (p *TResultBatch) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1814,11 +1880,9 @@ func (p *TResultBatch) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.AttachedInfos { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -1842,6 +1906,7 @@ func (p *TResultBatch) String() string { return "" } return fmt.Sprintf("TResultBatch(%+v)", *p) + } func (p *TResultBatch) DeepEqual(ano *TResultBatch) bool { diff --git a/pkg/rpc/kitex_gen/data/k-Data.go b/pkg/rpc/kitex_gen/data/k-Data.go index 6828fb31..fc31f974 100644 --- a/pkg/rpc/kitex_gen/data/k-Data.go +++ b/pkg/rpc/kitex_gen/data/k-Data.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package data @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) @@ -602,6 +603,20 @@ func (p *TCell) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -702,6 +717,19 @@ func (p *TCell) FastReadField5(buf []byte) (int, error) { return offset, nil } +func (p *TCell) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsNull = &v + + } + return offset, nil +} + // for compatibility func (p *TCell) FastWrite(buf []byte) int { return 0 @@ -715,6 +743,7 @@ func (p *TCell) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) i offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -731,6 +760,7 @@ func (p *TCell) BLength() int { l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -792,6 +822,17 @@ func (p *TCell) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) i return offset } +func (p *TCell) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNull() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isNull", thrift.BOOL, 6) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsNull) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TCell) field1Length() int { l := 0 if p.IsSetBoolVal() { @@ -847,6 +888,17 @@ func (p *TCell) field5Length() int { return l } +func (p *TCell) field6Length() int { + l := 0 + if p.IsSetIsNull() { + l += bthrift.Binary.FieldBeginLength("isNull", thrift.BOOL, 6) + l += bthrift.Binary.BoolLength(*p.IsNull) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TResultRow) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/datasinks/DataSinks.go b/pkg/rpc/kitex_gen/datasinks/DataSinks.go index ba8753c7..3b8244c7 100644 --- a/pkg/rpc/kitex_gen/datasinks/DataSinks.go +++ b/pkg/rpc/kitex_gen/datasinks/DataSinks.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package datasinks @@ -30,6 +30,9 @@ const ( TDataSinkType_JDBC_TABLE_SINK TDataSinkType = 9 TDataSinkType_MULTI_CAST_DATA_STREAM_SINK TDataSinkType = 10 TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK TDataSinkType = 11 + TDataSinkType_GROUP_COMMIT_BLOCK_SINK TDataSinkType = 12 + TDataSinkType_HIVE_TABLE_SINK TDataSinkType = 13 + TDataSinkType_ICEBERG_TABLE_SINK TDataSinkType = 14 ) func (p TDataSinkType) String() string { @@ -58,6 +61,12 @@ func (p TDataSinkType) String() string { return "MULTI_CAST_DATA_STREAM_SINK" case TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK: return "GROUP_COMMIT_OLAP_TABLE_SINK" + case TDataSinkType_GROUP_COMMIT_BLOCK_SINK: + return "GROUP_COMMIT_BLOCK_SINK" + case TDataSinkType_HIVE_TABLE_SINK: + return "HIVE_TABLE_SINK" + case TDataSinkType_ICEBERG_TABLE_SINK: + return "ICEBERG_TABLE_SINK" } return "" } @@ -88,6 +97,12 @@ func TDataSinkTypeFromString(s string) (TDataSinkType, error) { return TDataSinkType_MULTI_CAST_DATA_STREAM_SINK, nil case "GROUP_COMMIT_OLAP_TABLE_SINK": return TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK, nil + case "GROUP_COMMIT_BLOCK_SINK": + return TDataSinkType_GROUP_COMMIT_BLOCK_SINK, nil + case "HIVE_TABLE_SINK": + return TDataSinkType_HIVE_TABLE_SINK, nil + case "ICEBERG_TABLE_SINK": + return TDataSinkType_ICEBERG_TABLE_SINK, nil } return TDataSinkType(0), fmt.Errorf("not a valid TDataSinkType string") } @@ -499,6 +514,194 @@ func (p *TParquetRepetitionType) Value() (driver.Value, error) { return int64(*p), nil } +type TGroupCommitMode int64 + +const ( + TGroupCommitMode_SYNC_MODE TGroupCommitMode = 0 + TGroupCommitMode_ASYNC_MODE TGroupCommitMode = 1 + TGroupCommitMode_OFF_MODE TGroupCommitMode = 2 +) + +func (p TGroupCommitMode) String() string { + switch p { + case TGroupCommitMode_SYNC_MODE: + return "SYNC_MODE" + case TGroupCommitMode_ASYNC_MODE: + return "ASYNC_MODE" + case TGroupCommitMode_OFF_MODE: + return "OFF_MODE" + } + return "" +} + +func TGroupCommitModeFromString(s string) (TGroupCommitMode, error) { + switch s { + case "SYNC_MODE": + return TGroupCommitMode_SYNC_MODE, nil + case "ASYNC_MODE": + return TGroupCommitMode_ASYNC_MODE, nil + case "OFF_MODE": + return TGroupCommitMode_OFF_MODE, nil + } + return TGroupCommitMode(0), fmt.Errorf("not a valid TGroupCommitMode string") +} + +func TGroupCommitModePtr(v TGroupCommitMode) *TGroupCommitMode { return &v } +func (p *TGroupCommitMode) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TGroupCommitMode(result.Int64) + return +} + +func (p *TGroupCommitMode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type THiveColumnType int64 + +const ( + THiveColumnType_PARTITION_KEY THiveColumnType = 0 + THiveColumnType_REGULAR THiveColumnType = 1 + THiveColumnType_SYNTHESIZED THiveColumnType = 2 +) + +func (p THiveColumnType) String() string { + switch p { + case THiveColumnType_PARTITION_KEY: + return "PARTITION_KEY" + case THiveColumnType_REGULAR: + return "REGULAR" + case THiveColumnType_SYNTHESIZED: + return 
"SYNTHESIZED" + } + return "" +} + +func THiveColumnTypeFromString(s string) (THiveColumnType, error) { + switch s { + case "PARTITION_KEY": + return THiveColumnType_PARTITION_KEY, nil + case "REGULAR": + return THiveColumnType_REGULAR, nil + case "SYNTHESIZED": + return THiveColumnType_SYNTHESIZED, nil + } + return THiveColumnType(0), fmt.Errorf("not a valid THiveColumnType string") +} + +func THiveColumnTypePtr(v THiveColumnType) *THiveColumnType { return &v } +func (p *THiveColumnType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = THiveColumnType(result.Int64) + return +} + +func (p *THiveColumnType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TUpdateMode int64 + +const ( + TUpdateMode_NEW TUpdateMode = 0 + TUpdateMode_APPEND TUpdateMode = 1 + TUpdateMode_OVERWRITE TUpdateMode = 2 +) + +func (p TUpdateMode) String() string { + switch p { + case TUpdateMode_NEW: + return "NEW" + case TUpdateMode_APPEND: + return "APPEND" + case TUpdateMode_OVERWRITE: + return "OVERWRITE" + } + return "" +} + +func TUpdateModeFromString(s string) (TUpdateMode, error) { + switch s { + case "NEW": + return TUpdateMode_NEW, nil + case "APPEND": + return TUpdateMode_APPEND, nil + case "OVERWRITE": + return TUpdateMode_OVERWRITE, nil + } + return TUpdateMode(0), fmt.Errorf("not a valid TUpdateMode string") +} + +func TUpdateModePtr(v TUpdateMode) *TUpdateMode { return &v } +func (p *TUpdateMode) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TUpdateMode(result.Int64) + return +} + +func (p *TUpdateMode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type TFileContent int64 + +const ( + TFileContent_DATA TFileContent = 0 + TFileContent_POSITION_DELETES TFileContent = 1 + TFileContent_EQUALITY_DELETES TFileContent = 2 +) + +func (p TFileContent) String() string { + switch p { + case TFileContent_DATA: + return "DATA" + case TFileContent_POSITION_DELETES: + return "POSITION_DELETES" + case TFileContent_EQUALITY_DELETES: + return "EQUALITY_DELETES" + } + return "" +} + +func TFileContentFromString(s string) (TFileContent, error) { + switch s { + case "DATA": + return TFileContent_DATA, nil + case "POSITION_DELETES": + return TFileContent_POSITION_DELETES, nil + case "EQUALITY_DELETES": + return TFileContent_EQUALITY_DELETES, nil + } + return TFileContent(0), fmt.Errorf("not a valid TFileContent string") +} + +func TFileContentPtr(v TFileContent) *TFileContent { return &v } +func (p *TFileContent) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TFileContent(result.Int64) + return +} + +func (p *TFileContent) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TParquetSchema struct { SchemaRepetitionType *TParquetRepetitionType `thrift:"schema_repetition_type,1,optional" frugal:"1,optional,TParquetRepetitionType" json:"schema_repetition_type,omitempty"` SchemaDataType *TParquetDataType `thrift:"schema_data_type,2,optional" frugal:"2,optional,TParquetDataType" json:"schema_data_type,omitempty"` @@ -511,7 +714,6 @@ func NewTParquetSchema() *TParquetSchema { } func (p *TParquetSchema) InitDefault() { - *p = TParquetSchema{} } var TParquetSchema_SchemaRepetitionType_DEFAULT TParquetRepetitionType @@ -609,47 +811,38 @@ func (p *TParquetSchema) Read(iprot thrift.TProtocol) (err error) { if err = 
p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -675,41 +868,50 @@ ReadStructEndError: } func (p *TParquetSchema) ReadField1(iprot thrift.TProtocol) error { + + var _field *TParquetRepetitionType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TParquetRepetitionType(v) - p.SchemaRepetitionType = &tmp + _field = &tmp } + p.SchemaRepetitionType = _field return nil } - func (p *TParquetSchema) ReadField2(iprot thrift.TProtocol) error { + + var _field *TParquetDataType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TParquetDataType(v) - p.SchemaDataType = &tmp + _field = &tmp } + p.SchemaDataType = _field return nil } - func (p *TParquetSchema) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SchemaColumnName = &v + _field = &v } + p.SchemaColumnName = _field return nil } - func (p *TParquetSchema) ReadField4(iprot thrift.TProtocol) error { + + var _field *TParquetDataLogicalType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TParquetDataLogicalType(v) - p.SchemaDataLogicalType = &tmp + _field = &tmp } + p.SchemaDataLogicalType = _field return nil } @@ -735,7 +937,6 @@ func (p *TParquetSchema) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -835,6 +1036,7 @@ func (p *TParquetSchema) String() string { return "" } return fmt.Sprintf("TParquetSchema(%+v)", *p) + } func (p *TParquetSchema) DeepEqual(ano *TParquetSchema) bool { @@ -908,23 +1110,26 @@ func (p *TParquetSchema) Field4DeepEqual(src *TParquetDataLogicalType) bool { } type TResultFileSinkOptions struct { - FilePath string `thrift:"file_path,1,required" frugal:"1,required,string" json:"file_path"` - FileFormat plannodes.TFileFormatType `thrift:"file_format,2,required" frugal:"2,required,TFileFormatType" json:"file_format"` - ColumnSeparator *string `thrift:"column_separator,3,optional" frugal:"3,optional,string" json:"column_separator,omitempty"` - LineDelimiter *string `thrift:"line_delimiter,4,optional" frugal:"4,optional,string" json:"line_delimiter,omitempty"` - MaxFileSizeBytes *int64 `thrift:"max_file_size_bytes,5,optional" frugal:"5,optional,i64" json:"max_file_size_bytes,omitempty"` - BrokerAddresses []*types.TNetworkAddress 
`thrift:"broker_addresses,6,optional" frugal:"6,optional,list" json:"broker_addresses,omitempty"` - BrokerProperties map[string]string `thrift:"broker_properties,7,optional" frugal:"7,optional,map" json:"broker_properties,omitempty"` - SuccessFileName *string `thrift:"success_file_name,8,optional" frugal:"8,optional,string" json:"success_file_name,omitempty"` - Schema [][]string `thrift:"schema,9,optional" frugal:"9,optional,list>" json:"schema,omitempty"` - FileProperties map[string]string `thrift:"file_properties,10,optional" frugal:"10,optional,map" json:"file_properties,omitempty"` - ParquetSchemas []*TParquetSchema `thrift:"parquet_schemas,11,optional" frugal:"11,optional,list" json:"parquet_schemas,omitempty"` - ParquetCompressionType *TParquetCompressionType `thrift:"parquet_compression_type,12,optional" frugal:"12,optional,TParquetCompressionType" json:"parquet_compression_type,omitempty"` - ParquetDisableDictionary *bool `thrift:"parquet_disable_dictionary,13,optional" frugal:"13,optional,bool" json:"parquet_disable_dictionary,omitempty"` - ParquetVersion *TParquetVersion `thrift:"parquet_version,14,optional" frugal:"14,optional,TParquetVersion" json:"parquet_version,omitempty"` - OrcSchema *string `thrift:"orc_schema,15,optional" frugal:"15,optional,string" json:"orc_schema,omitempty"` - DeleteExistingFiles *bool `thrift:"delete_existing_files,16,optional" frugal:"16,optional,bool" json:"delete_existing_files,omitempty"` - FileSuffix *string `thrift:"file_suffix,17,optional" frugal:"17,optional,string" json:"file_suffix,omitempty"` + FilePath string `thrift:"file_path,1,required" frugal:"1,required,string" json:"file_path"` + FileFormat plannodes.TFileFormatType `thrift:"file_format,2,required" frugal:"2,required,TFileFormatType" json:"file_format"` + ColumnSeparator *string `thrift:"column_separator,3,optional" frugal:"3,optional,string" json:"column_separator,omitempty"` + LineDelimiter *string `thrift:"line_delimiter,4,optional" frugal:"4,optional,string" json:"line_delimiter,omitempty"` + MaxFileSizeBytes *int64 `thrift:"max_file_size_bytes,5,optional" frugal:"5,optional,i64" json:"max_file_size_bytes,omitempty"` + BrokerAddresses []*types.TNetworkAddress `thrift:"broker_addresses,6,optional" frugal:"6,optional,list" json:"broker_addresses,omitempty"` + BrokerProperties map[string]string `thrift:"broker_properties,7,optional" frugal:"7,optional,map" json:"broker_properties,omitempty"` + SuccessFileName *string `thrift:"success_file_name,8,optional" frugal:"8,optional,string" json:"success_file_name,omitempty"` + Schema [][]string `thrift:"schema,9,optional" frugal:"9,optional,list>" json:"schema,omitempty"` + FileProperties map[string]string `thrift:"file_properties,10,optional" frugal:"10,optional,map" json:"file_properties,omitempty"` + ParquetSchemas []*TParquetSchema `thrift:"parquet_schemas,11,optional" frugal:"11,optional,list" json:"parquet_schemas,omitempty"` + ParquetCompressionType *TParquetCompressionType `thrift:"parquet_compression_type,12,optional" frugal:"12,optional,TParquetCompressionType" json:"parquet_compression_type,omitempty"` + ParquetDisableDictionary *bool `thrift:"parquet_disable_dictionary,13,optional" frugal:"13,optional,bool" json:"parquet_disable_dictionary,omitempty"` + ParquetVersion *TParquetVersion `thrift:"parquet_version,14,optional" frugal:"14,optional,TParquetVersion" json:"parquet_version,omitempty"` + OrcSchema *string `thrift:"orc_schema,15,optional" frugal:"15,optional,string" json:"orc_schema,omitempty"` + DeleteExistingFiles *bool 
`thrift:"delete_existing_files,16,optional" frugal:"16,optional,bool" json:"delete_existing_files,omitempty"` + FileSuffix *string `thrift:"file_suffix,17,optional" frugal:"17,optional,string" json:"file_suffix,omitempty"` + WithBom *bool `thrift:"with_bom,18,optional" frugal:"18,optional,bool" json:"with_bom,omitempty"` + OrcCompressionType *plannodes.TFileCompressType `thrift:"orc_compression_type,19,optional" frugal:"19,optional,TFileCompressType" json:"orc_compression_type,omitempty"` + OrcWriterVersion *int64 `thrift:"orc_writer_version,20,optional" frugal:"20,optional,i64" json:"orc_writer_version,omitempty"` } func NewTResultFileSinkOptions() *TResultFileSinkOptions { @@ -932,7 +1137,6 @@ func NewTResultFileSinkOptions() *TResultFileSinkOptions { } func (p *TResultFileSinkOptions) InitDefault() { - *p = TResultFileSinkOptions{} } func (p *TResultFileSinkOptions) GetFilePath() (v string) { @@ -1077,6 +1281,33 @@ func (p *TResultFileSinkOptions) GetFileSuffix() (v string) { } return *p.FileSuffix } + +var TResultFileSinkOptions_WithBom_DEFAULT bool + +func (p *TResultFileSinkOptions) GetWithBom() (v bool) { + if !p.IsSetWithBom() { + return TResultFileSinkOptions_WithBom_DEFAULT + } + return *p.WithBom +} + +var TResultFileSinkOptions_OrcCompressionType_DEFAULT plannodes.TFileCompressType + +func (p *TResultFileSinkOptions) GetOrcCompressionType() (v plannodes.TFileCompressType) { + if !p.IsSetOrcCompressionType() { + return TResultFileSinkOptions_OrcCompressionType_DEFAULT + } + return *p.OrcCompressionType +} + +var TResultFileSinkOptions_OrcWriterVersion_DEFAULT int64 + +func (p *TResultFileSinkOptions) GetOrcWriterVersion() (v int64) { + if !p.IsSetOrcWriterVersion() { + return TResultFileSinkOptions_OrcWriterVersion_DEFAULT + } + return *p.OrcWriterVersion +} func (p *TResultFileSinkOptions) SetFilePath(val string) { p.FilePath = val } @@ -1128,6 +1359,15 @@ func (p *TResultFileSinkOptions) SetDeleteExistingFiles(val *bool) { func (p *TResultFileSinkOptions) SetFileSuffix(val *string) { p.FileSuffix = val } +func (p *TResultFileSinkOptions) SetWithBom(val *bool) { + p.WithBom = val +} +func (p *TResultFileSinkOptions) SetOrcCompressionType(val *plannodes.TFileCompressType) { + p.OrcCompressionType = val +} +func (p *TResultFileSinkOptions) SetOrcWriterVersion(val *int64) { + p.OrcWriterVersion = val +} var fieldIDToName_TResultFileSinkOptions = map[int16]string{ 1: "file_path", @@ -1147,6 +1387,9 @@ var fieldIDToName_TResultFileSinkOptions = map[int16]string{ 15: "orc_schema", 16: "delete_existing_files", 17: "file_suffix", + 18: "with_bom", + 19: "orc_compression_type", + 20: "orc_writer_version", } func (p *TResultFileSinkOptions) IsSetColumnSeparator() bool { @@ -1209,6 +1452,18 @@ func (p *TResultFileSinkOptions) IsSetFileSuffix() bool { return p.FileSuffix != nil } +func (p *TResultFileSinkOptions) IsSetWithBom() bool { + return p.WithBom != nil +} + +func (p *TResultFileSinkOptions) IsSetOrcCompressionType() bool { + return p.OrcCompressionType != nil +} + +func (p *TResultFileSinkOptions) IsSetOrcWriterVersion() bool { + return p.OrcWriterVersion != nil +} + func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -1236,10 +1491,8 @@ func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFilePath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: 
if fieldTypeId == thrift.I32 { @@ -1247,167 +1500,158 @@ func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFileFormat = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.MAP { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.MAP { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.LIST { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRING { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.BOOL { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.STRING { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.I32 { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.I64 { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1444,76 +1688,89 @@ RequiredFieldNotSetError: } func (p *TResultFileSinkOptions) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FilePath = v + _field = v } + p.FilePath = _field return nil } - func (p *TResultFileSinkOptions) ReadField2(iprot thrift.TProtocol) error { + + var _field plannodes.TFileFormatType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FileFormat = plannodes.TFileFormatType(v) + _field = plannodes.TFileFormatType(v) } + p.FileFormat = _field return nil } - func (p *TResultFileSinkOptions) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparator = &v + _field = &v } + p.ColumnSeparator = _field return nil } - func (p *TResultFileSinkOptions) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineDelimiter = &v + _field = &v } + p.LineDelimiter = _field return nil } - func (p *TResultFileSinkOptions) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxFileSizeBytes = &v + _field = &v } + p.MaxFileSizeBytes = _field return nil } - func (p *TResultFileSinkOptions) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.BrokerAddresses = append(p.BrokerAddresses, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); 
err != nil { return err } + p.BrokerAddresses = _field return nil } - func (p *TResultFileSinkOptions) ReadField7(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.BrokerProperties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -1529,29 +1786,31 @@ func (p *TResultFileSinkOptions) ReadField7(iprot thrift.TProtocol) error { _val = v } - p.BrokerProperties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.BrokerProperties = _field return nil } - func (p *TResultFileSinkOptions) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SuccessFileName = &v + _field = &v } + p.SuccessFileName = _field return nil } - func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Schema = make([][]string, 0, size) + _field := make([][]string, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { @@ -1559,6 +1818,7 @@ func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error { } _elem := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem1 string if v, err := iprot.ReadString(); err != nil { return err @@ -1572,20 +1832,20 @@ func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error { return err } - p.Schema = append(p.Schema, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Schema = _field return nil } - func (p *TResultFileSinkOptions) ReadField10(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.FileProperties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -1601,103 +1861,153 @@ func (p *TResultFileSinkOptions) ReadField10(iprot thrift.TProtocol) error { _val = v } - p.FileProperties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.FileProperties = _field return nil } - func (p *TResultFileSinkOptions) ReadField11(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ParquetSchemas = make([]*TParquetSchema, 0, size) + _field := make([]*TParquetSchema, 0, size) + values := make([]TParquetSchema, size) for i := 0; i < size; i++ { - _elem := NewTParquetSchema() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ParquetSchemas = append(p.ParquetSchemas, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ParquetSchemas = _field return nil } - func (p *TResultFileSinkOptions) ReadField12(iprot thrift.TProtocol) error { + + var _field *TParquetCompressionType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TParquetCompressionType(v) - p.ParquetCompressionType = &tmp + _field = &tmp } + p.ParquetCompressionType = _field return nil } - func (p *TResultFileSinkOptions) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ParquetDisableDictionary = &v + _field = &v } + p.ParquetDisableDictionary = _field return nil } - func 
(p *TResultFileSinkOptions) ReadField14(iprot thrift.TProtocol) error { + + var _field *TParquetVersion if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TParquetVersion(v) - p.ParquetVersion = &tmp + _field = &tmp } + p.ParquetVersion = _field return nil } - func (p *TResultFileSinkOptions) ReadField15(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.OrcSchema = &v + _field = &v } + p.OrcSchema = _field return nil } - func (p *TResultFileSinkOptions) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.DeleteExistingFiles = &v + _field = &v } + p.DeleteExistingFiles = _field return nil } - func (p *TResultFileSinkOptions) ReadField17(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FileSuffix = &v + _field = &v } + p.FileSuffix = _field return nil } +func (p *TResultFileSinkOptions) ReadField18(iprot thrift.TProtocol) error { -func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TResultFileSinkOptions"); err != nil { - goto WriteStructBeginError + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError + p.WithBom = _field + return nil +} +func (p *TResultFileSinkOptions) ReadField19(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileCompressType(v) + _field = &tmp + } + p.OrcCompressionType = _field + return nil +} +func (p *TResultFileSinkOptions) ReadField20(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.OrcWriterVersion = _field + return nil +} + +func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TResultFileSinkOptions"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } if err = p.writeField3(oprot); err != nil { fieldId = 3 @@ -1759,7 +2069,18 @@ func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) { fieldId = 17 goto WriteFieldError } - + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1905,11 +2226,9 @@ func (p *TResultFileSinkOptions) writeField7(oprot thrift.TProtocol) (err error) return err } for k, v := range p.BrokerProperties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -1991,11 +2310,9 @@ func (p *TResultFileSinkOptions) writeField10(oprot thrift.TProtocol) (err error return err } for k, v := range p.FileProperties { - if err := oprot.WriteString(k); err != 
nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -2155,11 +2472,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } +func (p *TResultFileSinkOptions) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetWithBom() { + if err = oprot.WriteFieldBegin("with_bom", thrift.BOOL, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.WithBom); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TResultFileSinkOptions) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetOrcCompressionType() { + if err = oprot.WriteFieldBegin("orc_compression_type", thrift.I32, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.OrcCompressionType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TResultFileSinkOptions) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetOrcWriterVersion() { + if err = oprot.WriteFieldBegin("orc_writer_version", thrift.I64, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.OrcWriterVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + func (p *TResultFileSinkOptions) String() string { if p == nil { return "" } return fmt.Sprintf("TResultFileSinkOptions(%+v)", *p) + } func (p *TResultFileSinkOptions) DeepEqual(ano *TResultFileSinkOptions) bool { @@ -2219,6 +2594,15 @@ func (p *TResultFileSinkOptions) DeepEqual(ano *TResultFileSinkOptions) bool { if !p.Field17DeepEqual(ano.FileSuffix) { return false } + if !p.Field18DeepEqual(ano.WithBom) { + return false + } + if !p.Field19DeepEqual(ano.OrcCompressionType) { + return false + } + if !p.Field20DeepEqual(ano.OrcWriterVersion) { + return false + } return true } @@ -2427,6 +2811,42 @@ func (p *TResultFileSinkOptions) Field17DeepEqual(src *string) bool { } return true } +func (p *TResultFileSinkOptions) Field18DeepEqual(src *bool) bool { + + if p.WithBom == src { + return true + } else if p.WithBom == nil || src == nil { + return false + } + if *p.WithBom != *src { + return false + } + return true +} +func (p *TResultFileSinkOptions) Field19DeepEqual(src *plannodes.TFileCompressType) bool { + + if p.OrcCompressionType == src { + return true + } else if p.OrcCompressionType == nil || src == nil { + return false + } + if *p.OrcCompressionType != *src { + return false + } + return true +} +func (p *TResultFileSinkOptions) Field20DeepEqual(src *int64) bool { + + if p.OrcWriterVersion == src { + return true + } else if p.OrcWriterVersion == nil || src == nil { + return false + } + if *p.OrcWriterVersion != *src { + return false + } + return true +} type 
TMemoryScratchSink struct { } @@ -2436,7 +2856,6 @@ func NewTMemoryScratchSink() *TMemoryScratchSink { } func (p *TMemoryScratchSink) InitDefault() { - *p = TMemoryScratchSink{} } var fieldIDToName_TMemoryScratchSink = map[int16]string{} @@ -2461,7 +2880,6 @@ func (p *TMemoryScratchSink) Read(iprot thrift.TProtocol) (err error) { if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2489,7 +2907,6 @@ func (p *TMemoryScratchSink) Write(oprot thrift.TProtocol) (err error) { goto WriteStructBeginError } if p != nil { - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2511,6 +2928,7 @@ func (p *TMemoryScratchSink) String() string { return "" } return fmt.Sprintf("TMemoryScratchSink(%+v)", *p) + } func (p *TMemoryScratchSink) DeepEqual(ano *TMemoryScratchSink) bool { @@ -2533,7 +2951,6 @@ func NewTPlanFragmentDestination() *TPlanFragmentDestination { } func (p *TPlanFragmentDestination) InitDefault() { - *p = TPlanFragmentDestination{} } var TPlanFragmentDestination_FragmentInstanceId_DEFAULT *types.TUniqueId @@ -2617,10 +3034,8 @@ func (p *TPlanFragmentDestination) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFragmentInstanceId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -2628,27 +3043,22 @@ func (p *TPlanFragmentDestination) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetServer = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2685,26 +3095,27 @@ RequiredFieldNotSetError: } func (p *TPlanFragmentDestination) ReadField1(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.FragmentInstanceId = _field return nil } - func (p *TPlanFragmentDestination) ReadField2(iprot thrift.TProtocol) error { - p.Server = types.NewTNetworkAddress() - if err := p.Server.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.Server = _field return nil } - func (p *TPlanFragmentDestination) ReadField3(iprot thrift.TProtocol) error { - p.BrpcServer = types.NewTNetworkAddress() - if err := p.BrpcServer.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.BrpcServer = _field return nil } @@ -2726,7 +3137,6 @@ func (p *TPlanFragmentDestination) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2803,6 +3213,7 @@ func (p *TPlanFragmentDestination) String() string { return "" } return 
fmt.Sprintf("TPlanFragmentDestination(%+v)", *p) + } func (p *TPlanFragmentDestination) DeepEqual(ano *TPlanFragmentDestination) bool { @@ -2846,13 +3257,19 @@ func (p *TPlanFragmentDestination) Field3DeepEqual(src *types.TNetworkAddress) b } type TDataStreamSink struct { - DestNodeId types.TPlanNodeId `thrift:"dest_node_id,1,required" frugal:"1,required,i32" json:"dest_node_id"` - OutputPartition *partitions.TDataPartition `thrift:"output_partition,2,required" frugal:"2,required,partitions.TDataPartition" json:"output_partition"` - IgnoreNotFound *bool `thrift:"ignore_not_found,3,optional" frugal:"3,optional,bool" json:"ignore_not_found,omitempty"` - OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"` - OutputTupleId *types.TTupleId `thrift:"output_tuple_id,5,optional" frugal:"5,optional,i32" json:"output_tuple_id,omitempty"` - Conjuncts []*exprs.TExpr `thrift:"conjuncts,6,optional" frugal:"6,optional,list" json:"conjuncts,omitempty"` - RuntimeFilters []*plannodes.TRuntimeFilterDesc `thrift:"runtime_filters,7,optional" frugal:"7,optional,list" json:"runtime_filters,omitempty"` + DestNodeId types.TPlanNodeId `thrift:"dest_node_id,1,required" frugal:"1,required,i32" json:"dest_node_id"` + OutputPartition *partitions.TDataPartition `thrift:"output_partition,2,required" frugal:"2,required,partitions.TDataPartition" json:"output_partition"` + IgnoreNotFound *bool `thrift:"ignore_not_found,3,optional" frugal:"3,optional,bool" json:"ignore_not_found,omitempty"` + OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"` + OutputTupleId *types.TTupleId `thrift:"output_tuple_id,5,optional" frugal:"5,optional,i32" json:"output_tuple_id,omitempty"` + Conjuncts []*exprs.TExpr `thrift:"conjuncts,6,optional" frugal:"6,optional,list" json:"conjuncts,omitempty"` + RuntimeFilters []*plannodes.TRuntimeFilterDesc `thrift:"runtime_filters,7,optional" frugal:"7,optional,list" json:"runtime_filters,omitempty"` + TabletSinkSchema *descriptors.TOlapTableSchemaParam `thrift:"tablet_sink_schema,8,optional" frugal:"8,optional,descriptors.TOlapTableSchemaParam" json:"tablet_sink_schema,omitempty"` + TabletSinkPartition *descriptors.TOlapTablePartitionParam `thrift:"tablet_sink_partition,9,optional" frugal:"9,optional,descriptors.TOlapTablePartitionParam" json:"tablet_sink_partition,omitempty"` + TabletSinkLocation *descriptors.TOlapTableLocationParam `thrift:"tablet_sink_location,10,optional" frugal:"10,optional,descriptors.TOlapTableLocationParam" json:"tablet_sink_location,omitempty"` + TabletSinkTxnId *int64 `thrift:"tablet_sink_txn_id,11,optional" frugal:"11,optional,i64" json:"tablet_sink_txn_id,omitempty"` + TabletSinkTupleId *types.TTupleId `thrift:"tablet_sink_tuple_id,12,optional" frugal:"12,optional,i32" json:"tablet_sink_tuple_id,omitempty"` + TabletSinkExprs []*exprs.TExpr `thrift:"tablet_sink_exprs,13,optional" frugal:"13,optional,list" json:"tablet_sink_exprs,omitempty"` } func NewTDataStreamSink() *TDataStreamSink { @@ -2860,7 +3277,6 @@ func NewTDataStreamSink() *TDataStreamSink { } func (p *TDataStreamSink) InitDefault() { - *p = TDataStreamSink{} } func (p *TDataStreamSink) GetDestNodeId() (v types.TPlanNodeId) { @@ -2920,6 +3336,60 @@ func (p *TDataStreamSink) GetRuntimeFilters() (v []*plannodes.TRuntimeFilterDesc } return p.RuntimeFilters } + +var TDataStreamSink_TabletSinkSchema_DEFAULT *descriptors.TOlapTableSchemaParam + +func (p *TDataStreamSink) GetTabletSinkSchema() (v 
*descriptors.TOlapTableSchemaParam) { + if !p.IsSetTabletSinkSchema() { + return TDataStreamSink_TabletSinkSchema_DEFAULT + } + return p.TabletSinkSchema +} + +var TDataStreamSink_TabletSinkPartition_DEFAULT *descriptors.TOlapTablePartitionParam + +func (p *TDataStreamSink) GetTabletSinkPartition() (v *descriptors.TOlapTablePartitionParam) { + if !p.IsSetTabletSinkPartition() { + return TDataStreamSink_TabletSinkPartition_DEFAULT + } + return p.TabletSinkPartition +} + +var TDataStreamSink_TabletSinkLocation_DEFAULT *descriptors.TOlapTableLocationParam + +func (p *TDataStreamSink) GetTabletSinkLocation() (v *descriptors.TOlapTableLocationParam) { + if !p.IsSetTabletSinkLocation() { + return TDataStreamSink_TabletSinkLocation_DEFAULT + } + return p.TabletSinkLocation +} + +var TDataStreamSink_TabletSinkTxnId_DEFAULT int64 + +func (p *TDataStreamSink) GetTabletSinkTxnId() (v int64) { + if !p.IsSetTabletSinkTxnId() { + return TDataStreamSink_TabletSinkTxnId_DEFAULT + } + return *p.TabletSinkTxnId +} + +var TDataStreamSink_TabletSinkTupleId_DEFAULT types.TTupleId + +func (p *TDataStreamSink) GetTabletSinkTupleId() (v types.TTupleId) { + if !p.IsSetTabletSinkTupleId() { + return TDataStreamSink_TabletSinkTupleId_DEFAULT + } + return *p.TabletSinkTupleId +} + +var TDataStreamSink_TabletSinkExprs_DEFAULT []*exprs.TExpr + +func (p *TDataStreamSink) GetTabletSinkExprs() (v []*exprs.TExpr) { + if !p.IsSetTabletSinkExprs() { + return TDataStreamSink_TabletSinkExprs_DEFAULT + } + return p.TabletSinkExprs +} func (p *TDataStreamSink) SetDestNodeId(val types.TPlanNodeId) { p.DestNodeId = val } @@ -2941,15 +3411,39 @@ func (p *TDataStreamSink) SetConjuncts(val []*exprs.TExpr) { func (p *TDataStreamSink) SetRuntimeFilters(val []*plannodes.TRuntimeFilterDesc) { p.RuntimeFilters = val } +func (p *TDataStreamSink) SetTabletSinkSchema(val *descriptors.TOlapTableSchemaParam) { + p.TabletSinkSchema = val +} +func (p *TDataStreamSink) SetTabletSinkPartition(val *descriptors.TOlapTablePartitionParam) { + p.TabletSinkPartition = val +} +func (p *TDataStreamSink) SetTabletSinkLocation(val *descriptors.TOlapTableLocationParam) { + p.TabletSinkLocation = val +} +func (p *TDataStreamSink) SetTabletSinkTxnId(val *int64) { + p.TabletSinkTxnId = val +} +func (p *TDataStreamSink) SetTabletSinkTupleId(val *types.TTupleId) { + p.TabletSinkTupleId = val +} +func (p *TDataStreamSink) SetTabletSinkExprs(val []*exprs.TExpr) { + p.TabletSinkExprs = val +} var fieldIDToName_TDataStreamSink = map[int16]string{ - 1: "dest_node_id", - 2: "output_partition", - 3: "ignore_not_found", - 4: "output_exprs", - 5: "output_tuple_id", - 6: "conjuncts", - 7: "runtime_filters", + 1: "dest_node_id", + 2: "output_partition", + 3: "ignore_not_found", + 4: "output_exprs", + 5: "output_tuple_id", + 6: "conjuncts", + 7: "runtime_filters", + 8: "tablet_sink_schema", + 9: "tablet_sink_partition", + 10: "tablet_sink_location", + 11: "tablet_sink_txn_id", + 12: "tablet_sink_tuple_id", + 13: "tablet_sink_exprs", } func (p *TDataStreamSink) IsSetOutputPartition() bool { @@ -2976,6 +3470,30 @@ func (p *TDataStreamSink) IsSetRuntimeFilters() bool { return p.RuntimeFilters != nil } +func (p *TDataStreamSink) IsSetTabletSinkSchema() bool { + return p.TabletSinkSchema != nil +} + +func (p *TDataStreamSink) IsSetTabletSinkPartition() bool { + return p.TabletSinkPartition != nil +} + +func (p *TDataStreamSink) IsSetTabletSinkLocation() bool { + return p.TabletSinkLocation != nil +} + +func (p *TDataStreamSink) IsSetTabletSinkTxnId() bool { + return 
p.TabletSinkTxnId != nil +} + +func (p *TDataStreamSink) IsSetTabletSinkTupleId() bool { + return p.TabletSinkTupleId != nil +} + +func (p *TDataStreamSink) IsSetTabletSinkExprs() bool { + return p.TabletSinkExprs != nil +} + func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3003,10 +3521,8 @@ func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDestNodeId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -3014,67 +3530,102 @@ func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputPartition = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I64 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.LIST { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = 
iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3111,97 +3662,182 @@ RequiredFieldNotSetError: } func (p *TDataStreamSink) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DestNodeId = v + _field = v } + p.DestNodeId = _field return nil } - func (p *TDataStreamSink) ReadField2(iprot thrift.TProtocol) error { - p.OutputPartition = partitions.NewTDataPartition() - if err := p.OutputPartition.Read(iprot); err != nil { + _field := partitions.NewTDataPartition() + if err := _field.Read(iprot); err != nil { return err } + p.OutputPartition = _field return nil } - func (p *TDataStreamSink) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IgnoreNotFound = &v + _field = &v } + p.IgnoreNotFound = _field return nil } - func (p *TDataStreamSink) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OutputExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OutputExprs = append(p.OutputExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OutputExprs = _field return nil } - func (p *TDataStreamSink) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = &v + _field = &v } + p.OutputTupleId = _field return nil } - func (p *TDataStreamSink) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Conjuncts = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Conjuncts = append(p.Conjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Conjuncts = _field return nil } - func (p *TDataStreamSink) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RuntimeFilters = make([]*plannodes.TRuntimeFilterDesc, 0, size) + _field := make([]*plannodes.TRuntimeFilterDesc, 0, size) + values := make([]plannodes.TRuntimeFilterDesc, size) for i := 0; i < size; i++ { - _elem := plannodes.NewTRuntimeFilterDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.RuntimeFilters = append(p.RuntimeFilters, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RuntimeFilters = _field + return nil +} +func (p *TDataStreamSink) ReadField8(iprot thrift.TProtocol) error { + _field := descriptors.NewTOlapTableSchemaParam() + if err := _field.Read(iprot); err != nil { + return err + } + p.TabletSinkSchema = _field + return nil +} +func (p *TDataStreamSink) ReadField9(iprot thrift.TProtocol) error { + _field := descriptors.NewTOlapTablePartitionParam() + if err := _field.Read(iprot); err != nil { + return err + } + p.TabletSinkPartition = _field + 
return nil +} +func (p *TDataStreamSink) ReadField10(iprot thrift.TProtocol) error { + _field := descriptors.NewTOlapTableLocationParam() + if err := _field.Read(iprot); err != nil { + return err + } + p.TabletSinkLocation = _field + return nil +} +func (p *TDataStreamSink) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TabletSinkTxnId = _field + return nil +} +func (p *TDataStreamSink) ReadField12(iprot thrift.TProtocol) error { + + var _field *types.TTupleId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.TabletSinkTupleId = _field + return nil +} +func (p *TDataStreamSink) ReadField13(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TabletSinkExprs = _field return nil } @@ -3239,7 +3875,30 @@ func (p *TDataStreamSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3411,11 +4070,134 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } +func (p *TDataStreamSink) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkSchema() { + if err = oprot.WriteFieldBegin("tablet_sink_schema", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.TabletSinkSchema.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TDataStreamSink) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkPartition() { + if err = oprot.WriteFieldBegin("tablet_sink_partition", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.TabletSinkPartition.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TDataStreamSink) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkLocation() { + if err = oprot.WriteFieldBegin("tablet_sink_location", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err 
:= p.TabletSinkLocation.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TDataStreamSink) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkTxnId() { + if err = oprot.WriteFieldBegin("tablet_sink_txn_id", thrift.I64, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TabletSinkTxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TDataStreamSink) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkTupleId() { + if err = oprot.WriteFieldBegin("tablet_sink_tuple_id", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TabletSinkTupleId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TDataStreamSink) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkExprs() { + if err = oprot.WriteFieldBegin("tablet_sink_exprs", thrift.LIST, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TabletSinkExprs)); err != nil { + return err + } + for _, v := range p.TabletSinkExprs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + func (p *TDataStreamSink) String() string { if p == nil { return "" } return fmt.Sprintf("TDataStreamSink(%+v)", *p) + } func (p *TDataStreamSink) DeepEqual(ano *TDataStreamSink) bool { @@ -3445,6 +4227,24 @@ func (p *TDataStreamSink) DeepEqual(ano *TDataStreamSink) bool { if !p.Field7DeepEqual(ano.RuntimeFilters) { return false } + if !p.Field8DeepEqual(ano.TabletSinkSchema) { + return false + } + if !p.Field9DeepEqual(ano.TabletSinkPartition) { + return false + } + if !p.Field10DeepEqual(ano.TabletSinkLocation) { + return false + } + if !p.Field11DeepEqual(ano.TabletSinkTxnId) { + return false + } + if !p.Field12DeepEqual(ano.TabletSinkTupleId) { + return false + } + if !p.Field13DeepEqual(ano.TabletSinkExprs) { + return false + } return true } @@ -3525,6 +4325,64 @@ func (p *TDataStreamSink) Field7DeepEqual(src []*plannodes.TRuntimeFilterDesc) b } return true } +func (p *TDataStreamSink) Field8DeepEqual(src *descriptors.TOlapTableSchemaParam) bool { + + if !p.TabletSinkSchema.DeepEqual(src) { + return false + } + return true +} +func (p *TDataStreamSink) Field9DeepEqual(src 
*descriptors.TOlapTablePartitionParam) bool { + + if !p.TabletSinkPartition.DeepEqual(src) { + return false + } + return true +} +func (p *TDataStreamSink) Field10DeepEqual(src *descriptors.TOlapTableLocationParam) bool { + + if !p.TabletSinkLocation.DeepEqual(src) { + return false + } + return true +} +func (p *TDataStreamSink) Field11DeepEqual(src *int64) bool { + + if p.TabletSinkTxnId == src { + return true + } else if p.TabletSinkTxnId == nil || src == nil { + return false + } + if *p.TabletSinkTxnId != *src { + return false + } + return true +} +func (p *TDataStreamSink) Field12DeepEqual(src *types.TTupleId) bool { + + if p.TabletSinkTupleId == src { + return true + } else if p.TabletSinkTupleId == nil || src == nil { + return false + } + if *p.TabletSinkTupleId != *src { + return false + } + return true +} +func (p *TDataStreamSink) Field13DeepEqual(src []*exprs.TExpr) bool { + + if len(p.TabletSinkExprs) != len(src) { + return false + } + for i, v := range p.TabletSinkExprs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} type TMultiCastDataStreamSink struct { Sinks []*TDataStreamSink `thrift:"sinks,1,optional" frugal:"1,optional,list" json:"sinks,omitempty"` @@ -3536,7 +4394,6 @@ func NewTMultiCastDataStreamSink() *TMultiCastDataStreamSink { } func (p *TMultiCastDataStreamSink) InitDefault() { - *p = TMultiCastDataStreamSink{} } var TMultiCastDataStreamSink_Sinks_DEFAULT []*TDataStreamSink @@ -3600,27 +4457,22 @@ func (p *TMultiCastDataStreamSink) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3650,35 +4502,41 @@ func (p *TMultiCastDataStreamSink) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Sinks = make([]*TDataStreamSink, 0, size) + _field := make([]*TDataStreamSink, 0, size) + values := make([]TDataStreamSink, size) for i := 0; i < size; i++ { - _elem := NewTDataStreamSink() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Sinks = append(p.Sinks, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Sinks = _field return nil } - func (p *TMultiCastDataStreamSink) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Destinations = make([][]*TPlanFragmentDestination, 0, size) + _field := make([][]*TPlanFragmentDestination, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*TPlanFragmentDestination, 0, size) + values := make([]TPlanFragmentDestination, size) for i := 0; i < size; i++ { - _elem1 := NewTPlanFragmentDestination() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -3689,11 +4547,12 @@ func (p *TMultiCastDataStreamSink) ReadField2(iprot thrift.TProtocol) error { return err } - 
p.Destinations = append(p.Destinations, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Destinations = _field return nil } @@ -3711,7 +4570,6 @@ func (p *TMultiCastDataStreamSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3797,6 +4655,7 @@ func (p *TMultiCastDataStreamSink) String() string { return "" } return fmt.Sprintf("TMultiCastDataStreamSink(%+v)", *p) + } func (p *TMultiCastDataStreamSink) DeepEqual(ano *TMultiCastDataStreamSink) bool { @@ -3859,7 +4718,6 @@ func NewTFetchOption() *TFetchOption { } func (p *TFetchOption) InitDefault() { - *p = TFetchOption{} } var TFetchOption_UseTwoPhaseFetch_DEFAULT bool @@ -3957,47 +4815,38 @@ func (p *TFetchOption) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4023,48 +4872,56 @@ ReadStructEndError: } func (p *TFetchOption) ReadField1(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTwoPhaseFetch = &v + _field = &v } + p.UseTwoPhaseFetch = _field return nil } - func (p *TFetchOption) ReadField2(iprot thrift.TProtocol) error { - p.NodesInfo = descriptors.NewTPaloNodesInfo() - if err := p.NodesInfo.Read(iprot); err != nil { + _field := descriptors.NewTPaloNodesInfo() + if err := _field.Read(iprot); err != nil { return err } + p.NodesInfo = _field return nil } - func (p *TFetchOption) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.FetchRowStore = &v + _field = &v } + p.FetchRowStore = _field return nil } - func (p *TFetchOption) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnDesc = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnDesc = append(p.ColumnDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnDesc = _field return nil } @@ -4090,7 +4947,6 @@ func (p 
*TFetchOption) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4198,6 +5054,7 @@ func (p *TFetchOption) String() string { return "" } return fmt.Sprintf("TFetchOption(%+v)", *p) + } func (p *TFetchOption) DeepEqual(ano *TFetchOption) bool { @@ -4277,7 +5134,6 @@ func NewTResultSink() *TResultSink { } func (p *TResultSink) InitDefault() { - *p = TResultSink{} } var TResultSink_Type_DEFAULT TResultSinkType @@ -4358,37 +5214,30 @@ func (p *TResultSink) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4414,28 +5263,31 @@ ReadStructEndError: } func (p *TResultSink) ReadField1(iprot thrift.TProtocol) error { + + var _field *TResultSinkType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TResultSinkType(v) - p.Type = &tmp + _field = &tmp } + p.Type = _field return nil } - func (p *TResultSink) ReadField2(iprot thrift.TProtocol) error { - p.FileOptions = NewTResultFileSinkOptions() - if err := p.FileOptions.Read(iprot); err != nil { + _field := NewTResultFileSinkOptions() + if err := _field.Read(iprot); err != nil { return err } + p.FileOptions = _field return nil } - func (p *TResultSink) ReadField3(iprot thrift.TProtocol) error { - p.FetchOption = NewTFetchOption() - if err := p.FetchOption.Read(iprot); err != nil { + _field := NewTFetchOption() + if err := _field.Read(iprot); err != nil { return err } + p.FetchOption = _field return nil } @@ -4457,7 +5309,6 @@ func (p *TResultSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4538,6 +5389,7 @@ func (p *TResultSink) String() string { return "" } return fmt.Sprintf("TResultSink(%+v)", *p) + } func (p *TResultSink) DeepEqual(ano *TResultSink) bool { @@ -4599,7 +5451,6 @@ func NewTResultFileSink() *TResultFileSink { } func (p *TResultFileSink) InitDefault() { - *p = TResultFileSink{} } var TResultFileSink_FileOptions_DEFAULT *TResultFileSinkOptions @@ -4731,67 +5582,54 @@ func (p *TResultFileSink) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err 
!= nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4817,56 +5655,67 @@ ReadStructEndError: } func (p *TResultFileSink) ReadField1(iprot thrift.TProtocol) error { - p.FileOptions = NewTResultFileSinkOptions() - if err := p.FileOptions.Read(iprot); err != nil { + _field := NewTResultFileSinkOptions() + if err := _field.Read(iprot); err != nil { return err } + p.FileOptions = _field return nil } - func (p *TResultFileSink) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TStorageBackendType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TStorageBackendType(v) - p.StorageBackendType = &tmp + _field = &tmp } + p.StorageBackendType = _field return nil } - func (p *TResultFileSink) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DestNodeId = &v + _field = &v } + p.DestNodeId = _field return nil } - func (p *TResultFileSink) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = &v + _field = &v } + p.OutputTupleId = _field return nil } - func (p *TResultFileSink) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Header = &v + _field = &v } + p.Header = _field return nil } - func (p *TResultFileSink) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HeaderType = &v + _field = &v } + p.HeaderType = _field return nil } @@ -4900,7 +5749,6 @@ func (p *TResultFileSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5038,6 +5886,7 @@ func (p *TResultFileSink) String() string { return "" } return fmt.Sprintf("TResultFileSink(%+v)", *p) + } func (p *TResultFileSink) DeepEqual(ano *TResultFileSink) bool { @@ -5150,7 +5999,6 @@ func NewTMysqlTableSink() *TMysqlTableSink { } func (p *TMysqlTableSink) InitDefault() { - *p = TMysqlTableSink{} } func (p *TMysqlTableSink) GetHost() (v string) { @@ -5244,10 +6092,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto 
ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -5255,10 +6101,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -5266,10 +6110,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -5277,10 +6119,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { @@ -5288,10 +6128,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { @@ -5299,10 +6137,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { @@ -5310,17 +6146,14 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCharset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5382,65 +6215,80 @@ RequiredFieldNotSetError: } func (p *TMysqlTableSink) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TMysqlTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Port = v + _field = v } + p.Port = _field return nil } - func (p *TMysqlTableSink) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TMysqlTableSink) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Passwd = _field return nil } - func (p *TMysqlTableSink) ReadField5(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Db = _field return nil } - func (p *TMysqlTableSink) 
ReadField6(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = v + _field = v } + p.Table = _field return nil } - func (p *TMysqlTableSink) ReadField7(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Charset = v + _field = v } + p.Charset = _field return nil } @@ -5478,7 +6326,6 @@ func (p *TMysqlTableSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5621,6 +6468,7 @@ func (p *TMysqlTableSink) String() string { return "" } return fmt.Sprintf("TMysqlTableSink(%+v)", *p) + } func (p *TMysqlTableSink) DeepEqual(ano *TMysqlTableSink) bool { @@ -5714,7 +6562,6 @@ func NewTOdbcTableSink() *TOdbcTableSink { } func (p *TOdbcTableSink) InitDefault() { - *p = TOdbcTableSink{} } var TOdbcTableSink_ConnectString_DEFAULT string @@ -5795,37 +6642,30 @@ func (p *TOdbcTableSink) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5851,29 +6691,36 @@ ReadStructEndError: } func (p *TOdbcTableSink) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ConnectString = &v + _field = &v } + p.ConnectString = _field return nil } - func (p *TOdbcTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = &v + _field = &v } + p.Table = _field return nil } - func (p *TOdbcTableSink) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTransaction = &v + _field = &v } + p.UseTransaction = _field return nil } @@ -5895,7 +6742,6 @@ func (p *TOdbcTableSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5976,6 +6822,7 @@ func (p *TOdbcTableSink) String() string { return "" } return fmt.Sprintf("TOdbcTableSink(%+v)", *p) + } func (p *TOdbcTableSink) DeepEqual(ano *TOdbcTableSink) bool { @@ -6045,7 +6892,6 @@ func NewTJdbcTableSink() *TJdbcTableSink { } func (p *TJdbcTableSink) InitDefault() { - *p = TJdbcTableSink{} } var TJdbcTableSink_JdbcTable_DEFAULT *descriptors.TJdbcTable @@ -6143,47 +6989,38 @@ func (p *TJdbcTableSink) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { 
- goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6209,38 +7046,45 @@ ReadStructEndError: } func (p *TJdbcTableSink) ReadField1(iprot thrift.TProtocol) error { - p.JdbcTable = descriptors.NewTJdbcTable() - if err := p.JdbcTable.Read(iprot); err != nil { + _field := descriptors.NewTJdbcTable() + if err := _field.Read(iprot); err != nil { return err } + p.JdbcTable = _field return nil } - func (p *TJdbcTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTransaction = &v + _field = &v } + p.UseTransaction = _field return nil } - func (p *TJdbcTableSink) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TOdbcTableType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TOdbcTableType(v) - p.TableType = &tmp + _field = &tmp } + p.TableType = _field return nil } - func (p *TJdbcTableSink) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.InsertSql = &v + _field = &v } + p.InsertSql = _field return nil } @@ -6266,7 +7110,6 @@ func (p *TJdbcTableSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6366,6 +7209,7 @@ func (p *TJdbcTableSink) String() string { return "" } return fmt.Sprintf("TJdbcTableSink(%+v)", *p) + } func (p *TJdbcTableSink) DeepEqual(ano *TJdbcTableSink) bool { @@ -6448,7 +7292,6 @@ func NewTExportSink() *TExportSink { } func (p *TExportSink) InitDefault() { - *p = TExportSink{} } func (p *TExportSink) GetFileType() (v types.TFileType) { @@ -6566,10 +7409,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFileType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -6577,10 +7418,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetExportPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -6588,10 +7427,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnSeparator = true - } else { 
- if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -6599,47 +7436,38 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLineDelimiter = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.MAP { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6686,67 +7514,78 @@ RequiredFieldNotSetError: } func (p *TExportSink) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TFileType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FileType = types.TFileType(v) + _field = types.TFileType(v) } + p.FileType = _field return nil } - func (p *TExportSink) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ExportPath = v + _field = v } + p.ExportPath = _field return nil } - func (p *TExportSink) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparator = v + _field = v } + p.ColumnSeparator = _field return nil } - func (p *TExportSink) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineDelimiter = v + _field = v } + p.LineDelimiter = _field return nil } - func (p *TExportSink) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.BrokerAddresses = append(p.BrokerAddresses, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.BrokerAddresses = _field return nil } - func (p *TExportSink) ReadField6(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -6762,20 +7601,23 @@ func (p *TExportSink) ReadField6(iprot thrift.TProtocol) error { _val = v } - 
p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } - func (p *TExportSink) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Header = &v + _field = &v } + p.Header = _field return nil } @@ -6813,7 +7655,6 @@ func (p *TExportSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6936,11 +7777,9 @@ func (p *TExportSink) writeField6(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -6983,6 +7822,7 @@ func (p *TExportSink) String() string { return "" } return fmt.Sprintf("TExportSink(%+v)", *p) + } func (p *TExportSink) DeepEqual(ano *TExportSink) bool { @@ -7103,6 +7943,10 @@ type TOlapTableSink struct { SlaveLocation *descriptors.TOlapTableLocationParam `thrift:"slave_location,18,optional" frugal:"18,optional,descriptors.TOlapTableLocationParam" json:"slave_location,omitempty"` TxnTimeoutS *int64 `thrift:"txn_timeout_s,19,optional" frugal:"19,optional,i64" json:"txn_timeout_s,omitempty"` WriteFileCache *bool `thrift:"write_file_cache,20,optional" frugal:"20,optional,bool" json:"write_file_cache,omitempty"` + BaseSchemaVersion *int64 `thrift:"base_schema_version,21,optional" frugal:"21,optional,i64" json:"base_schema_version,omitempty"` + GroupCommitMode *TGroupCommitMode `thrift:"group_commit_mode,22,optional" frugal:"22,optional,TGroupCommitMode" json:"group_commit_mode,omitempty"` + MaxFilterRatio *float64 `thrift:"max_filter_ratio,23,optional" frugal:"23,optional,double" json:"max_filter_ratio,omitempty"` + StorageVaultId *string `thrift:"storage_vault_id,24,optional" frugal:"24,optional,string" json:"storage_vault_id,omitempty"` } func NewTOlapTableSink() *TOlapTableSink { @@ -7110,7 +7954,6 @@ func NewTOlapTableSink() *TOlapTableSink { } func (p *TOlapTableSink) InitDefault() { - *p = TOlapTableSink{} } var TOlapTableSink_LoadId_DEFAULT *types.TUniqueId @@ -7262,6 +8105,42 @@ func (p *TOlapTableSink) GetWriteFileCache() (v bool) { } return *p.WriteFileCache } + +var TOlapTableSink_BaseSchemaVersion_DEFAULT int64 + +func (p *TOlapTableSink) GetBaseSchemaVersion() (v int64) { + if !p.IsSetBaseSchemaVersion() { + return TOlapTableSink_BaseSchemaVersion_DEFAULT + } + return *p.BaseSchemaVersion +} + +var TOlapTableSink_GroupCommitMode_DEFAULT TGroupCommitMode + +func (p *TOlapTableSink) GetGroupCommitMode() (v TGroupCommitMode) { + if !p.IsSetGroupCommitMode() { + return TOlapTableSink_GroupCommitMode_DEFAULT + } + return *p.GroupCommitMode +} + +var TOlapTableSink_MaxFilterRatio_DEFAULT float64 + +func (p *TOlapTableSink) GetMaxFilterRatio() (v float64) { + if !p.IsSetMaxFilterRatio() { + return TOlapTableSink_MaxFilterRatio_DEFAULT + } + return *p.MaxFilterRatio +} + +var TOlapTableSink_StorageVaultId_DEFAULT string + +func (p *TOlapTableSink) GetStorageVaultId() (v string) { + if !p.IsSetStorageVaultId() { + return TOlapTableSink_StorageVaultId_DEFAULT + } + return *p.StorageVaultId +} func (p *TOlapTableSink) SetLoadId(val *types.TUniqueId) { p.LoadId = val } @@ -7322,6 +8201,18 @@ func (p *TOlapTableSink) SetTxnTimeoutS(val *int64) { func (p *TOlapTableSink) SetWriteFileCache(val *bool) { p.WriteFileCache = val } +func (p *TOlapTableSink) 
SetBaseSchemaVersion(val *int64) { + p.BaseSchemaVersion = val +} +func (p *TOlapTableSink) SetGroupCommitMode(val *TGroupCommitMode) { + p.GroupCommitMode = val +} +func (p *TOlapTableSink) SetMaxFilterRatio(val *float64) { + p.MaxFilterRatio = val +} +func (p *TOlapTableSink) SetStorageVaultId(val *string) { + p.StorageVaultId = val +} var fieldIDToName_TOlapTableSink = map[int16]string{ 1: "load_id", @@ -7344,6 +8235,10 @@ var fieldIDToName_TOlapTableSink = map[int16]string{ 18: "slave_location", 19: "txn_timeout_s", 20: "write_file_cache", + 21: "base_schema_version", + 22: "group_commit_mode", + 23: "max_filter_ratio", + 24: "storage_vault_id", } func (p *TOlapTableSink) IsSetLoadId() bool { @@ -7402,6 +8297,22 @@ func (p *TOlapTableSink) IsSetWriteFileCache() bool { return p.WriteFileCache != nil } +func (p *TOlapTableSink) IsSetBaseSchemaVersion() bool { + return p.BaseSchemaVersion != nil +} + +func (p *TOlapTableSink) IsSetGroupCommitMode() bool { + return p.GroupCommitMode != nil +} + +func (p *TOlapTableSink) IsSetMaxFilterRatio() bool { + return p.MaxFilterRatio != nil +} + +func (p *TOlapTableSink) IsSetStorageVaultId() bool { + return p.StorageVaultId != nil +} + func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -7438,10 +8349,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLoadId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -7449,10 +8358,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -7460,10 +8367,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -7471,10 +8376,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { @@ -7482,10 +8385,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { @@ -7493,10 +8394,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumReplicas = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { @@ -7504,30 +8403,24 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNeedGenRollup = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else 
if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { @@ -7535,10 +8428,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchema = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { @@ -7546,10 +8437,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartition = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { @@ -7557,10 +8446,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLocation = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRUCT { @@ -7568,91 +8455,106 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodesInfo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I64 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I32 { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.BOOL { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.BOOL { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.STRUCT { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.I64 { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.BOOL { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.I64 { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - default: - if err = iprot.Skip(fieldTypeId); err != nil { + case 22: + if fieldTypeId == thrift.I32 { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } + case 23: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: + if fieldTypeId == thrift.STRING { + if err = p.ReadField24(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } if err = iprot.ReadStructEnd(); err != nil { goto ReadStructEndError } @@ -7730,176 +8632,250 @@ RequiredFieldNotSetError: } func (p *TOlapTableSink) ReadField1(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.LoadId = _field return nil } - func (p *TOlapTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnId = v + _field = v } + p.TxnId = _field return nil } - func (p *TOlapTableSink) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = v + _field = v } + p.DbId = _field return nil } - func (p *TOlapTableSink) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = v + _field = v } + p.TableId = _field return nil } - func (p *TOlapTableSink) ReadField5(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TOlapTableSink) ReadField6(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumReplicas = v + _field = v } + p.NumReplicas = _field return nil } - func (p *TOlapTableSink) ReadField7(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NeedGenRollup = v + _field = v } + p.NeedGenRollup = _field return nil } - func (p *TOlapTableSink) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = &v + _field = &v } + p.DbName = _field return nil } - func (p *TOlapTableSink) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else 
{ - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *TOlapTableSink) ReadField10(iprot thrift.TProtocol) error { - p.Schema = descriptors.NewTOlapTableSchemaParam() - if err := p.Schema.Read(iprot); err != nil { + _field := descriptors.NewTOlapTableSchemaParam() + if err := _field.Read(iprot); err != nil { return err } + p.Schema = _field return nil } - func (p *TOlapTableSink) ReadField11(iprot thrift.TProtocol) error { - p.Partition = descriptors.NewTOlapTablePartitionParam() - if err := p.Partition.Read(iprot); err != nil { + _field := descriptors.NewTOlapTablePartitionParam() + if err := _field.Read(iprot); err != nil { return err } + p.Partition = _field return nil } - func (p *TOlapTableSink) ReadField12(iprot thrift.TProtocol) error { - p.Location = descriptors.NewTOlapTableLocationParam() - if err := p.Location.Read(iprot); err != nil { + _field := descriptors.NewTOlapTableLocationParam() + if err := _field.Read(iprot); err != nil { return err } + p.Location = _field return nil } - func (p *TOlapTableSink) ReadField13(iprot thrift.TProtocol) error { - p.NodesInfo = descriptors.NewTPaloNodesInfo() - if err := p.NodesInfo.Read(iprot); err != nil { + _field := descriptors.NewTPaloNodesInfo() + if err := _field.Read(iprot); err != nil { return err } + p.NodesInfo = _field return nil } - func (p *TOlapTableSink) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadChannelTimeoutS = &v + _field = &v } + p.LoadChannelTimeoutS = _field return nil } - func (p *TOlapTableSink) ReadField15(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SendBatchParallelism = &v + _field = &v } + p.SendBatchParallelism = _field return nil } - func (p *TOlapTableSink) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.LoadToSingleTablet = &v + _field = &v } + p.LoadToSingleTablet = _field return nil } - func (p *TOlapTableSink) ReadField17(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.WriteSingleReplica = &v + _field = &v } + p.WriteSingleReplica = _field return nil } - func (p *TOlapTableSink) ReadField18(iprot thrift.TProtocol) error { - p.SlaveLocation = descriptors.NewTOlapTableLocationParam() - if err := p.SlaveLocation.Read(iprot); err != nil { + _field := descriptors.NewTOlapTableLocationParam() + if err := _field.Read(iprot); err != nil { return err } + p.SlaveLocation = _field return nil } - func (p *TOlapTableSink) ReadField19(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnTimeoutS = &v + _field = &v } + p.TxnTimeoutS = _field return nil } - func (p *TOlapTableSink) ReadField20(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.WriteFileCache = &v + _field = &v + } + p.WriteFileCache = _field + return nil +} +func (p *TOlapTableSink) ReadField21(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BaseSchemaVersion = _field + return nil +} +func (p *TOlapTableSink) ReadField22(iprot thrift.TProtocol) error { + + var _field *TGroupCommitMode + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := 
TGroupCommitMode(v) + _field = &tmp + } + p.GroupCommitMode = _field + return nil +} +func (p *TOlapTableSink) ReadField23(iprot thrift.TProtocol) error { + + var _field *float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = &v + } + p.MaxFilterRatio = _field + return nil +} +func (p *TOlapTableSink) ReadField24(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } + p.StorageVaultId = _field return nil } @@ -7989,7 +8965,22 @@ func (p *TOlapTableSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 20 goto WriteFieldError } - + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8366,11 +9357,88 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } +func (p *TOlapTableSink) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseSchemaVersion() { + if err = oprot.WriteFieldBegin("base_schema_version", thrift.I64, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BaseSchemaVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) +} + +func (p *TOlapTableSink) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitMode() { + if err = oprot.WriteFieldBegin("group_commit_mode", thrift.I32, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.GroupCommitMode)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TOlapTableSink) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxFilterRatio() { + if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + +func (p *TOlapTableSink) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetStorageVaultId() { + if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 24); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.StorageVaultId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write 
field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) +} + func (p *TOlapTableSink) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapTableSink(%+v)", *p) + } func (p *TOlapTableSink) DeepEqual(ano *TOlapTableSink) bool { @@ -8439,6 +9507,18 @@ func (p *TOlapTableSink) DeepEqual(ano *TOlapTableSink) bool { if !p.Field20DeepEqual(ano.WriteFileCache) { return false } + if !p.Field21DeepEqual(ano.BaseSchemaVersion) { + return false + } + if !p.Field22DeepEqual(ano.GroupCommitMode) { + return false + } + if !p.Field23DeepEqual(ano.MaxFilterRatio) { + return false + } + if !p.Field24DeepEqual(ano.StorageVaultId) { + return false + } return true } @@ -8622,122 +9702,6783 @@ func (p *TOlapTableSink) Field20DeepEqual(src *bool) bool { } return true } +func (p *TOlapTableSink) Field21DeepEqual(src *int64) bool { -type TDataSink struct { - Type TDataSinkType `thrift:"type,1,required" frugal:"1,required,TDataSinkType" json:"type"` - StreamSink *TDataStreamSink `thrift:"stream_sink,2,optional" frugal:"2,optional,TDataStreamSink" json:"stream_sink,omitempty"` - ResultSink *TResultSink `thrift:"result_sink,3,optional" frugal:"3,optional,TResultSink" json:"result_sink,omitempty"` - MysqlTableSink *TMysqlTableSink `thrift:"mysql_table_sink,5,optional" frugal:"5,optional,TMysqlTableSink" json:"mysql_table_sink,omitempty"` - ExportSink *TExportSink `thrift:"export_sink,6,optional" frugal:"6,optional,TExportSink" json:"export_sink,omitempty"` - OlapTableSink *TOlapTableSink `thrift:"olap_table_sink,7,optional" frugal:"7,optional,TOlapTableSink" json:"olap_table_sink,omitempty"` - MemoryScratchSink *TMemoryScratchSink `thrift:"memory_scratch_sink,8,optional" frugal:"8,optional,TMemoryScratchSink" json:"memory_scratch_sink,omitempty"` - OdbcTableSink *TOdbcTableSink `thrift:"odbc_table_sink,9,optional" frugal:"9,optional,TOdbcTableSink" json:"odbc_table_sink,omitempty"` - ResultFileSink *TResultFileSink `thrift:"result_file_sink,10,optional" frugal:"10,optional,TResultFileSink" json:"result_file_sink,omitempty"` - JdbcTableSink *TJdbcTableSink `thrift:"jdbc_table_sink,11,optional" frugal:"11,optional,TJdbcTableSink" json:"jdbc_table_sink,omitempty"` - MultiCastStreamSink *TMultiCastDataStreamSink `thrift:"multi_cast_stream_sink,12,optional" frugal:"12,optional,TMultiCastDataStreamSink" json:"multi_cast_stream_sink,omitempty"` + if p.BaseSchemaVersion == src { + return true + } else if p.BaseSchemaVersion == nil || src == nil { + return false + } + if *p.BaseSchemaVersion != *src { + return false + } + return true } +func (p *TOlapTableSink) Field22DeepEqual(src *TGroupCommitMode) bool { -func NewTDataSink() *TDataSink { - return &TDataSink{} + if p.GroupCommitMode == src { + return true + } else if p.GroupCommitMode == nil || src == nil { + return false + } + if *p.GroupCommitMode != *src { + return false + } + return true } +func (p *TOlapTableSink) Field23DeepEqual(src *float64) bool { -func (p *TDataSink) InitDefault() { - *p = TDataSink{} + if p.MaxFilterRatio == src { + return true + } else if p.MaxFilterRatio == nil || src == nil { + return false + } + if *p.MaxFilterRatio != *src { + return false + } + return true } +func (p *TOlapTableSink) Field24DeepEqual(src *string) bool { -func (p *TDataSink) GetType() (v TDataSinkType) { - return p.Type + if p.StorageVaultId == src { + return true + } else if p.StorageVaultId == nil || src == nil { + return false + } + if strings.Compare(*p.StorageVaultId, *src) 
!= 0 { + return false + } + return true } -var TDataSink_StreamSink_DEFAULT *TDataStreamSink - -func (p *TDataSink) GetStreamSink() (v *TDataStreamSink) { - if !p.IsSetStreamSink() { - return TDataSink_StreamSink_DEFAULT - } - return p.StreamSink +type THiveLocationParams struct { + WritePath *string `thrift:"write_path,1,optional" frugal:"1,optional,string" json:"write_path,omitempty"` + TargetPath *string `thrift:"target_path,2,optional" frugal:"2,optional,string" json:"target_path,omitempty"` + FileType *types.TFileType `thrift:"file_type,3,optional" frugal:"3,optional,TFileType" json:"file_type,omitempty"` + OriginalWritePath *string `thrift:"original_write_path,4,optional" frugal:"4,optional,string" json:"original_write_path,omitempty"` } -var TDataSink_ResultSink_DEFAULT *TResultSink +func NewTHiveLocationParams() *THiveLocationParams { + return &THiveLocationParams{} +} -func (p *TDataSink) GetResultSink() (v *TResultSink) { - if !p.IsSetResultSink() { - return TDataSink_ResultSink_DEFAULT - } - return p.ResultSink +func (p *THiveLocationParams) InitDefault() { } -var TDataSink_MysqlTableSink_DEFAULT *TMysqlTableSink +var THiveLocationParams_WritePath_DEFAULT string -func (p *TDataSink) GetMysqlTableSink() (v *TMysqlTableSink) { - if !p.IsSetMysqlTableSink() { - return TDataSink_MysqlTableSink_DEFAULT +func (p *THiveLocationParams) GetWritePath() (v string) { + if !p.IsSetWritePath() { + return THiveLocationParams_WritePath_DEFAULT } - return p.MysqlTableSink + return *p.WritePath } -var TDataSink_ExportSink_DEFAULT *TExportSink +var THiveLocationParams_TargetPath_DEFAULT string -func (p *TDataSink) GetExportSink() (v *TExportSink) { - if !p.IsSetExportSink() { - return TDataSink_ExportSink_DEFAULT +func (p *THiveLocationParams) GetTargetPath() (v string) { + if !p.IsSetTargetPath() { + return THiveLocationParams_TargetPath_DEFAULT } - return p.ExportSink + return *p.TargetPath } -var TDataSink_OlapTableSink_DEFAULT *TOlapTableSink +var THiveLocationParams_FileType_DEFAULT types.TFileType -func (p *TDataSink) GetOlapTableSink() (v *TOlapTableSink) { - if !p.IsSetOlapTableSink() { - return TDataSink_OlapTableSink_DEFAULT +func (p *THiveLocationParams) GetFileType() (v types.TFileType) { + if !p.IsSetFileType() { + return THiveLocationParams_FileType_DEFAULT } - return p.OlapTableSink + return *p.FileType } -var TDataSink_MemoryScratchSink_DEFAULT *TMemoryScratchSink +var THiveLocationParams_OriginalWritePath_DEFAULT string -func (p *TDataSink) GetMemoryScratchSink() (v *TMemoryScratchSink) { - if !p.IsSetMemoryScratchSink() { - return TDataSink_MemoryScratchSink_DEFAULT +func (p *THiveLocationParams) GetOriginalWritePath() (v string) { + if !p.IsSetOriginalWritePath() { + return THiveLocationParams_OriginalWritePath_DEFAULT } - return p.MemoryScratchSink + return *p.OriginalWritePath +} +func (p *THiveLocationParams) SetWritePath(val *string) { + p.WritePath = val +} +func (p *THiveLocationParams) SetTargetPath(val *string) { + p.TargetPath = val +} +func (p *THiveLocationParams) SetFileType(val *types.TFileType) { + p.FileType = val +} +func (p *THiveLocationParams) SetOriginalWritePath(val *string) { + p.OriginalWritePath = val } -var TDataSink_OdbcTableSink_DEFAULT *TOdbcTableSink - -func (p *TDataSink) GetOdbcTableSink() (v *TOdbcTableSink) { - if !p.IsSetOdbcTableSink() { - return TDataSink_OdbcTableSink_DEFAULT - } - return p.OdbcTableSink +var fieldIDToName_THiveLocationParams = map[int16]string{ + 1: "write_path", + 2: "target_path", + 3: "file_type", + 4: 
"original_write_path", } -var TDataSink_ResultFileSink_DEFAULT *TResultFileSink +func (p *THiveLocationParams) IsSetWritePath() bool { + return p.WritePath != nil +} -func (p *TDataSink) GetResultFileSink() (v *TResultFileSink) { - if !p.IsSetResultFileSink() { - return TDataSink_ResultFileSink_DEFAULT - } - return p.ResultFileSink +func (p *THiveLocationParams) IsSetTargetPath() bool { + return p.TargetPath != nil } -var TDataSink_JdbcTableSink_DEFAULT *TJdbcTableSink +func (p *THiveLocationParams) IsSetFileType() bool { + return p.FileType != nil +} -func (p *TDataSink) GetJdbcTableSink() (v *TJdbcTableSink) { - if !p.IsSetJdbcTableSink() { - return TDataSink_JdbcTableSink_DEFAULT - } - return p.JdbcTableSink +func (p *THiveLocationParams) IsSetOriginalWritePath() bool { + return p.OriginalWritePath != nil } -var TDataSink_MultiCastStreamSink_DEFAULT *TMultiCastDataStreamSink +func (p *THiveLocationParams) Read(iprot thrift.TProtocol) (err error) { -func (p *TDataSink) GetMultiCastStreamSink() (v *TMultiCastDataStreamSink) { - if !p.IsSetMultiCastStreamSink() { - return TDataSink_MultiCastStreamSink_DEFAULT - } - return p.MultiCastStreamSink -} + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveLocationParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveLocationParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.WritePath = _field + return nil +} +func (p *THiveLocationParams) ReadField2(iprot thrift.TProtocol) error { + + var 
_field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TargetPath = _field + return nil +} +func (p *THiveLocationParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TFileType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TFileType(v) + _field = &tmp + } + p.FileType = _field + return nil +} +func (p *THiveLocationParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OriginalWritePath = _field + return nil +} + +func (p *THiveLocationParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THiveLocationParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THiveLocationParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetWritePath() { + if err = oprot.WriteFieldBegin("write_path", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.WritePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THiveLocationParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTargetPath() { + if err = oprot.WriteFieldBegin("target_path", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TargetPath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THiveLocationParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFileType() { + if err = oprot.WriteFieldBegin("file_type", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THiveLocationParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetOriginalWritePath() { + if err = oprot.WriteFieldBegin("original_write_path", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OriginalWritePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THiveLocationParams) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THiveLocationParams(%+v)", *p) + +} + +func (p *THiveLocationParams) DeepEqual(ano *THiveLocationParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WritePath) { + return false + } + if !p.Field2DeepEqual(ano.TargetPath) { + return false + } + if !p.Field3DeepEqual(ano.FileType) { + return false + } + if !p.Field4DeepEqual(ano.OriginalWritePath) { + return false + } + return true +} + +func (p *THiveLocationParams) Field1DeepEqual(src *string) bool { + + if p.WritePath == src { + return true + } else if p.WritePath == nil || src == nil { + return false + } + if strings.Compare(*p.WritePath, *src) != 0 { + return false + } + return true +} +func (p *THiveLocationParams) Field2DeepEqual(src *string) bool { + + if p.TargetPath == src { + return true + } else if p.TargetPath == nil || src == nil { + return false + } + if strings.Compare(*p.TargetPath, *src) != 0 { + return false + } + return true +} +func (p *THiveLocationParams) Field3DeepEqual(src *types.TFileType) bool { + + if p.FileType == src { + return true + } else if p.FileType == nil || src == nil { + return false + } + if *p.FileType != *src { + return false + } + return true +} +func (p *THiveLocationParams) Field4DeepEqual(src *string) bool { + + if p.OriginalWritePath == src { + return true + } else if p.OriginalWritePath == nil || src == nil { + return false + } + if strings.Compare(*p.OriginalWritePath, *src) != 0 { + return false + } + return true +} + +type TSortedColumn struct { + SortColumnName *string `thrift:"sort_column_name,1,optional" frugal:"1,optional,string" json:"sort_column_name,omitempty"` + Order *int32 `thrift:"order,2,optional" frugal:"2,optional,i32" json:"order,omitempty"` +} + +func NewTSortedColumn() *TSortedColumn { + return &TSortedColumn{} +} + +func (p *TSortedColumn) InitDefault() { +} + +var TSortedColumn_SortColumnName_DEFAULT string + +func (p *TSortedColumn) GetSortColumnName() (v string) { + if !p.IsSetSortColumnName() { + return TSortedColumn_SortColumnName_DEFAULT + } + return *p.SortColumnName +} + +var TSortedColumn_Order_DEFAULT int32 + +func (p *TSortedColumn) GetOrder() (v int32) { + if !p.IsSetOrder() { + return TSortedColumn_Order_DEFAULT + } + return *p.Order +} +func (p *TSortedColumn) SetSortColumnName(val *string) { + p.SortColumnName = val +} +func (p *TSortedColumn) SetOrder(val *int32) { + p.Order = val +} + +var fieldIDToName_TSortedColumn = map[int16]string{ + 1: "sort_column_name", + 2: "order", +} + +func (p *TSortedColumn) IsSetSortColumnName() bool { + return p.SortColumnName != nil +} + +func (p *TSortedColumn) IsSetOrder() bool { + return p.Order != nil +} + +func (p *TSortedColumn) Read(iprot 
thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortedColumn[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSortedColumn) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SortColumnName = _field + return nil +} +func (p *TSortedColumn) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Order = _field + return nil +} + +func (p *TSortedColumn) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSortedColumn"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSortedColumn) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSortColumnName() { + if err = oprot.WriteFieldBegin("sort_column_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SortColumnName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSortedColumn) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetOrder() { + if err = oprot.WriteFieldBegin("order", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Order); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSortedColumn) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSortedColumn(%+v)", *p) + +} + +func (p *TSortedColumn) DeepEqual(ano *TSortedColumn) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SortColumnName) { + return false + } + if !p.Field2DeepEqual(ano.Order) { + return false + } + return true +} + +func (p *TSortedColumn) Field1DeepEqual(src *string) bool { + + if p.SortColumnName == src { + return true + } else if p.SortColumnName == nil || src == nil { + return false + } + if strings.Compare(*p.SortColumnName, *src) != 0 { + return false + } + return true +} +func (p *TSortedColumn) Field2DeepEqual(src *int32) bool { + + if p.Order == src { + return true + } else if p.Order == nil || src == nil { + return false + } + if *p.Order != *src { + return false + } + return true +} + +type TBucketingMode struct { + BucketVersion *int32 `thrift:"bucket_version,1,optional" frugal:"1,optional,i32" json:"bucket_version,omitempty"` +} + +func NewTBucketingMode() *TBucketingMode { + return &TBucketingMode{} +} + +func (p *TBucketingMode) InitDefault() { +} + +var TBucketingMode_BucketVersion_DEFAULT int32 + +func (p *TBucketingMode) GetBucketVersion() (v int32) { + if !p.IsSetBucketVersion() { + return TBucketingMode_BucketVersion_DEFAULT + } + return *p.BucketVersion +} +func (p *TBucketingMode) SetBucketVersion(val *int32) { + p.BucketVersion = val +} + +var fieldIDToName_TBucketingMode = map[int16]string{ + 1: "bucket_version", +} + +func (p *TBucketingMode) IsSetBucketVersion() bool { + return p.BucketVersion != nil +} + +func (p *TBucketingMode) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read 
field %d '%s' error: ", p, fieldId, fieldIDToName_TBucketingMode[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBucketingMode) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BucketVersion = _field + return nil +} + +func (p *TBucketingMode) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBucketingMode"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TBucketingMode) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketVersion() { + if err = oprot.WriteFieldBegin("bucket_version", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BucketVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TBucketingMode) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TBucketingMode(%+v)", *p) + +} + +func (p *TBucketingMode) DeepEqual(ano *TBucketingMode) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.BucketVersion) { + return false + } + return true +} + +func (p *TBucketingMode) Field1DeepEqual(src *int32) bool { + + if p.BucketVersion == src { + return true + } else if p.BucketVersion == nil || src == nil { + return false + } + if *p.BucketVersion != *src { + return false + } + return true +} + +type THiveBucket struct { + BucketedBy []string `thrift:"bucketed_by,1,optional" frugal:"1,optional,list" json:"bucketed_by,omitempty"` + BucketMode *TBucketingMode `thrift:"bucket_mode,2,optional" frugal:"2,optional,TBucketingMode" json:"bucket_mode,omitempty"` + BucketCount *int32 `thrift:"bucket_count,3,optional" frugal:"3,optional,i32" json:"bucket_count,omitempty"` + SortedBy []*TSortedColumn `thrift:"sorted_by,4,optional" frugal:"4,optional,list" json:"sorted_by,omitempty"` +} + +func NewTHiveBucket() *THiveBucket { + return &THiveBucket{} +} + +func (p *THiveBucket) InitDefault() { +} + +var THiveBucket_BucketedBy_DEFAULT []string + +func (p *THiveBucket) GetBucketedBy() (v []string) { + if !p.IsSetBucketedBy() { + return THiveBucket_BucketedBy_DEFAULT + } + return p.BucketedBy +} + +var 
THiveBucket_BucketMode_DEFAULT *TBucketingMode + +func (p *THiveBucket) GetBucketMode() (v *TBucketingMode) { + if !p.IsSetBucketMode() { + return THiveBucket_BucketMode_DEFAULT + } + return p.BucketMode +} + +var THiveBucket_BucketCount_DEFAULT int32 + +func (p *THiveBucket) GetBucketCount() (v int32) { + if !p.IsSetBucketCount() { + return THiveBucket_BucketCount_DEFAULT + } + return *p.BucketCount +} + +var THiveBucket_SortedBy_DEFAULT []*TSortedColumn + +func (p *THiveBucket) GetSortedBy() (v []*TSortedColumn) { + if !p.IsSetSortedBy() { + return THiveBucket_SortedBy_DEFAULT + } + return p.SortedBy +} +func (p *THiveBucket) SetBucketedBy(val []string) { + p.BucketedBy = val +} +func (p *THiveBucket) SetBucketMode(val *TBucketingMode) { + p.BucketMode = val +} +func (p *THiveBucket) SetBucketCount(val *int32) { + p.BucketCount = val +} +func (p *THiveBucket) SetSortedBy(val []*TSortedColumn) { + p.SortedBy = val +} + +var fieldIDToName_THiveBucket = map[int16]string{ + 1: "bucketed_by", + 2: "bucket_mode", + 3: "bucket_count", + 4: "sorted_by", +} + +func (p *THiveBucket) IsSetBucketedBy() bool { + return p.BucketedBy != nil +} + +func (p *THiveBucket) IsSetBucketMode() bool { + return p.BucketMode != nil +} + +func (p *THiveBucket) IsSetBucketCount() bool { + return p.BucketCount != nil +} + +func (p *THiveBucket) IsSetSortedBy() bool { + return p.SortedBy != nil +} + +func (p *THiveBucket) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveBucket[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveBucket) 
ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.BucketedBy = _field + return nil +} +func (p *THiveBucket) ReadField2(iprot thrift.TProtocol) error { + _field := NewTBucketingMode() + if err := _field.Read(iprot); err != nil { + return err + } + p.BucketMode = _field + return nil +} +func (p *THiveBucket) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BucketCount = _field + return nil +} +func (p *THiveBucket) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TSortedColumn, 0, size) + values := make([]TSortedColumn, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SortedBy = _field + return nil +} + +func (p *THiveBucket) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THiveBucket"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THiveBucket) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketedBy() { + if err = oprot.WriteFieldBegin("bucketed_by", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.BucketedBy)); err != nil { + return err + } + for _, v := range p.BucketedBy { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THiveBucket) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketMode() { + if err = oprot.WriteFieldBegin("bucket_mode", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.BucketMode.Write(oprot); err != nil { + 
return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THiveBucket) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketCount() { + if err = oprot.WriteFieldBegin("bucket_count", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BucketCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THiveBucket) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSortedBy() { + if err = oprot.WriteFieldBegin("sorted_by", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortedBy)); err != nil { + return err + } + for _, v := range p.SortedBy { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THiveBucket) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THiveBucket(%+v)", *p) + +} + +func (p *THiveBucket) DeepEqual(ano *THiveBucket) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.BucketedBy) { + return false + } + if !p.Field2DeepEqual(ano.BucketMode) { + return false + } + if !p.Field3DeepEqual(ano.BucketCount) { + return false + } + if !p.Field4DeepEqual(ano.SortedBy) { + return false + } + return true +} + +func (p *THiveBucket) Field1DeepEqual(src []string) bool { + + if len(p.BucketedBy) != len(src) { + return false + } + for i, v := range p.BucketedBy { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *THiveBucket) Field2DeepEqual(src *TBucketingMode) bool { + + if !p.BucketMode.DeepEqual(src) { + return false + } + return true +} +func (p *THiveBucket) Field3DeepEqual(src *int32) bool { + + if p.BucketCount == src { + return true + } else if p.BucketCount == nil || src == nil { + return false + } + if *p.BucketCount != *src { + return false + } + return true +} +func (p *THiveBucket) Field4DeepEqual(src []*TSortedColumn) bool { + + if len(p.SortedBy) != len(src) { + return false + } + for i, v := range p.SortedBy { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type THiveColumn struct { + Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"` + ColumnType *THiveColumnType `thrift:"column_type,2,optional" frugal:"2,optional,THiveColumnType" json:"column_type,omitempty"` +} + +func NewTHiveColumn() *THiveColumn { + return &THiveColumn{} +} + +func (p *THiveColumn) InitDefault() { +} + +var THiveColumn_Name_DEFAULT string + +func (p *THiveColumn) GetName() (v string) { + if !p.IsSetName() { 
+ return THiveColumn_Name_DEFAULT + } + return *p.Name +} + +var THiveColumn_ColumnType_DEFAULT THiveColumnType + +func (p *THiveColumn) GetColumnType() (v THiveColumnType) { + if !p.IsSetColumnType() { + return THiveColumn_ColumnType_DEFAULT + } + return *p.ColumnType +} +func (p *THiveColumn) SetName(val *string) { + p.Name = val +} +func (p *THiveColumn) SetColumnType(val *THiveColumnType) { + p.ColumnType = val +} + +var fieldIDToName_THiveColumn = map[int16]string{ + 1: "name", + 2: "column_type", +} + +func (p *THiveColumn) IsSetName() bool { + return p.Name != nil +} + +func (p *THiveColumn) IsSetColumnType() bool { + return p.ColumnType != nil +} + +func (p *THiveColumn) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveColumn[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveColumn) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *THiveColumn) ReadField2(iprot thrift.TProtocol) error { + + var _field *THiveColumnType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := THiveColumnType(v) + _field = &tmp + } + p.ColumnType = _field + return nil +} + +func (p *THiveColumn) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THiveColumn"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THiveColumn) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THiveColumn) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnType() { + if err = oprot.WriteFieldBegin("column_type", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.ColumnType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THiveColumn) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THiveColumn(%+v)", *p) + +} + +func (p *THiveColumn) DeepEqual(ano *THiveColumn) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Name) { + return false + } + if !p.Field2DeepEqual(ano.ColumnType) { + return false + } + return true +} + +func (p *THiveColumn) Field1DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *THiveColumn) Field2DeepEqual(src *THiveColumnType) bool { + + if p.ColumnType == src { + return true + } else if p.ColumnType == nil || src == nil { + return false + } + if *p.ColumnType != *src { + return false + } + return true +} + +type THivePartition struct { + Values []string `thrift:"values,1,optional" frugal:"1,optional,list" json:"values,omitempty"` + Location *THiveLocationParams `thrift:"location,2,optional" frugal:"2,optional,THiveLocationParams" json:"location,omitempty"` + FileFormat *plannodes.TFileFormatType `thrift:"file_format,3,optional" frugal:"3,optional,TFileFormatType" json:"file_format,omitempty"` +} + +func NewTHivePartition() *THivePartition { + return &THivePartition{} +} + +func (p *THivePartition) InitDefault() { +} + +var THivePartition_Values_DEFAULT []string + +func (p *THivePartition) GetValues() (v []string) { + if !p.IsSetValues() { + return THivePartition_Values_DEFAULT + } + return p.Values +} + +var THivePartition_Location_DEFAULT *THiveLocationParams + +func (p *THivePartition) GetLocation() (v *THiveLocationParams) { + if !p.IsSetLocation() { + return THivePartition_Location_DEFAULT + } + return p.Location +} + +var THivePartition_FileFormat_DEFAULT plannodes.TFileFormatType + +func (p *THivePartition) GetFileFormat() (v plannodes.TFileFormatType) { + if !p.IsSetFileFormat() { + return THivePartition_FileFormat_DEFAULT + } + return *p.FileFormat +} +func (p *THivePartition) 
SetValues(val []string) { + p.Values = val +} +func (p *THivePartition) SetLocation(val *THiveLocationParams) { + p.Location = val +} +func (p *THivePartition) SetFileFormat(val *plannodes.TFileFormatType) { + p.FileFormat = val +} + +var fieldIDToName_THivePartition = map[int16]string{ + 1: "values", + 2: "location", + 3: "file_format", +} + +func (p *THivePartition) IsSetValues() bool { + return p.Values != nil +} + +func (p *THivePartition) IsSetLocation() bool { + return p.Location != nil +} + +func (p *THivePartition) IsSetFileFormat() bool { + return p.FileFormat != nil +} + +func (p *THivePartition) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THivePartition[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THivePartition) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Values = _field + return nil +} +func (p *THivePartition) ReadField2(iprot thrift.TProtocol) error { + _field := NewTHiveLocationParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Location = _field + return nil +} +func (p *THivePartition) ReadField3(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileFormatType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileFormatType(v) + _field = &tmp + } + p.FileFormat = _field + return nil +} + +func (p *THivePartition) Write(oprot thrift.TProtocol) (err error) { + var 
fieldId int16 + if err = oprot.WriteStructBegin("THivePartition"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THivePartition) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetValues() { + if err = oprot.WriteFieldBegin("values", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return err + } + for _, v := range p.Values { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THivePartition) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetLocation() { + if err = oprot.WriteFieldBegin("location", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Location.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THivePartition) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFileFormat() { + if err = oprot.WriteFieldBegin("file_format", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THivePartition) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THivePartition(%+v)", *p) + +} + +func (p *THivePartition) DeepEqual(ano *THivePartition) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Values) { + return false + } + if !p.Field2DeepEqual(ano.Location) { + return false + } + if !p.Field3DeepEqual(ano.FileFormat) { + return false + } + return true +} + +func (p *THivePartition) Field1DeepEqual(src []string) bool { + + if len(p.Values) != len(src) { + return false + } + for i, v := 
range p.Values { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *THivePartition) Field2DeepEqual(src *THiveLocationParams) bool { + + if !p.Location.DeepEqual(src) { + return false + } + return true +} +func (p *THivePartition) Field3DeepEqual(src *plannodes.TFileFormatType) bool { + + if p.FileFormat == src { + return true + } else if p.FileFormat == nil || src == nil { + return false + } + if *p.FileFormat != *src { + return false + } + return true +} + +type THiveSerDeProperties struct { + FieldDelim *string `thrift:"field_delim,1,optional" frugal:"1,optional,string" json:"field_delim,omitempty"` + LineDelim *string `thrift:"line_delim,2,optional" frugal:"2,optional,string" json:"line_delim,omitempty"` + CollectionDelim *string `thrift:"collection_delim,3,optional" frugal:"3,optional,string" json:"collection_delim,omitempty"` + MapkvDelim *string `thrift:"mapkv_delim,4,optional" frugal:"4,optional,string" json:"mapkv_delim,omitempty"` + EscapeChar *string `thrift:"escape_char,5,optional" frugal:"5,optional,string" json:"escape_char,omitempty"` + NullFormat *string `thrift:"null_format,6,optional" frugal:"6,optional,string" json:"null_format,omitempty"` +} + +func NewTHiveSerDeProperties() *THiveSerDeProperties { + return &THiveSerDeProperties{} +} + +func (p *THiveSerDeProperties) InitDefault() { +} + +var THiveSerDeProperties_FieldDelim_DEFAULT string + +func (p *THiveSerDeProperties) GetFieldDelim() (v string) { + if !p.IsSetFieldDelim() { + return THiveSerDeProperties_FieldDelim_DEFAULT + } + return *p.FieldDelim +} + +var THiveSerDeProperties_LineDelim_DEFAULT string + +func (p *THiveSerDeProperties) GetLineDelim() (v string) { + if !p.IsSetLineDelim() { + return THiveSerDeProperties_LineDelim_DEFAULT + } + return *p.LineDelim +} + +var THiveSerDeProperties_CollectionDelim_DEFAULT string + +func (p *THiveSerDeProperties) GetCollectionDelim() (v string) { + if !p.IsSetCollectionDelim() { + return THiveSerDeProperties_CollectionDelim_DEFAULT + } + return *p.CollectionDelim +} + +var THiveSerDeProperties_MapkvDelim_DEFAULT string + +func (p *THiveSerDeProperties) GetMapkvDelim() (v string) { + if !p.IsSetMapkvDelim() { + return THiveSerDeProperties_MapkvDelim_DEFAULT + } + return *p.MapkvDelim +} + +var THiveSerDeProperties_EscapeChar_DEFAULT string + +func (p *THiveSerDeProperties) GetEscapeChar() (v string) { + if !p.IsSetEscapeChar() { + return THiveSerDeProperties_EscapeChar_DEFAULT + } + return *p.EscapeChar +} + +var THiveSerDeProperties_NullFormat_DEFAULT string + +func (p *THiveSerDeProperties) GetNullFormat() (v string) { + if !p.IsSetNullFormat() { + return THiveSerDeProperties_NullFormat_DEFAULT + } + return *p.NullFormat +} +func (p *THiveSerDeProperties) SetFieldDelim(val *string) { + p.FieldDelim = val +} +func (p *THiveSerDeProperties) SetLineDelim(val *string) { + p.LineDelim = val +} +func (p *THiveSerDeProperties) SetCollectionDelim(val *string) { + p.CollectionDelim = val +} +func (p *THiveSerDeProperties) SetMapkvDelim(val *string) { + p.MapkvDelim = val +} +func (p *THiveSerDeProperties) SetEscapeChar(val *string) { + p.EscapeChar = val +} +func (p *THiveSerDeProperties) SetNullFormat(val *string) { + p.NullFormat = val +} + +var fieldIDToName_THiveSerDeProperties = map[int16]string{ + 1: "field_delim", + 2: "line_delim", + 3: "collection_delim", + 4: "mapkv_delim", + 5: "escape_char", + 6: "null_format", +} + +func (p *THiveSerDeProperties) IsSetFieldDelim() bool { + return p.FieldDelim != nil +} + +func (p 
*THiveSerDeProperties) IsSetLineDelim() bool { + return p.LineDelim != nil +} + +func (p *THiveSerDeProperties) IsSetCollectionDelim() bool { + return p.CollectionDelim != nil +} + +func (p *THiveSerDeProperties) IsSetMapkvDelim() bool { + return p.MapkvDelim != nil +} + +func (p *THiveSerDeProperties) IsSetEscapeChar() bool { + return p.EscapeChar != nil +} + +func (p *THiveSerDeProperties) IsSetNullFormat() bool { + return p.NullFormat != nil +} + +func (p *THiveSerDeProperties) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveSerDeProperties[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveSerDeProperties) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FieldDelim = _field + return nil +} +func (p *THiveSerDeProperties) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LineDelim = _field + return nil +} +func (p *THiveSerDeProperties) ReadField3(iprot thrift.TProtocol) error { + + var _field 
*string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CollectionDelim = _field + return nil +} +func (p *THiveSerDeProperties) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.MapkvDelim = _field + return nil +} +func (p *THiveSerDeProperties) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.EscapeChar = _field + return nil +} +func (p *THiveSerDeProperties) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.NullFormat = _field + return nil +} + +func (p *THiveSerDeProperties) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THiveSerDeProperties"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFieldDelim() { + if err = oprot.WriteFieldBegin("field_delim", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FieldDelim); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetLineDelim() { + if err = oprot.WriteFieldBegin("line_delim", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.LineDelim); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCollectionDelim() { + if err = oprot.WriteFieldBegin("collection_delim", 
thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CollectionDelim); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetMapkvDelim() { + if err = oprot.WriteFieldBegin("mapkv_delim", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.MapkvDelim); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetEscapeChar() { + if err = oprot.WriteFieldBegin("escape_char", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.EscapeChar); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *THiveSerDeProperties) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetNullFormat() { + if err = oprot.WriteFieldBegin("null_format", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.NullFormat); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *THiveSerDeProperties) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THiveSerDeProperties(%+v)", *p) + +} + +func (p *THiveSerDeProperties) DeepEqual(ano *THiveSerDeProperties) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FieldDelim) { + return false + } + if !p.Field2DeepEqual(ano.LineDelim) { + return false + } + if !p.Field3DeepEqual(ano.CollectionDelim) { + return false + } + if !p.Field4DeepEqual(ano.MapkvDelim) { + return false + } + if !p.Field5DeepEqual(ano.EscapeChar) { + return false + } + if !p.Field6DeepEqual(ano.NullFormat) { + return false + } + return true +} + +func (p *THiveSerDeProperties) Field1DeepEqual(src *string) bool { + + if p.FieldDelim == src { + return true + } else if p.FieldDelim == nil || src == nil { + return false + } + if strings.Compare(*p.FieldDelim, *src) != 0 { + return false + } + return true +} +func (p *THiveSerDeProperties) Field2DeepEqual(src *string) bool { + + if p.LineDelim == src { + return true + } else if p.LineDelim == nil || src == nil { + return false + } + if strings.Compare(*p.LineDelim, *src) != 0 { + return false + } + return true +} +func (p *THiveSerDeProperties) Field3DeepEqual(src *string) bool 
{ + + if p.CollectionDelim == src { + return true + } else if p.CollectionDelim == nil || src == nil { + return false + } + if strings.Compare(*p.CollectionDelim, *src) != 0 { + return false + } + return true +} +func (p *THiveSerDeProperties) Field4DeepEqual(src *string) bool { + + if p.MapkvDelim == src { + return true + } else if p.MapkvDelim == nil || src == nil { + return false + } + if strings.Compare(*p.MapkvDelim, *src) != 0 { + return false + } + return true +} +func (p *THiveSerDeProperties) Field5DeepEqual(src *string) bool { + + if p.EscapeChar == src { + return true + } else if p.EscapeChar == nil || src == nil { + return false + } + if strings.Compare(*p.EscapeChar, *src) != 0 { + return false + } + return true +} +func (p *THiveSerDeProperties) Field6DeepEqual(src *string) bool { + + if p.NullFormat == src { + return true + } else if p.NullFormat == nil || src == nil { + return false + } + if strings.Compare(*p.NullFormat, *src) != 0 { + return false + } + return true +} + +type THiveTableSink struct { + DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"` + TableName *string `thrift:"table_name,2,optional" frugal:"2,optional,string" json:"table_name,omitempty"` + Columns []*THiveColumn `thrift:"columns,3,optional" frugal:"3,optional,list" json:"columns,omitempty"` + Partitions []*THivePartition `thrift:"partitions,4,optional" frugal:"4,optional,list" json:"partitions,omitempty"` + BucketInfo *THiveBucket `thrift:"bucket_info,5,optional" frugal:"5,optional,THiveBucket" json:"bucket_info,omitempty"` + FileFormat *plannodes.TFileFormatType `thrift:"file_format,6,optional" frugal:"6,optional,TFileFormatType" json:"file_format,omitempty"` + CompressionType *plannodes.TFileCompressType `thrift:"compression_type,7,optional" frugal:"7,optional,TFileCompressType" json:"compression_type,omitempty"` + Location *THiveLocationParams `thrift:"location,8,optional" frugal:"8,optional,THiveLocationParams" json:"location,omitempty"` + HadoopConfig map[string]string `thrift:"hadoop_config,9,optional" frugal:"9,optional,map" json:"hadoop_config,omitempty"` + Overwrite *bool `thrift:"overwrite,10,optional" frugal:"10,optional,bool" json:"overwrite,omitempty"` + SerdeProperties *THiveSerDeProperties `thrift:"serde_properties,11,optional" frugal:"11,optional,THiveSerDeProperties" json:"serde_properties,omitempty"` +} + +func NewTHiveTableSink() *THiveTableSink { + return &THiveTableSink{} +} + +func (p *THiveTableSink) InitDefault() { +} + +var THiveTableSink_DbName_DEFAULT string + +func (p *THiveTableSink) GetDbName() (v string) { + if !p.IsSetDbName() { + return THiveTableSink_DbName_DEFAULT + } + return *p.DbName +} + +var THiveTableSink_TableName_DEFAULT string + +func (p *THiveTableSink) GetTableName() (v string) { + if !p.IsSetTableName() { + return THiveTableSink_TableName_DEFAULT + } + return *p.TableName +} + +var THiveTableSink_Columns_DEFAULT []*THiveColumn + +func (p *THiveTableSink) GetColumns() (v []*THiveColumn) { + if !p.IsSetColumns() { + return THiveTableSink_Columns_DEFAULT + } + return p.Columns +} + +var THiveTableSink_Partitions_DEFAULT []*THivePartition + +func (p *THiveTableSink) GetPartitions() (v []*THivePartition) { + if !p.IsSetPartitions() { + return THiveTableSink_Partitions_DEFAULT + } + return p.Partitions +} + +var THiveTableSink_BucketInfo_DEFAULT *THiveBucket + +func (p *THiveTableSink) GetBucketInfo() (v *THiveBucket) { + if !p.IsSetBucketInfo() { + return THiveTableSink_BucketInfo_DEFAULT + } + return p.BucketInfo 
+} + +var THiveTableSink_FileFormat_DEFAULT plannodes.TFileFormatType + +func (p *THiveTableSink) GetFileFormat() (v plannodes.TFileFormatType) { + if !p.IsSetFileFormat() { + return THiveTableSink_FileFormat_DEFAULT + } + return *p.FileFormat +} + +var THiveTableSink_CompressionType_DEFAULT plannodes.TFileCompressType + +func (p *THiveTableSink) GetCompressionType() (v plannodes.TFileCompressType) { + if !p.IsSetCompressionType() { + return THiveTableSink_CompressionType_DEFAULT + } + return *p.CompressionType +} + +var THiveTableSink_Location_DEFAULT *THiveLocationParams + +func (p *THiveTableSink) GetLocation() (v *THiveLocationParams) { + if !p.IsSetLocation() { + return THiveTableSink_Location_DEFAULT + } + return p.Location +} + +var THiveTableSink_HadoopConfig_DEFAULT map[string]string + +func (p *THiveTableSink) GetHadoopConfig() (v map[string]string) { + if !p.IsSetHadoopConfig() { + return THiveTableSink_HadoopConfig_DEFAULT + } + return p.HadoopConfig +} + +var THiveTableSink_Overwrite_DEFAULT bool + +func (p *THiveTableSink) GetOverwrite() (v bool) { + if !p.IsSetOverwrite() { + return THiveTableSink_Overwrite_DEFAULT + } + return *p.Overwrite +} + +var THiveTableSink_SerdeProperties_DEFAULT *THiveSerDeProperties + +func (p *THiveTableSink) GetSerdeProperties() (v *THiveSerDeProperties) { + if !p.IsSetSerdeProperties() { + return THiveTableSink_SerdeProperties_DEFAULT + } + return p.SerdeProperties +} +func (p *THiveTableSink) SetDbName(val *string) { + p.DbName = val +} +func (p *THiveTableSink) SetTableName(val *string) { + p.TableName = val +} +func (p *THiveTableSink) SetColumns(val []*THiveColumn) { + p.Columns = val +} +func (p *THiveTableSink) SetPartitions(val []*THivePartition) { + p.Partitions = val +} +func (p *THiveTableSink) SetBucketInfo(val *THiveBucket) { + p.BucketInfo = val +} +func (p *THiveTableSink) SetFileFormat(val *plannodes.TFileFormatType) { + p.FileFormat = val +} +func (p *THiveTableSink) SetCompressionType(val *plannodes.TFileCompressType) { + p.CompressionType = val +} +func (p *THiveTableSink) SetLocation(val *THiveLocationParams) { + p.Location = val +} +func (p *THiveTableSink) SetHadoopConfig(val map[string]string) { + p.HadoopConfig = val +} +func (p *THiveTableSink) SetOverwrite(val *bool) { + p.Overwrite = val +} +func (p *THiveTableSink) SetSerdeProperties(val *THiveSerDeProperties) { + p.SerdeProperties = val +} + +var fieldIDToName_THiveTableSink = map[int16]string{ + 1: "db_name", + 2: "table_name", + 3: "columns", + 4: "partitions", + 5: "bucket_info", + 6: "file_format", + 7: "compression_type", + 8: "location", + 9: "hadoop_config", + 10: "overwrite", + 11: "serde_properties", +} + +func (p *THiveTableSink) IsSetDbName() bool { + return p.DbName != nil +} + +func (p *THiveTableSink) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *THiveTableSink) IsSetColumns() bool { + return p.Columns != nil +} + +func (p *THiveTableSink) IsSetPartitions() bool { + return p.Partitions != nil +} + +func (p *THiveTableSink) IsSetBucketInfo() bool { + return p.BucketInfo != nil +} + +func (p *THiveTableSink) IsSetFileFormat() bool { + return p.FileFormat != nil +} + +func (p *THiveTableSink) IsSetCompressionType() bool { + return p.CompressionType != nil +} + +func (p *THiveTableSink) IsSetLocation() bool { + return p.Location != nil +} + +func (p *THiveTableSink) IsSetHadoopConfig() bool { + return p.HadoopConfig != nil +} + +func (p *THiveTableSink) IsSetOverwrite() bool { + return p.Overwrite != nil +} + +func (p *THiveTableSink) 
IsSetSerdeProperties() bool { + return p.SerdeProperties != nil +} + +func (p *THiveTableSink) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.MAP { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveTableSink[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveTableSink) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DbName = _field + return nil +} +func (p *THiveTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableName = _field + return nil +} +func (p *THiveTableSink) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*THiveColumn, 0, size) + values := make([]THiveColumn, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Columns = _field + return nil +} +func (p *THiveTableSink) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*THivePartition, 0, size) + values := make([]THivePartition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Partitions = _field + return nil +} +func (p *THiveTableSink) ReadField5(iprot thrift.TProtocol) error { + _field := NewTHiveBucket() + if err := _field.Read(iprot); err != nil { + return err + } + p.BucketInfo = _field + return nil +} +func (p *THiveTableSink) ReadField6(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileFormatType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileFormatType(v) + _field = &tmp + } + p.FileFormat = _field + return nil +} +func (p *THiveTableSink) ReadField7(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileCompressType(v) + _field = &tmp + } + p.CompressionType = _field + return nil +} +func (p *THiveTableSink) ReadField8(iprot thrift.TProtocol) error { + _field := NewTHiveLocationParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Location = _field + return nil +} +func (p *THiveTableSink) ReadField9(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.HadoopConfig = _field + return nil +} +func (p *THiveTableSink) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Overwrite = _field + return nil +} +func (p *THiveTableSink) ReadField11(iprot thrift.TProtocol) error { + _field := NewTHiveSerDeProperties() + if err := _field.Read(iprot); err != nil { + return err + } + p.SerdeProperties = _field + return nil +} + +func (p *THiveTableSink) Write(oprot thrift.TProtocol) (err 
error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THiveTableSink"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THiveTableSink) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THiveTableSink) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THiveTableSink) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetColumns() { + if err = oprot.WriteFieldBegin("columns", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { + return err + } + for _, v := range p.Columns { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THiveTableSink) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return err + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THiveTableSink) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketInfo() { + if err = oprot.WriteFieldBegin("bucket_info", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.BucketInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *THiveTableSink) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetFileFormat() { + if err = oprot.WriteFieldBegin("file_format", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *THiveTableSink) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressionType() { + if err = oprot.WriteFieldBegin("compression_type", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.CompressionType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *THiveTableSink) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetLocation() { + if err = oprot.WriteFieldBegin("location", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.Location.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *THiveTableSink) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHadoopConfig() { + if err = oprot.WriteFieldBegin("hadoop_config", thrift.MAP, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, 
thrift.STRING, len(p.HadoopConfig)); err != nil { + return err + } + for k, v := range p.HadoopConfig { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *THiveTableSink) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetOverwrite() { + if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Overwrite); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *THiveTableSink) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetSerdeProperties() { + if err = oprot.WriteFieldBegin("serde_properties", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.SerdeProperties.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *THiveTableSink) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THiveTableSink(%+v)", *p) + +} + +func (p *THiveTableSink) DeepEqual(ano *THiveTableSink) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DbName) { + return false + } + if !p.Field2DeepEqual(ano.TableName) { + return false + } + if !p.Field3DeepEqual(ano.Columns) { + return false + } + if !p.Field4DeepEqual(ano.Partitions) { + return false + } + if !p.Field5DeepEqual(ano.BucketInfo) { + return false + } + if !p.Field6DeepEqual(ano.FileFormat) { + return false + } + if !p.Field7DeepEqual(ano.CompressionType) { + return false + } + if !p.Field8DeepEqual(ano.Location) { + return false + } + if !p.Field9DeepEqual(ano.HadoopConfig) { + return false + } + if !p.Field10DeepEqual(ano.Overwrite) { + return false + } + if !p.Field11DeepEqual(ano.SerdeProperties) { + return false + } + return true +} + +func (p *THiveTableSink) Field1DeepEqual(src *string) bool { + + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { + return false + } + if strings.Compare(*p.DbName, *src) != 0 { + return false + } + return true +} +func (p *THiveTableSink) Field2DeepEqual(src *string) bool { + + if p.TableName == src { + return true + } else if p.TableName == nil || src == nil { + return false + } + if strings.Compare(*p.TableName, *src) != 0 { + return false + } + return true +} +func (p *THiveTableSink) Field3DeepEqual(src []*THiveColumn) bool { + + if len(p.Columns) != len(src) { + return false + } + for i, v := range p.Columns { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *THiveTableSink) 
Field4DeepEqual(src []*THivePartition) bool { + + if len(p.Partitions) != len(src) { + return false + } + for i, v := range p.Partitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *THiveTableSink) Field5DeepEqual(src *THiveBucket) bool { + + if !p.BucketInfo.DeepEqual(src) { + return false + } + return true +} +func (p *THiveTableSink) Field6DeepEqual(src *plannodes.TFileFormatType) bool { + + if p.FileFormat == src { + return true + } else if p.FileFormat == nil || src == nil { + return false + } + if *p.FileFormat != *src { + return false + } + return true +} +func (p *THiveTableSink) Field7DeepEqual(src *plannodes.TFileCompressType) bool { + + if p.CompressionType == src { + return true + } else if p.CompressionType == nil || src == nil { + return false + } + if *p.CompressionType != *src { + return false + } + return true +} +func (p *THiveTableSink) Field8DeepEqual(src *THiveLocationParams) bool { + + if !p.Location.DeepEqual(src) { + return false + } + return true +} +func (p *THiveTableSink) Field9DeepEqual(src map[string]string) bool { + + if len(p.HadoopConfig) != len(src) { + return false + } + for k, v := range p.HadoopConfig { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *THiveTableSink) Field10DeepEqual(src *bool) bool { + + if p.Overwrite == src { + return true + } else if p.Overwrite == nil || src == nil { + return false + } + if *p.Overwrite != *src { + return false + } + return true +} +func (p *THiveTableSink) Field11DeepEqual(src *THiveSerDeProperties) bool { + + if !p.SerdeProperties.DeepEqual(src) { + return false + } + return true +} + +type TS3MPUPendingUpload struct { + Bucket *string `thrift:"bucket,1,optional" frugal:"1,optional,string" json:"bucket,omitempty"` + Key *string `thrift:"key,2,optional" frugal:"2,optional,string" json:"key,omitempty"` + UploadId *string `thrift:"upload_id,3,optional" frugal:"3,optional,string" json:"upload_id,omitempty"` + Etags map[int32]string `thrift:"etags,4,optional" frugal:"4,optional,map" json:"etags,omitempty"` +} + +func NewTS3MPUPendingUpload() *TS3MPUPendingUpload { + return &TS3MPUPendingUpload{} +} + +func (p *TS3MPUPendingUpload) InitDefault() { +} + +var TS3MPUPendingUpload_Bucket_DEFAULT string + +func (p *TS3MPUPendingUpload) GetBucket() (v string) { + if !p.IsSetBucket() { + return TS3MPUPendingUpload_Bucket_DEFAULT + } + return *p.Bucket +} + +var TS3MPUPendingUpload_Key_DEFAULT string + +func (p *TS3MPUPendingUpload) GetKey() (v string) { + if !p.IsSetKey() { + return TS3MPUPendingUpload_Key_DEFAULT + } + return *p.Key +} + +var TS3MPUPendingUpload_UploadId_DEFAULT string + +func (p *TS3MPUPendingUpload) GetUploadId() (v string) { + if !p.IsSetUploadId() { + return TS3MPUPendingUpload_UploadId_DEFAULT + } + return *p.UploadId +} + +var TS3MPUPendingUpload_Etags_DEFAULT map[int32]string + +func (p *TS3MPUPendingUpload) GetEtags() (v map[int32]string) { + if !p.IsSetEtags() { + return TS3MPUPendingUpload_Etags_DEFAULT + } + return p.Etags +} +func (p *TS3MPUPendingUpload) SetBucket(val *string) { + p.Bucket = val +} +func (p *TS3MPUPendingUpload) SetKey(val *string) { + p.Key = val +} +func (p *TS3MPUPendingUpload) SetUploadId(val *string) { + p.UploadId = val +} +func (p *TS3MPUPendingUpload) SetEtags(val map[int32]string) { + p.Etags = val +} + +var fieldIDToName_TS3MPUPendingUpload = map[int16]string{ + 1: "bucket", + 2: "key", + 3: "upload_id", + 4: "etags", +} + +func (p *TS3MPUPendingUpload) 
IsSetBucket() bool { + return p.Bucket != nil +} + +func (p *TS3MPUPendingUpload) IsSetKey() bool { + return p.Key != nil +} + +func (p *TS3MPUPendingUpload) IsSetUploadId() bool { + return p.UploadId != nil +} + +func (p *TS3MPUPendingUpload) IsSetEtags() bool { + return p.Etags != nil +} + +func (p *TS3MPUPendingUpload) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TS3MPUPendingUpload[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Bucket = _field + return nil +} +func (p *TS3MPUPendingUpload) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Key = _field + return nil +} +func (p *TS3MPUPendingUpload) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UploadId = _field + return nil +} +func (p *TS3MPUPendingUpload) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]string, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + 
_field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.Etags = _field + return nil +} + +func (p *TS3MPUPendingUpload) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TS3MPUPendingUpload"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetBucket() { + if err = oprot.WriteFieldBegin("bucket", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Bucket); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Key); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetUploadId() { + if err = oprot.WriteFieldBegin("upload_id", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UploadId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetEtags() { + if err = oprot.WriteFieldBegin("etags", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRING, len(p.Etags)); err != nil { + return err + } + for k, v := range p.Etags { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := 
oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TS3MPUPendingUpload(%+v)", *p) + +} + +func (p *TS3MPUPendingUpload) DeepEqual(ano *TS3MPUPendingUpload) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Bucket) { + return false + } + if !p.Field2DeepEqual(ano.Key) { + return false + } + if !p.Field3DeepEqual(ano.UploadId) { + return false + } + if !p.Field4DeepEqual(ano.Etags) { + return false + } + return true +} + +func (p *TS3MPUPendingUpload) Field1DeepEqual(src *string) bool { + + if p.Bucket == src { + return true + } else if p.Bucket == nil || src == nil { + return false + } + if strings.Compare(*p.Bucket, *src) != 0 { + return false + } + return true +} +func (p *TS3MPUPendingUpload) Field2DeepEqual(src *string) bool { + + if p.Key == src { + return true + } else if p.Key == nil || src == nil { + return false + } + if strings.Compare(*p.Key, *src) != 0 { + return false + } + return true +} +func (p *TS3MPUPendingUpload) Field3DeepEqual(src *string) bool { + + if p.UploadId == src { + return true + } else if p.UploadId == nil || src == nil { + return false + } + if strings.Compare(*p.UploadId, *src) != 0 { + return false + } + return true +} +func (p *TS3MPUPendingUpload) Field4DeepEqual(src map[int32]string) bool { + + if len(p.Etags) != len(src) { + return false + } + for k, v := range p.Etags { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} + +type THivePartitionUpdate struct { + Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"` + UpdateMode *TUpdateMode `thrift:"update_mode,2,optional" frugal:"2,optional,TUpdateMode" json:"update_mode,omitempty"` + Location *THiveLocationParams `thrift:"location,3,optional" frugal:"3,optional,THiveLocationParams" json:"location,omitempty"` + FileNames []string `thrift:"file_names,4,optional" frugal:"4,optional,list" json:"file_names,omitempty"` + RowCount *int64 `thrift:"row_count,5,optional" frugal:"5,optional,i64" json:"row_count,omitempty"` + FileSize *int64 `thrift:"file_size,6,optional" frugal:"6,optional,i64" json:"file_size,omitempty"` + S3MpuPendingUploads []*TS3MPUPendingUpload `thrift:"s3_mpu_pending_uploads,7,optional" frugal:"7,optional,list" json:"s3_mpu_pending_uploads,omitempty"` +} + +func NewTHivePartitionUpdate() *THivePartitionUpdate { + return &THivePartitionUpdate{} +} + +func (p *THivePartitionUpdate) InitDefault() { +} + +var THivePartitionUpdate_Name_DEFAULT string + +func (p *THivePartitionUpdate) GetName() (v string) { + if !p.IsSetName() { + return THivePartitionUpdate_Name_DEFAULT + } + return *p.Name +} + +var THivePartitionUpdate_UpdateMode_DEFAULT TUpdateMode + +func (p *THivePartitionUpdate) GetUpdateMode() (v TUpdateMode) { + if !p.IsSetUpdateMode() { + return THivePartitionUpdate_UpdateMode_DEFAULT + } + return *p.UpdateMode +} + +var THivePartitionUpdate_Location_DEFAULT *THiveLocationParams + +func (p *THivePartitionUpdate) GetLocation() (v *THiveLocationParams) { + if !p.IsSetLocation() { + return THivePartitionUpdate_Location_DEFAULT + } + return 
p.Location +} + +var THivePartitionUpdate_FileNames_DEFAULT []string + +func (p *THivePartitionUpdate) GetFileNames() (v []string) { + if !p.IsSetFileNames() { + return THivePartitionUpdate_FileNames_DEFAULT + } + return p.FileNames +} + +var THivePartitionUpdate_RowCount_DEFAULT int64 + +func (p *THivePartitionUpdate) GetRowCount() (v int64) { + if !p.IsSetRowCount() { + return THivePartitionUpdate_RowCount_DEFAULT + } + return *p.RowCount +} + +var THivePartitionUpdate_FileSize_DEFAULT int64 + +func (p *THivePartitionUpdate) GetFileSize() (v int64) { + if !p.IsSetFileSize() { + return THivePartitionUpdate_FileSize_DEFAULT + } + return *p.FileSize +} + +var THivePartitionUpdate_S3MpuPendingUploads_DEFAULT []*TS3MPUPendingUpload + +func (p *THivePartitionUpdate) GetS3MpuPendingUploads() (v []*TS3MPUPendingUpload) { + if !p.IsSetS3MpuPendingUploads() { + return THivePartitionUpdate_S3MpuPendingUploads_DEFAULT + } + return p.S3MpuPendingUploads +} +func (p *THivePartitionUpdate) SetName(val *string) { + p.Name = val +} +func (p *THivePartitionUpdate) SetUpdateMode(val *TUpdateMode) { + p.UpdateMode = val +} +func (p *THivePartitionUpdate) SetLocation(val *THiveLocationParams) { + p.Location = val +} +func (p *THivePartitionUpdate) SetFileNames(val []string) { + p.FileNames = val +} +func (p *THivePartitionUpdate) SetRowCount(val *int64) { + p.RowCount = val +} +func (p *THivePartitionUpdate) SetFileSize(val *int64) { + p.FileSize = val +} +func (p *THivePartitionUpdate) SetS3MpuPendingUploads(val []*TS3MPUPendingUpload) { + p.S3MpuPendingUploads = val +} + +var fieldIDToName_THivePartitionUpdate = map[int16]string{ + 1: "name", + 2: "update_mode", + 3: "location", + 4: "file_names", + 5: "row_count", + 6: "file_size", + 7: "s3_mpu_pending_uploads", +} + +func (p *THivePartitionUpdate) IsSetName() bool { + return p.Name != nil +} + +func (p *THivePartitionUpdate) IsSetUpdateMode() bool { + return p.UpdateMode != nil +} + +func (p *THivePartitionUpdate) IsSetLocation() bool { + return p.Location != nil +} + +func (p *THivePartitionUpdate) IsSetFileNames() bool { + return p.FileNames != nil +} + +func (p *THivePartitionUpdate) IsSetRowCount() bool { + return p.RowCount != nil +} + +func (p *THivePartitionUpdate) IsSetFileSize() bool { + return p.FileSize != nil +} + +func (p *THivePartitionUpdate) IsSetS3MpuPendingUploads() bool { + return p.S3MpuPendingUploads != nil +} + +func (p *THivePartitionUpdate) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THivePartitionUpdate[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THivePartitionUpdate) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *THivePartitionUpdate) ReadField2(iprot thrift.TProtocol) error { + + var _field *TUpdateMode + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TUpdateMode(v) + _field = &tmp + } + p.UpdateMode = _field + return nil +} +func (p *THivePartitionUpdate) ReadField3(iprot thrift.TProtocol) error { + _field := NewTHiveLocationParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Location = _field + return nil +} +func (p *THivePartitionUpdate) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FileNames = _field + return nil +} +func (p *THivePartitionUpdate) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RowCount = _field + return nil +} +func (p *THivePartitionUpdate) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FileSize = _field + return nil +} +func (p *THivePartitionUpdate) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TS3MPUPendingUpload, 0, size) + values := make([]TS3MPUPendingUpload, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := 
_elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.S3MpuPendingUploads = _field + return nil +} + +func (p *THivePartitionUpdate) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THivePartitionUpdate"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUpdateMode() { + if err = oprot.WriteFieldBegin("update_mode", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.UpdateMode)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLocation() { + if err = oprot.WriteFieldBegin("location", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.Location.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFileNames() { + if err = 
oprot.WriteFieldBegin("file_names", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.FileNames)); err != nil { + return err + } + for _, v := range p.FileNames { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRowCount() { + if err = oprot.WriteFieldBegin("row_count", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RowCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetFileSize() { + if err = oprot.WriteFieldBegin("file_size", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FileSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *THivePartitionUpdate) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetS3MpuPendingUploads() { + if err = oprot.WriteFieldBegin("s3_mpu_pending_uploads", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.S3MpuPendingUploads)); err != nil { + return err + } + for _, v := range p.S3MpuPendingUploads { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *THivePartitionUpdate) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THivePartitionUpdate(%+v)", *p) + +} + +func (p *THivePartitionUpdate) DeepEqual(ano *THivePartitionUpdate) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Name) { + return false + } + if !p.Field2DeepEqual(ano.UpdateMode) { + return false + } + if !p.Field3DeepEqual(ano.Location) { + return false + } + if !p.Field4DeepEqual(ano.FileNames) { + return false + } + if !p.Field5DeepEqual(ano.RowCount) { + return false + } + if !p.Field6DeepEqual(ano.FileSize) { + return false + } + if !p.Field7DeepEqual(ano.S3MpuPendingUploads) { + return false + } + return true +} + +func (p *THivePartitionUpdate) Field1DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name 
== nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *THivePartitionUpdate) Field2DeepEqual(src *TUpdateMode) bool { + + if p.UpdateMode == src { + return true + } else if p.UpdateMode == nil || src == nil { + return false + } + if *p.UpdateMode != *src { + return false + } + return true +} +func (p *THivePartitionUpdate) Field3DeepEqual(src *THiveLocationParams) bool { + + if !p.Location.DeepEqual(src) { + return false + } + return true +} +func (p *THivePartitionUpdate) Field4DeepEqual(src []string) bool { + + if len(p.FileNames) != len(src) { + return false + } + for i, v := range p.FileNames { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *THivePartitionUpdate) Field5DeepEqual(src *int64) bool { + + if p.RowCount == src { + return true + } else if p.RowCount == nil || src == nil { + return false + } + if *p.RowCount != *src { + return false + } + return true +} +func (p *THivePartitionUpdate) Field6DeepEqual(src *int64) bool { + + if p.FileSize == src { + return true + } else if p.FileSize == nil || src == nil { + return false + } + if *p.FileSize != *src { + return false + } + return true +} +func (p *THivePartitionUpdate) Field7DeepEqual(src []*TS3MPUPendingUpload) bool { + + if len(p.S3MpuPendingUploads) != len(src) { + return false + } + for i, v := range p.S3MpuPendingUploads { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TIcebergCommitData struct { + FilePath *string `thrift:"file_path,1,optional" frugal:"1,optional,string" json:"file_path,omitempty"` + RowCount *int64 `thrift:"row_count,2,optional" frugal:"2,optional,i64" json:"row_count,omitempty"` + FileSize *int64 `thrift:"file_size,3,optional" frugal:"3,optional,i64" json:"file_size,omitempty"` + FileContent *TFileContent `thrift:"file_content,4,optional" frugal:"4,optional,TFileContent" json:"file_content,omitempty"` + PartitionValues []string `thrift:"partition_values,5,optional" frugal:"5,optional,list" json:"partition_values,omitempty"` + ReferencedDataFiles []string `thrift:"referenced_data_files,6,optional" frugal:"6,optional,list" json:"referenced_data_files,omitempty"` +} + +func NewTIcebergCommitData() *TIcebergCommitData { + return &TIcebergCommitData{} +} + +func (p *TIcebergCommitData) InitDefault() { +} + +var TIcebergCommitData_FilePath_DEFAULT string + +func (p *TIcebergCommitData) GetFilePath() (v string) { + if !p.IsSetFilePath() { + return TIcebergCommitData_FilePath_DEFAULT + } + return *p.FilePath +} + +var TIcebergCommitData_RowCount_DEFAULT int64 + +func (p *TIcebergCommitData) GetRowCount() (v int64) { + if !p.IsSetRowCount() { + return TIcebergCommitData_RowCount_DEFAULT + } + return *p.RowCount +} + +var TIcebergCommitData_FileSize_DEFAULT int64 + +func (p *TIcebergCommitData) GetFileSize() (v int64) { + if !p.IsSetFileSize() { + return TIcebergCommitData_FileSize_DEFAULT + } + return *p.FileSize +} + +var TIcebergCommitData_FileContent_DEFAULT TFileContent + +func (p *TIcebergCommitData) GetFileContent() (v TFileContent) { + if !p.IsSetFileContent() { + return TIcebergCommitData_FileContent_DEFAULT + } + return *p.FileContent +} + +var TIcebergCommitData_PartitionValues_DEFAULT []string + +func (p *TIcebergCommitData) GetPartitionValues() (v []string) { + if !p.IsSetPartitionValues() { + return TIcebergCommitData_PartitionValues_DEFAULT + } + return p.PartitionValues +} + +var TIcebergCommitData_ReferencedDataFiles_DEFAULT 
[]string + +func (p *TIcebergCommitData) GetReferencedDataFiles() (v []string) { + if !p.IsSetReferencedDataFiles() { + return TIcebergCommitData_ReferencedDataFiles_DEFAULT + } + return p.ReferencedDataFiles +} +func (p *TIcebergCommitData) SetFilePath(val *string) { + p.FilePath = val +} +func (p *TIcebergCommitData) SetRowCount(val *int64) { + p.RowCount = val +} +func (p *TIcebergCommitData) SetFileSize(val *int64) { + p.FileSize = val +} +func (p *TIcebergCommitData) SetFileContent(val *TFileContent) { + p.FileContent = val +} +func (p *TIcebergCommitData) SetPartitionValues(val []string) { + p.PartitionValues = val +} +func (p *TIcebergCommitData) SetReferencedDataFiles(val []string) { + p.ReferencedDataFiles = val +} + +var fieldIDToName_TIcebergCommitData = map[int16]string{ + 1: "file_path", + 2: "row_count", + 3: "file_size", + 4: "file_content", + 5: "partition_values", + 6: "referenced_data_files", +} + +func (p *TIcebergCommitData) IsSetFilePath() bool { + return p.FilePath != nil +} + +func (p *TIcebergCommitData) IsSetRowCount() bool { + return p.RowCount != nil +} + +func (p *TIcebergCommitData) IsSetFileSize() bool { + return p.FileSize != nil +} + +func (p *TIcebergCommitData) IsSetFileContent() bool { + return p.FileContent != nil +} + +func (p *TIcebergCommitData) IsSetPartitionValues() bool { + return p.PartitionValues != nil +} + +func (p *TIcebergCommitData) IsSetReferencedDataFiles() bool { + return p.ReferencedDataFiles != nil +} + +func (p *TIcebergCommitData) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.LIST { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: 
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergCommitData[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIcebergCommitData) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FilePath = _field + return nil +} +func (p *TIcebergCommitData) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RowCount = _field + return nil +} +func (p *TIcebergCommitData) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FileSize = _field + return nil +} +func (p *TIcebergCommitData) ReadField4(iprot thrift.TProtocol) error { + + var _field *TFileContent + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TFileContent(v) + _field = &tmp + } + p.FileContent = _field + return nil +} +func (p *TIcebergCommitData) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PartitionValues = _field + return nil +} +func (p *TIcebergCommitData) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ReferencedDataFiles = _field + return nil +} + +func (p *TIcebergCommitData) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TIcebergCommitData"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), 
err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFilePath() { + if err = oprot.WriteFieldBegin("file_path", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FilePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRowCount() { + if err = oprot.WriteFieldBegin("row_count", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RowCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFileSize() { + if err = oprot.WriteFieldBegin("file_size", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FileSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFileContent() { + if err = oprot.WriteFieldBegin("file_content", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileContent)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionValues() { + if err = oprot.WriteFieldBegin("partition_values", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.PartitionValues)); err != nil { + return err + } + for _, v := range p.PartitionValues { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TIcebergCommitData) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetReferencedDataFiles() { + if err = oprot.WriteFieldBegin("referenced_data_files", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if 
err := oprot.WriteListBegin(thrift.STRING, len(p.ReferencedDataFiles)); err != nil { + return err + } + for _, v := range p.ReferencedDataFiles { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TIcebergCommitData) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TIcebergCommitData(%+v)", *p) + +} + +func (p *TIcebergCommitData) DeepEqual(ano *TIcebergCommitData) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FilePath) { + return false + } + if !p.Field2DeepEqual(ano.RowCount) { + return false + } + if !p.Field3DeepEqual(ano.FileSize) { + return false + } + if !p.Field4DeepEqual(ano.FileContent) { + return false + } + if !p.Field5DeepEqual(ano.PartitionValues) { + return false + } + if !p.Field6DeepEqual(ano.ReferencedDataFiles) { + return false + } + return true +} + +func (p *TIcebergCommitData) Field1DeepEqual(src *string) bool { + + if p.FilePath == src { + return true + } else if p.FilePath == nil || src == nil { + return false + } + if strings.Compare(*p.FilePath, *src) != 0 { + return false + } + return true +} +func (p *TIcebergCommitData) Field2DeepEqual(src *int64) bool { + + if p.RowCount == src { + return true + } else if p.RowCount == nil || src == nil { + return false + } + if *p.RowCount != *src { + return false + } + return true +} +func (p *TIcebergCommitData) Field3DeepEqual(src *int64) bool { + + if p.FileSize == src { + return true + } else if p.FileSize == nil || src == nil { + return false + } + if *p.FileSize != *src { + return false + } + return true +} +func (p *TIcebergCommitData) Field4DeepEqual(src *TFileContent) bool { + + if p.FileContent == src { + return true + } else if p.FileContent == nil || src == nil { + return false + } + if *p.FileContent != *src { + return false + } + return true +} +func (p *TIcebergCommitData) Field5DeepEqual(src []string) bool { + + if len(p.PartitionValues) != len(src) { + return false + } + for i, v := range p.PartitionValues { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TIcebergCommitData) Field6DeepEqual(src []string) bool { + + if len(p.ReferencedDataFiles) != len(src) { + return false + } + for i, v := range p.ReferencedDataFiles { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} + +type TSortField struct { + SourceColumnId *int32 `thrift:"source_column_id,1,optional" frugal:"1,optional,i32" json:"source_column_id,omitempty"` + Ascending *bool `thrift:"ascending,2,optional" frugal:"2,optional,bool" json:"ascending,omitempty"` + NullFirst *bool `thrift:"null_first,3,optional" frugal:"3,optional,bool" json:"null_first,omitempty"` +} + +func NewTSortField() *TSortField { + return &TSortField{} +} + +func (p *TSortField) InitDefault() { +} + +var TSortField_SourceColumnId_DEFAULT int32 + +func (p *TSortField) GetSourceColumnId() (v int32) { + if !p.IsSetSourceColumnId() { + return TSortField_SourceColumnId_DEFAULT + } + return *p.SourceColumnId +} + +var TSortField_Ascending_DEFAULT bool + +func (p *TSortField) GetAscending() (v bool) { 
+ if !p.IsSetAscending() { + return TSortField_Ascending_DEFAULT + } + return *p.Ascending +} + +var TSortField_NullFirst_DEFAULT bool + +func (p *TSortField) GetNullFirst() (v bool) { + if !p.IsSetNullFirst() { + return TSortField_NullFirst_DEFAULT + } + return *p.NullFirst +} +func (p *TSortField) SetSourceColumnId(val *int32) { + p.SourceColumnId = val +} +func (p *TSortField) SetAscending(val *bool) { + p.Ascending = val +} +func (p *TSortField) SetNullFirst(val *bool) { + p.NullFirst = val +} + +var fieldIDToName_TSortField = map[int16]string{ + 1: "source_column_id", + 2: "ascending", + 3: "null_first", +} + +func (p *TSortField) IsSetSourceColumnId() bool { + return p.SourceColumnId != nil +} + +func (p *TSortField) IsSetAscending() bool { + return p.Ascending != nil +} + +func (p *TSortField) IsSetNullFirst() bool { + return p.NullFirst != nil +} + +func (p *TSortField) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortField[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSortField) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SourceColumnId = _field + return nil +} +func (p *TSortField) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Ascending = _field + return nil +} +func (p *TSortField) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.NullFirst = _field + return nil +} + +func (p *TSortField) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = 
oprot.WriteStructBegin("TSortField"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSortField) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSourceColumnId() { + if err = oprot.WriteFieldBegin("source_column_id", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SourceColumnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSortField) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetAscending() { + if err = oprot.WriteFieldBegin("ascending", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Ascending); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSortField) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetNullFirst() { + if err = oprot.WriteFieldBegin("null_first", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.NullFirst); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TSortField) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSortField(%+v)", *p) + +} + +func (p *TSortField) DeepEqual(ano *TSortField) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SourceColumnId) { + return false + } + if !p.Field2DeepEqual(ano.Ascending) { + return false + } + if !p.Field3DeepEqual(ano.NullFirst) { + return false + } + return true +} + +func (p *TSortField) Field1DeepEqual(src *int32) bool { + + if p.SourceColumnId == src { + return true + } else if p.SourceColumnId == nil || src == nil { + return false + } + if *p.SourceColumnId != *src { + return false + } + return true +} +func (p *TSortField) Field2DeepEqual(src *bool) bool { + + if p.Ascending == src { + return true + } 
else if p.Ascending == nil || src == nil { + return false + } + if *p.Ascending != *src { + return false + } + return true +} +func (p *TSortField) Field3DeepEqual(src *bool) bool { + + if p.NullFirst == src { + return true + } else if p.NullFirst == nil || src == nil { + return false + } + if *p.NullFirst != *src { + return false + } + return true +} + +type TIcebergTableSink struct { + DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"` + TbName *string `thrift:"tb_name,2,optional" frugal:"2,optional,string" json:"tb_name,omitempty"` + SchemaJson *string `thrift:"schema_json,3,optional" frugal:"3,optional,string" json:"schema_json,omitempty"` + PartitionSpecsJson map[int32]string `thrift:"partition_specs_json,4,optional" frugal:"4,optional,map" json:"partition_specs_json,omitempty"` + PartitionSpecId *int32 `thrift:"partition_spec_id,5,optional" frugal:"5,optional,i32" json:"partition_spec_id,omitempty"` + SortFields []*TSortField `thrift:"sort_fields,6,optional" frugal:"6,optional,list" json:"sort_fields,omitempty"` + FileFormat *plannodes.TFileFormatType `thrift:"file_format,7,optional" frugal:"7,optional,TFileFormatType" json:"file_format,omitempty"` + OutputPath *string `thrift:"output_path,8,optional" frugal:"8,optional,string" json:"output_path,omitempty"` + HadoopConfig map[string]string `thrift:"hadoop_config,9,optional" frugal:"9,optional,map" json:"hadoop_config,omitempty"` + Overwrite *bool `thrift:"overwrite,10,optional" frugal:"10,optional,bool" json:"overwrite,omitempty"` + FileType *types.TFileType `thrift:"file_type,11,optional" frugal:"11,optional,TFileType" json:"file_type,omitempty"` + OriginalOutputPath *string `thrift:"original_output_path,12,optional" frugal:"12,optional,string" json:"original_output_path,omitempty"` + CompressionType *plannodes.TFileCompressType `thrift:"compression_type,13,optional" frugal:"13,optional,TFileCompressType" json:"compression_type,omitempty"` +} + +func NewTIcebergTableSink() *TIcebergTableSink { + return &TIcebergTableSink{} +} + +func (p *TIcebergTableSink) InitDefault() { +} + +var TIcebergTableSink_DbName_DEFAULT string + +func (p *TIcebergTableSink) GetDbName() (v string) { + if !p.IsSetDbName() { + return TIcebergTableSink_DbName_DEFAULT + } + return *p.DbName +} + +var TIcebergTableSink_TbName_DEFAULT string + +func (p *TIcebergTableSink) GetTbName() (v string) { + if !p.IsSetTbName() { + return TIcebergTableSink_TbName_DEFAULT + } + return *p.TbName +} + +var TIcebergTableSink_SchemaJson_DEFAULT string + +func (p *TIcebergTableSink) GetSchemaJson() (v string) { + if !p.IsSetSchemaJson() { + return TIcebergTableSink_SchemaJson_DEFAULT + } + return *p.SchemaJson +} + +var TIcebergTableSink_PartitionSpecsJson_DEFAULT map[int32]string + +func (p *TIcebergTableSink) GetPartitionSpecsJson() (v map[int32]string) { + if !p.IsSetPartitionSpecsJson() { + return TIcebergTableSink_PartitionSpecsJson_DEFAULT + } + return p.PartitionSpecsJson +} + +var TIcebergTableSink_PartitionSpecId_DEFAULT int32 + +func (p *TIcebergTableSink) GetPartitionSpecId() (v int32) { + if !p.IsSetPartitionSpecId() { + return TIcebergTableSink_PartitionSpecId_DEFAULT + } + return *p.PartitionSpecId +} + +var TIcebergTableSink_SortFields_DEFAULT []*TSortField + +func (p *TIcebergTableSink) GetSortFields() (v []*TSortField) { + if !p.IsSetSortFields() { + return TIcebergTableSink_SortFields_DEFAULT + } + return p.SortFields +} + +var TIcebergTableSink_FileFormat_DEFAULT plannodes.TFileFormatType + +func (p 
*TIcebergTableSink) GetFileFormat() (v plannodes.TFileFormatType) { + if !p.IsSetFileFormat() { + return TIcebergTableSink_FileFormat_DEFAULT + } + return *p.FileFormat +} + +var TIcebergTableSink_OutputPath_DEFAULT string + +func (p *TIcebergTableSink) GetOutputPath() (v string) { + if !p.IsSetOutputPath() { + return TIcebergTableSink_OutputPath_DEFAULT + } + return *p.OutputPath +} + +var TIcebergTableSink_HadoopConfig_DEFAULT map[string]string + +func (p *TIcebergTableSink) GetHadoopConfig() (v map[string]string) { + if !p.IsSetHadoopConfig() { + return TIcebergTableSink_HadoopConfig_DEFAULT + } + return p.HadoopConfig +} + +var TIcebergTableSink_Overwrite_DEFAULT bool + +func (p *TIcebergTableSink) GetOverwrite() (v bool) { + if !p.IsSetOverwrite() { + return TIcebergTableSink_Overwrite_DEFAULT + } + return *p.Overwrite +} + +var TIcebergTableSink_FileType_DEFAULT types.TFileType + +func (p *TIcebergTableSink) GetFileType() (v types.TFileType) { + if !p.IsSetFileType() { + return TIcebergTableSink_FileType_DEFAULT + } + return *p.FileType +} + +var TIcebergTableSink_OriginalOutputPath_DEFAULT string + +func (p *TIcebergTableSink) GetOriginalOutputPath() (v string) { + if !p.IsSetOriginalOutputPath() { + return TIcebergTableSink_OriginalOutputPath_DEFAULT + } + return *p.OriginalOutputPath +} + +var TIcebergTableSink_CompressionType_DEFAULT plannodes.TFileCompressType + +func (p *TIcebergTableSink) GetCompressionType() (v plannodes.TFileCompressType) { + if !p.IsSetCompressionType() { + return TIcebergTableSink_CompressionType_DEFAULT + } + return *p.CompressionType +} +func (p *TIcebergTableSink) SetDbName(val *string) { + p.DbName = val +} +func (p *TIcebergTableSink) SetTbName(val *string) { + p.TbName = val +} +func (p *TIcebergTableSink) SetSchemaJson(val *string) { + p.SchemaJson = val +} +func (p *TIcebergTableSink) SetPartitionSpecsJson(val map[int32]string) { + p.PartitionSpecsJson = val +} +func (p *TIcebergTableSink) SetPartitionSpecId(val *int32) { + p.PartitionSpecId = val +} +func (p *TIcebergTableSink) SetSortFields(val []*TSortField) { + p.SortFields = val +} +func (p *TIcebergTableSink) SetFileFormat(val *plannodes.TFileFormatType) { + p.FileFormat = val +} +func (p *TIcebergTableSink) SetOutputPath(val *string) { + p.OutputPath = val +} +func (p *TIcebergTableSink) SetHadoopConfig(val map[string]string) { + p.HadoopConfig = val +} +func (p *TIcebergTableSink) SetOverwrite(val *bool) { + p.Overwrite = val +} +func (p *TIcebergTableSink) SetFileType(val *types.TFileType) { + p.FileType = val +} +func (p *TIcebergTableSink) SetOriginalOutputPath(val *string) { + p.OriginalOutputPath = val +} +func (p *TIcebergTableSink) SetCompressionType(val *plannodes.TFileCompressType) { + p.CompressionType = val +} + +var fieldIDToName_TIcebergTableSink = map[int16]string{ + 1: "db_name", + 2: "tb_name", + 3: "schema_json", + 4: "partition_specs_json", + 5: "partition_spec_id", + 6: "sort_fields", + 7: "file_format", + 8: "output_path", + 9: "hadoop_config", + 10: "overwrite", + 11: "file_type", + 12: "original_output_path", + 13: "compression_type", +} + +func (p *TIcebergTableSink) IsSetDbName() bool { + return p.DbName != nil +} + +func (p *TIcebergTableSink) IsSetTbName() bool { + return p.TbName != nil +} + +func (p *TIcebergTableSink) IsSetSchemaJson() bool { + return p.SchemaJson != nil +} + +func (p *TIcebergTableSink) IsSetPartitionSpecsJson() bool { + return p.PartitionSpecsJson != nil +} + +func (p *TIcebergTableSink) IsSetPartitionSpecId() bool { + return p.PartitionSpecId 
!= nil +} + +func (p *TIcebergTableSink) IsSetSortFields() bool { + return p.SortFields != nil +} + +func (p *TIcebergTableSink) IsSetFileFormat() bool { + return p.FileFormat != nil +} + +func (p *TIcebergTableSink) IsSetOutputPath() bool { + return p.OutputPath != nil +} + +func (p *TIcebergTableSink) IsSetHadoopConfig() bool { + return p.HadoopConfig != nil +} + +func (p *TIcebergTableSink) IsSetOverwrite() bool { + return p.Overwrite != nil +} + +func (p *TIcebergTableSink) IsSetFileType() bool { + return p.FileType != nil +} + +func (p *TIcebergTableSink) IsSetOriginalOutputPath() bool { + return p.OriginalOutputPath != nil +} + +func (p *TIcebergTableSink) IsSetCompressionType() bool { + return p.CompressionType != nil +} + +func (p *TIcebergTableSink) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I32 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.MAP { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I32 { + 
if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergTableSink[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIcebergTableSink) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DbName = _field + return nil +} +func (p *TIcebergTableSink) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TbName = _field + return nil +} +func (p *TIcebergTableSink) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SchemaJson = _field + return nil +} +func (p *TIcebergTableSink) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]string, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PartitionSpecsJson = _field + return nil +} +func (p *TIcebergTableSink) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.PartitionSpecId = _field + return nil +} +func (p *TIcebergTableSink) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TSortField, 0, size) + values := make([]TSortField, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SortFields = _field + return nil +} +func (p *TIcebergTableSink) ReadField7(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileFormatType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileFormatType(v) + _field = &tmp + } + p.FileFormat = _field + return nil +} +func (p *TIcebergTableSink) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + 
p.OutputPath = _field + return nil +} +func (p *TIcebergTableSink) ReadField9(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.HadoopConfig = _field + return nil +} +func (p *TIcebergTableSink) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Overwrite = _field + return nil +} +func (p *TIcebergTableSink) ReadField11(iprot thrift.TProtocol) error { + + var _field *types.TFileType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TFileType(v) + _field = &tmp + } + p.FileType = _field + return nil +} +func (p *TIcebergTableSink) ReadField12(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OriginalOutputPath = _field + return nil +} +func (p *TIcebergTableSink) ReadField13(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileCompressType(v) + _field = &tmp + } + p.CompressionType = _field + return nil +} + +func (p *TIcebergTableSink) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TIcebergTableSink"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) 
+} + +func (p *TIcebergTableSink) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTbName() { + if err = oprot.WriteFieldBegin("tb_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaJson() { + if err = oprot.WriteFieldBegin("schema_json", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SchemaJson); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionSpecsJson() { + if err = oprot.WriteFieldBegin("partition_specs_json", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRING, len(p.PartitionSpecsJson)); err != nil { + return err + } + for k, v := range p.PartitionSpecsJson { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionSpecId() { + if err = oprot.WriteFieldBegin("partition_spec_id", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.PartitionSpecId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSortFields() { + if err = oprot.WriteFieldBegin("sort_fields", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteListBegin(thrift.STRUCT, len(p.SortFields)); err != nil { + return err + } + for _, v := range p.SortFields { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFileFormat() { + if err = oprot.WriteFieldBegin("file_format", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetOutputPath() { + if err = oprot.WriteFieldBegin("output_path", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OutputPath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHadoopConfig() { + if err = oprot.WriteFieldBegin("hadoop_config", thrift.MAP, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.HadoopConfig)); err != nil { + return err + } + for k, v := range p.HadoopConfig { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetOverwrite() { + if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Overwrite); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetFileType() { + if err = oprot.WriteFieldBegin("file_type", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileType)); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetOriginalOutputPath() { + if err = oprot.WriteFieldBegin("original_output_path", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OriginalOutputPath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TIcebergTableSink) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressionType() { + if err = oprot.WriteFieldBegin("compression_type", thrift.I32, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.CompressionType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TIcebergTableSink) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TIcebergTableSink(%+v)", *p) + +} + +func (p *TIcebergTableSink) DeepEqual(ano *TIcebergTableSink) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DbName) { + return false + } + if !p.Field2DeepEqual(ano.TbName) { + return false + } + if !p.Field3DeepEqual(ano.SchemaJson) { + return false + } + if !p.Field4DeepEqual(ano.PartitionSpecsJson) { + return false + } + if !p.Field5DeepEqual(ano.PartitionSpecId) { + return false + } + if !p.Field6DeepEqual(ano.SortFields) { + return false + } + if !p.Field7DeepEqual(ano.FileFormat) { + return false + } + if !p.Field8DeepEqual(ano.OutputPath) { + return false + } + if !p.Field9DeepEqual(ano.HadoopConfig) { + return false + } + if !p.Field10DeepEqual(ano.Overwrite) { + return false + } + if !p.Field11DeepEqual(ano.FileType) { + return false + } + if !p.Field12DeepEqual(ano.OriginalOutputPath) { + return false + } + if !p.Field13DeepEqual(ano.CompressionType) { + return false + } + return true +} + +func (p *TIcebergTableSink) Field1DeepEqual(src *string) bool { + + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { + return false + } + if strings.Compare(*p.DbName, *src) != 0 { + return false + } + return true +} +func (p *TIcebergTableSink) Field2DeepEqual(src *string) bool { + + if p.TbName == src { + return true + } else if p.TbName == nil || src == nil { + return false + } + if strings.Compare(*p.TbName, *src) != 0 { + return false + } + return true +} +func (p *TIcebergTableSink) Field3DeepEqual(src *string) bool { + + if p.SchemaJson == src { + return true + } else if p.SchemaJson == nil || src == nil { + return false + } + if strings.Compare(*p.SchemaJson, *src) != 0 { + return false + } + return true +} +func (p *TIcebergTableSink) Field4DeepEqual(src map[int32]string) bool { + + if len(p.PartitionSpecsJson) != 
len(src) { + return false + } + for k, v := range p.PartitionSpecsJson { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TIcebergTableSink) Field5DeepEqual(src *int32) bool { + + if p.PartitionSpecId == src { + return true + } else if p.PartitionSpecId == nil || src == nil { + return false + } + if *p.PartitionSpecId != *src { + return false + } + return true +} +func (p *TIcebergTableSink) Field6DeepEqual(src []*TSortField) bool { + + if len(p.SortFields) != len(src) { + return false + } + for i, v := range p.SortFields { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TIcebergTableSink) Field7DeepEqual(src *plannodes.TFileFormatType) bool { + + if p.FileFormat == src { + return true + } else if p.FileFormat == nil || src == nil { + return false + } + if *p.FileFormat != *src { + return false + } + return true +} +func (p *TIcebergTableSink) Field8DeepEqual(src *string) bool { + + if p.OutputPath == src { + return true + } else if p.OutputPath == nil || src == nil { + return false + } + if strings.Compare(*p.OutputPath, *src) != 0 { + return false + } + return true +} +func (p *TIcebergTableSink) Field9DeepEqual(src map[string]string) bool { + + if len(p.HadoopConfig) != len(src) { + return false + } + for k, v := range p.HadoopConfig { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TIcebergTableSink) Field10DeepEqual(src *bool) bool { + + if p.Overwrite == src { + return true + } else if p.Overwrite == nil || src == nil { + return false + } + if *p.Overwrite != *src { + return false + } + return true +} +func (p *TIcebergTableSink) Field11DeepEqual(src *types.TFileType) bool { + + if p.FileType == src { + return true + } else if p.FileType == nil || src == nil { + return false + } + if *p.FileType != *src { + return false + } + return true +} +func (p *TIcebergTableSink) Field12DeepEqual(src *string) bool { + + if p.OriginalOutputPath == src { + return true + } else if p.OriginalOutputPath == nil || src == nil { + return false + } + if strings.Compare(*p.OriginalOutputPath, *src) != 0 { + return false + } + return true +} +func (p *TIcebergTableSink) Field13DeepEqual(src *plannodes.TFileCompressType) bool { + + if p.CompressionType == src { + return true + } else if p.CompressionType == nil || src == nil { + return false + } + if *p.CompressionType != *src { + return false + } + return true +} + +type TDataSink struct { + Type TDataSinkType `thrift:"type,1,required" frugal:"1,required,TDataSinkType" json:"type"` + StreamSink *TDataStreamSink `thrift:"stream_sink,2,optional" frugal:"2,optional,TDataStreamSink" json:"stream_sink,omitempty"` + ResultSink *TResultSink `thrift:"result_sink,3,optional" frugal:"3,optional,TResultSink" json:"result_sink,omitempty"` + MysqlTableSink *TMysqlTableSink `thrift:"mysql_table_sink,5,optional" frugal:"5,optional,TMysqlTableSink" json:"mysql_table_sink,omitempty"` + ExportSink *TExportSink `thrift:"export_sink,6,optional" frugal:"6,optional,TExportSink" json:"export_sink,omitempty"` + OlapTableSink *TOlapTableSink `thrift:"olap_table_sink,7,optional" frugal:"7,optional,TOlapTableSink" json:"olap_table_sink,omitempty"` + MemoryScratchSink *TMemoryScratchSink `thrift:"memory_scratch_sink,8,optional" frugal:"8,optional,TMemoryScratchSink" json:"memory_scratch_sink,omitempty"` + OdbcTableSink *TOdbcTableSink `thrift:"odbc_table_sink,9,optional" frugal:"9,optional,TOdbcTableSink" 
json:"odbc_table_sink,omitempty"` + ResultFileSink *TResultFileSink `thrift:"result_file_sink,10,optional" frugal:"10,optional,TResultFileSink" json:"result_file_sink,omitempty"` + JdbcTableSink *TJdbcTableSink `thrift:"jdbc_table_sink,11,optional" frugal:"11,optional,TJdbcTableSink" json:"jdbc_table_sink,omitempty"` + MultiCastStreamSink *TMultiCastDataStreamSink `thrift:"multi_cast_stream_sink,12,optional" frugal:"12,optional,TMultiCastDataStreamSink" json:"multi_cast_stream_sink,omitempty"` + HiveTableSink *THiveTableSink `thrift:"hive_table_sink,13,optional" frugal:"13,optional,THiveTableSink" json:"hive_table_sink,omitempty"` + IcebergTableSink *TIcebergTableSink `thrift:"iceberg_table_sink,14,optional" frugal:"14,optional,TIcebergTableSink" json:"iceberg_table_sink,omitempty"` +} + +func NewTDataSink() *TDataSink { + return &TDataSink{} +} + +func (p *TDataSink) InitDefault() { +} + +func (p *TDataSink) GetType() (v TDataSinkType) { + return p.Type +} + +var TDataSink_StreamSink_DEFAULT *TDataStreamSink + +func (p *TDataSink) GetStreamSink() (v *TDataStreamSink) { + if !p.IsSetStreamSink() { + return TDataSink_StreamSink_DEFAULT + } + return p.StreamSink +} + +var TDataSink_ResultSink_DEFAULT *TResultSink + +func (p *TDataSink) GetResultSink() (v *TResultSink) { + if !p.IsSetResultSink() { + return TDataSink_ResultSink_DEFAULT + } + return p.ResultSink +} + +var TDataSink_MysqlTableSink_DEFAULT *TMysqlTableSink + +func (p *TDataSink) GetMysqlTableSink() (v *TMysqlTableSink) { + if !p.IsSetMysqlTableSink() { + return TDataSink_MysqlTableSink_DEFAULT + } + return p.MysqlTableSink +} + +var TDataSink_ExportSink_DEFAULT *TExportSink + +func (p *TDataSink) GetExportSink() (v *TExportSink) { + if !p.IsSetExportSink() { + return TDataSink_ExportSink_DEFAULT + } + return p.ExportSink +} + +var TDataSink_OlapTableSink_DEFAULT *TOlapTableSink + +func (p *TDataSink) GetOlapTableSink() (v *TOlapTableSink) { + if !p.IsSetOlapTableSink() { + return TDataSink_OlapTableSink_DEFAULT + } + return p.OlapTableSink +} + +var TDataSink_MemoryScratchSink_DEFAULT *TMemoryScratchSink + +func (p *TDataSink) GetMemoryScratchSink() (v *TMemoryScratchSink) { + if !p.IsSetMemoryScratchSink() { + return TDataSink_MemoryScratchSink_DEFAULT + } + return p.MemoryScratchSink +} + +var TDataSink_OdbcTableSink_DEFAULT *TOdbcTableSink + +func (p *TDataSink) GetOdbcTableSink() (v *TOdbcTableSink) { + if !p.IsSetOdbcTableSink() { + return TDataSink_OdbcTableSink_DEFAULT + } + return p.OdbcTableSink +} + +var TDataSink_ResultFileSink_DEFAULT *TResultFileSink + +func (p *TDataSink) GetResultFileSink() (v *TResultFileSink) { + if !p.IsSetResultFileSink() { + return TDataSink_ResultFileSink_DEFAULT + } + return p.ResultFileSink +} + +var TDataSink_JdbcTableSink_DEFAULT *TJdbcTableSink + +func (p *TDataSink) GetJdbcTableSink() (v *TJdbcTableSink) { + if !p.IsSetJdbcTableSink() { + return TDataSink_JdbcTableSink_DEFAULT + } + return p.JdbcTableSink +} + +var TDataSink_MultiCastStreamSink_DEFAULT *TMultiCastDataStreamSink + +func (p *TDataSink) GetMultiCastStreamSink() (v *TMultiCastDataStreamSink) { + if !p.IsSetMultiCastStreamSink() { + return TDataSink_MultiCastStreamSink_DEFAULT + } + return p.MultiCastStreamSink +} + +var TDataSink_HiveTableSink_DEFAULT *THiveTableSink + +func (p *TDataSink) GetHiveTableSink() (v *THiveTableSink) { + if !p.IsSetHiveTableSink() { + return TDataSink_HiveTableSink_DEFAULT + } + return p.HiveTableSink +} + +var TDataSink_IcebergTableSink_DEFAULT *TIcebergTableSink + +func (p *TDataSink) 
GetIcebergTableSink() (v *TIcebergTableSink) { + if !p.IsSetIcebergTableSink() { + return TDataSink_IcebergTableSink_DEFAULT + } + return p.IcebergTableSink +} func (p *TDataSink) SetType(val TDataSinkType) { p.Type = val } @@ -8771,6 +16512,12 @@ func (p *TDataSink) SetJdbcTableSink(val *TJdbcTableSink) { func (p *TDataSink) SetMultiCastStreamSink(val *TMultiCastDataStreamSink) { p.MultiCastStreamSink = val } +func (p *TDataSink) SetHiveTableSink(val *THiveTableSink) { + p.HiveTableSink = val +} +func (p *TDataSink) SetIcebergTableSink(val *TIcebergTableSink) { + p.IcebergTableSink = val +} var fieldIDToName_TDataSink = map[int16]string{ 1: "type", @@ -8784,6 +16531,8 @@ var fieldIDToName_TDataSink = map[int16]string{ 10: "result_file_sink", 11: "jdbc_table_sink", 12: "multi_cast_stream_sink", + 13: "hive_table_sink", + 14: "iceberg_table_sink", } func (p *TDataSink) IsSetStreamSink() bool { @@ -8826,6 +16575,14 @@ func (p *TDataSink) IsSetMultiCastStreamSink() bool { return p.MultiCastStreamSink != nil } +func (p *TDataSink) IsSetHiveTableSink() bool { + return p.HiveTableSink != nil +} + +func (p *TDataSink) IsSetIcebergTableSink() bool { + return p.IcebergTableSink != nil +} + func (p *TDataSink) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -8852,117 +16609,110 @@ func (p *TDataSink) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = 
p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8994,91 +16744,110 @@ RequiredFieldNotSetError: } func (p *TDataSink) ReadField1(iprot thrift.TProtocol) error { + + var _field TDataSinkType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TDataSinkType(v) + _field = TDataSinkType(v) } + p.Type = _field return nil } - func (p *TDataSink) ReadField2(iprot thrift.TProtocol) error { - p.StreamSink = NewTDataStreamSink() - if err := p.StreamSink.Read(iprot); err != nil { + _field := NewTDataStreamSink() + if err := _field.Read(iprot); err != nil { return err } + p.StreamSink = _field return nil } - func (p *TDataSink) ReadField3(iprot thrift.TProtocol) error { - p.ResultSink = NewTResultSink() - if err := p.ResultSink.Read(iprot); err != nil { + _field := NewTResultSink() + if err := _field.Read(iprot); err != nil { return err } + p.ResultSink = _field return nil } - func (p *TDataSink) ReadField5(iprot thrift.TProtocol) error { - p.MysqlTableSink = NewTMysqlTableSink() - if err := p.MysqlTableSink.Read(iprot); err != nil { + _field := NewTMysqlTableSink() + if err := _field.Read(iprot); err != nil { return err } + p.MysqlTableSink = _field return nil } - func (p *TDataSink) ReadField6(iprot thrift.TProtocol) error { - p.ExportSink = NewTExportSink() - if err := p.ExportSink.Read(iprot); err != nil { + _field := NewTExportSink() + if err := _field.Read(iprot); err != nil { return err } + p.ExportSink = _field return nil } - func (p *TDataSink) ReadField7(iprot thrift.TProtocol) error { - p.OlapTableSink = NewTOlapTableSink() - if err := p.OlapTableSink.Read(iprot); err != nil { + _field := NewTOlapTableSink() + if err := _field.Read(iprot); err != nil { return err } + p.OlapTableSink = _field return nil } - func (p *TDataSink) ReadField8(iprot thrift.TProtocol) error { - p.MemoryScratchSink = NewTMemoryScratchSink() - if err := p.MemoryScratchSink.Read(iprot); err != nil { + _field := NewTMemoryScratchSink() + if err := _field.Read(iprot); err != nil { return err } + p.MemoryScratchSink = _field return nil } - func (p *TDataSink) ReadField9(iprot thrift.TProtocol) error { - p.OdbcTableSink = NewTOdbcTableSink() - if err := p.OdbcTableSink.Read(iprot); err != nil { + _field := NewTOdbcTableSink() + if err := 
_field.Read(iprot); err != nil { return err } + p.OdbcTableSink = _field return nil } - func (p *TDataSink) ReadField10(iprot thrift.TProtocol) error { - p.ResultFileSink = NewTResultFileSink() - if err := p.ResultFileSink.Read(iprot); err != nil { + _field := NewTResultFileSink() + if err := _field.Read(iprot); err != nil { return err } + p.ResultFileSink = _field return nil } - func (p *TDataSink) ReadField11(iprot thrift.TProtocol) error { - p.JdbcTableSink = NewTJdbcTableSink() - if err := p.JdbcTableSink.Read(iprot); err != nil { + _field := NewTJdbcTableSink() + if err := _field.Read(iprot); err != nil { return err } + p.JdbcTableSink = _field return nil } - func (p *TDataSink) ReadField12(iprot thrift.TProtocol) error { - p.MultiCastStreamSink = NewTMultiCastDataStreamSink() - if err := p.MultiCastStreamSink.Read(iprot); err != nil { + _field := NewTMultiCastDataStreamSink() + if err := _field.Read(iprot); err != nil { + return err + } + p.MultiCastStreamSink = _field + return nil +} +func (p *TDataSink) ReadField13(iprot thrift.TProtocol) error { + _field := NewTHiveTableSink() + if err := _field.Read(iprot); err != nil { + return err + } + p.HiveTableSink = _field + return nil +} +func (p *TDataSink) ReadField14(iprot thrift.TProtocol) error { + _field := NewTIcebergTableSink() + if err := _field.Read(iprot); err != nil { return err } + p.IcebergTableSink = _field return nil } @@ -9132,7 +16901,14 @@ func (p *TDataSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } - + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9358,11 +17134,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TDataSink) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetHiveTableSink() { + if err = oprot.WriteFieldBegin("hive_table_sink", thrift.STRUCT, 13); err != nil { + goto WriteFieldBeginError + } + if err := p.HiveTableSink.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TDataSink) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetIcebergTableSink() { + if err = oprot.WriteFieldBegin("iceberg_table_sink", thrift.STRUCT, 14); err != nil { + goto WriteFieldBeginError + } + if err := p.IcebergTableSink.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + func (p *TDataSink) String() string { if p == nil { return "" } return fmt.Sprintf("TDataSink(%+v)", *p) + } func (p *TDataSink) DeepEqual(ano *TDataSink) bool { @@ -9404,6 +17219,12 @@ func (p *TDataSink) DeepEqual(ano *TDataSink) bool { if !p.Field12DeepEqual(ano.MultiCastStreamSink) { return false } + if !p.Field13DeepEqual(ano.HiveTableSink) { + return false + } + if !p.Field14DeepEqual(ano.IcebergTableSink) { + return 
false + } return true } @@ -9484,3 +17305,17 @@ func (p *TDataSink) Field12DeepEqual(src *TMultiCastDataStreamSink) bool { } return true } +func (p *TDataSink) Field13DeepEqual(src *THiveTableSink) bool { + + if !p.HiveTableSink.DeepEqual(src) { + return false + } + return true +} +func (p *TDataSink) Field14DeepEqual(src *TIcebergTableSink) bool { + + if !p.IcebergTableSink.DeepEqual(src) { + return false + } + return true +} diff --git a/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go b/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go index c519a27f..8f4a005f 100644 --- a/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go +++ b/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package datasinks @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/partitions" @@ -589,6 +590,48 @@ func (p *TResultFileSinkOptions) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 18: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -976,6 +1019,47 @@ func (p *TResultFileSinkOptions) FastReadField17(buf []byte) (int, error) { return offset, nil } +func (p *TResultFileSinkOptions) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.WithBom = &v + + } + return offset, nil +} + +func (p *TResultFileSinkOptions) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileCompressType(v) + p.OrcCompressionType = &tmp + + } + return offset, nil +} + +func (p *TResultFileSinkOptions) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OrcWriterVersion = &v + + } + return offset, nil +} + // for compatibility func (p *TResultFileSinkOptions) FastWrite(buf []byte) int { return 0 @@ -988,6 +1072,8 @@ func (p *TResultFileSinkOptions) FastWriteNocopy(buf []byte, binaryWriter bthrif offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) 
offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -1002,6 +1088,7 @@ func (p *TResultFileSinkOptions) FastWriteNocopy(buf []byte, binaryWriter bthrif offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1029,6 +1116,9 @@ func (p *TResultFileSinkOptions) BLength() int { l += p.field15Length() l += p.field16Length() l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1270,6 +1360,39 @@ func (p *TResultFileSinkOptions) fastWriteField17(buf []byte, binaryWriter bthri return offset } +func (p *TResultFileSinkOptions) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWithBom() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "with_bom", thrift.BOOL, 18) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.WithBom) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TResultFileSinkOptions) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOrcCompressionType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "orc_compression_type", thrift.I32, 19) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.OrcCompressionType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TResultFileSinkOptions) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOrcWriterVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "orc_writer_version", thrift.I64, 20) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.OrcWriterVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TResultFileSinkOptions) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("file_path", thrift.STRING, 1) @@ -1481,6 +1604,39 @@ func (p *TResultFileSinkOptions) field17Length() int { return l } +func (p *TResultFileSinkOptions) field18Length() int { + l := 0 + if p.IsSetWithBom() { + l += bthrift.Binary.FieldBeginLength("with_bom", thrift.BOOL, 18) + l += bthrift.Binary.BoolLength(*p.WithBom) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TResultFileSinkOptions) field19Length() int { + l := 0 + if p.IsSetOrcCompressionType() { + l += bthrift.Binary.FieldBeginLength("orc_compression_type", thrift.I32, 19) + l += bthrift.Binary.I32Length(int32(*p.OrcCompressionType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TResultFileSinkOptions) field20Length() int { + l := 0 + if p.IsSetOrcWriterVersion() { + l += bthrift.Binary.FieldBeginLength("orc_writer_version", thrift.I64, 20) + l += bthrift.Binary.I64Length(*p.OrcWriterVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMemoryScratchSink) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1505,7 +1661,7 @@ func (p *TMemoryScratchSink) FastRead(buf []byte) (int, error) { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l if err != nil { - goto 
SkipFieldTypeError + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -1525,9 +1681,8 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: @@ -1919,6 +2074,90 @@ func (p *TDataStreamSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2099,51 +2338,155 @@ func (p *TDataStreamSink) FastReadField7(buf []byte) (int, error) { return offset, nil } -// for compatibility -func (p *TDataStreamSink) FastWrite(buf []byte) int { - return 0 -} - -func (p *TDataStreamSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TDataStreamSink) FastReadField8(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDataStreamSink") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} -func (p *TDataStreamSink) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TDataStreamSink") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() + tmp := descriptors.NewTOlapTableSchemaParam() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l + p.TabletSinkSchema = tmp + return offset, nil } -func (p *TDataStreamSink) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TDataStreamSink) FastReadField9(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_node_id", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], p.DestNodeId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := descriptors.NewTOlapTablePartitionParam() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TabletSinkPartition = tmp + return offset, nil +} + +func (p *TDataStreamSink) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := descriptors.NewTOlapTableLocationParam() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TabletSinkLocation = tmp + return offset, nil +} + +func (p *TDataStreamSink) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TabletSinkTxnId = &v + + } + return offset, nil +} + +func (p *TDataStreamSink) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TabletSinkTupleId = &v + + } + return offset, nil +} + +func (p *TDataStreamSink) FastReadField13(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletSinkExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TabletSinkExprs = append(p.TabletSinkExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TDataStreamSink) FastWrite(buf []byte) int { + return 0 +} + +func (p *TDataStreamSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDataStreamSink") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += 
p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TDataStreamSink) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TDataStreamSink") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TDataStreamSink) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_node_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], p.DestNodeId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } @@ -2231,6 +2574,76 @@ func (p *TDataStreamSink) fastWriteField7(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TDataStreamSink) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkSchema() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_schema", thrift.STRUCT, 8) + offset += p.TabletSinkSchema.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataStreamSink) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkPartition() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_partition", thrift.STRUCT, 9) + offset += p.TabletSinkPartition.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataStreamSink) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkLocation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_location", thrift.STRUCT, 10) + offset += p.TabletSinkLocation.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataStreamSink) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_txn_id", thrift.I64, 11) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletSinkTxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataStreamSink) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkTupleId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_tuple_id", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TabletSinkTupleId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataStreamSink) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkExprs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"tablet_sink_exprs", thrift.LIST, 13) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TabletSinkExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TDataStreamSink) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("dest_node_id", thrift.I32, 1) @@ -2312,6 +2725,72 @@ func (p *TDataStreamSink) field7Length() int { return l } +func (p *TDataStreamSink) field8Length() int { + l := 0 + if p.IsSetTabletSinkSchema() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_schema", thrift.STRUCT, 8) + l += p.TabletSinkSchema.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataStreamSink) field9Length() int { + l := 0 + if p.IsSetTabletSinkPartition() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_partition", thrift.STRUCT, 9) + l += p.TabletSinkPartition.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataStreamSink) field10Length() int { + l := 0 + if p.IsSetTabletSinkLocation() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_location", thrift.STRUCT, 10) + l += p.TabletSinkLocation.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataStreamSink) field11Length() int { + l := 0 + if p.IsSetTabletSinkTxnId() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_txn_id", thrift.I64, 11) + l += bthrift.Binary.I64Length(*p.TabletSinkTxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataStreamSink) field12Length() int { + l := 0 + if p.IsSetTabletSinkTupleId() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_tuple_id", thrift.I32, 12) + l += bthrift.Binary.I32Length(*p.TabletSinkTupleId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataStreamSink) field13Length() int { + l := 0 + if p.IsSetTabletSinkExprs() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_exprs", thrift.LIST, 13) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TabletSinkExprs)) + for _, v := range p.TabletSinkExprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMultiCastDataStreamSink) FastRead(buf []byte) (int, error) { var err error var offset int @@ -5338,42 +5817,98 @@ func (p *TOlapTableSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + case 21: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetLoadId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetDbId { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetTableId { + case 22: + if 
fieldTypeId == thrift.I32 { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 23: + if fieldTypeId == thrift.DOUBLE { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 24: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField24(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetLoadId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetDbId { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetTableId { fieldId = 4 goto RequiredFieldNotSetError } @@ -5695,6 +6230,60 @@ func (p *TOlapTableSink) FastReadField20(buf []byte) (int, error) { return offset, nil } +func (p *TOlapTableSink) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BaseSchemaVersion = &v + + } + return offset, nil +} + +func (p *TOlapTableSink) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TGroupCommitMode(v) + p.GroupCommitMode = &tmp + + } + return offset, nil +} + +func (p *TOlapTableSink) FastReadField23(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadDouble(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxFilterRatio = &v + + } + return offset, nil +} + +func (p *TOlapTableSink) FastReadField24(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StorageVaultId = &v + + } + return offset, nil +} + // for compatibility func (p *TOlapTableSink) FastWrite(buf []byte) int { return 0 @@ -5716,6 +6305,8 @@ func (p *TOlapTableSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField17(buf[offset:], binaryWriter) offset += p.fastWriteField19(buf[offset:], binaryWriter) offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) @@ -5724,6 +6315,8 @@ func (p *TOlapTableSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], 
binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -5754,6 +6347,10 @@ func (p *TOlapTableSink) BLength() int { l += p.field18Length() l += p.field19Length() l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5952,6 +6549,50 @@ func (p *TOlapTableSink) fastWriteField20(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TOlapTableSink) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBaseSchemaVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_schema_version", thrift.I64, 21) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BaseSchemaVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSink) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit_mode", thrift.I32, 22) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.GroupCommitMode)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSink) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxFilterRatio() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_filter_ratio", thrift.DOUBLE, 23) + offset += bthrift.Binary.WriteDouble(buf[offset:], *p.MaxFilterRatio) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSink) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStorageVaultId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_vault_id", thrift.STRING, 24) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StorageVaultId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapTableSink) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 1) @@ -6144,13 +6785,56 @@ func (p *TOlapTableSink) field20Length() int { return l } -func (p *TDataSink) FastRead(buf []byte) (int, error) { +func (p *TOlapTableSink) field21Length() int { + l := 0 + if p.IsSetBaseSchemaVersion() { + l += bthrift.Binary.FieldBeginLength("base_schema_version", thrift.I64, 21) + l += bthrift.Binary.I64Length(*p.BaseSchemaVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSink) field22Length() int { + l := 0 + if p.IsSetGroupCommitMode() { + l += bthrift.Binary.FieldBeginLength("group_commit_mode", thrift.I32, 22) + l += bthrift.Binary.I32Length(int32(*p.GroupCommitMode)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSink) field23Length() int { + l := 0 + if p.IsSetMaxFilterRatio() { + l += bthrift.Binary.FieldBeginLength("max_filter_ratio", thrift.DOUBLE, 23) + l += bthrift.Binary.DoubleLength(*p.MaxFilterRatio) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSink) field24Length() int { + l := 0 + if p.IsSetStorageVaultId() { + l += bthrift.Binary.FieldBeginLength("storage_vault_id", thrift.STRING, 24) + l += 
bthrift.Binary.StringLengthNocopy(*p.StorageVaultId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveLocationParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetType bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -6168,13 +6852,12 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6183,7 +6866,7 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -6197,7 +6880,7 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -6210,51 +6893,9 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField8(buf[offset:]) + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -6266,30 +6907,4959 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 9: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin 
error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveLocationParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveLocationParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.WritePath = &v + + } + return offset, nil +} + +func (p *THiveLocationParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TargetPath = &v + + } + return offset, nil +} + +func (p *THiveLocationParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TFileType(v) + p.FileType = &tmp + + } + return offset, nil +} + +func (p *THiveLocationParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OriginalWritePath = &v + + } + return offset, nil +} + +// for compatibility +func (p *THiveLocationParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *THiveLocationParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THiveLocationParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THiveLocationParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THiveLocationParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THiveLocationParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWritePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "write_path", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.WritePath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveLocationParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTargetPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "target_path", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TargetPath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveLocationParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileType() 
{ + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveLocationParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOriginalWritePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "original_write_path", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OriginalWritePath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveLocationParams) field1Length() int { + l := 0 + if p.IsSetWritePath() { + l += bthrift.Binary.FieldBeginLength("write_path", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.WritePath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveLocationParams) field2Length() int { + l := 0 + if p.IsSetTargetPath() { + l += bthrift.Binary.FieldBeginLength("target_path", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.TargetPath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveLocationParams) field3Length() int { + l := 0 + if p.IsSetFileType() { + l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.FileType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveLocationParams) field4Length() int { + l := 0 + if p.IsSetOriginalWritePath() { + l += bthrift.Binary.FieldBeginLength("original_write_path", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.OriginalWritePath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortedColumn) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TSortedColumn[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSortedColumn) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SortColumnName = &v + + } + return offset, nil +} + +func (p *TSortedColumn) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Order = &v + + } + return offset, nil +} + +// for compatibility +func (p *TSortedColumn) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSortedColumn) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSortedColumn") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSortedColumn) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSortedColumn") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TSortedColumn) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSortColumnName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_column_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SortColumnName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortedColumn) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOrder() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "order", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.Order) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortedColumn) field1Length() int { + l := 0 + if p.IsSetSortColumnName() { + l += bthrift.Binary.FieldBeginLength("sort_column_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.SortColumnName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortedColumn) field2Length() int { + l := 0 + if p.IsSetOrder() { + l += bthrift.Binary.FieldBeginLength("order", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.Order) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBucketingMode) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = 
p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBucketingMode[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBucketingMode) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BucketVersion = &v + + } + return offset, nil +} + +// for compatibility +func (p *TBucketingMode) FastWrite(buf []byte) int { + return 0 +} + +func (p *TBucketingMode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBucketingMode") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TBucketingMode) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBucketingMode") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TBucketingMode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket_version", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BucketVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBucketingMode) field1Length() int { + l := 0 + if p.IsSetBucketVersion() { + l += bthrift.Binary.FieldBeginLength("bucket_version", thrift.I32, 1) + l += bthrift.Binary.I32Length(*p.BucketVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveBucket) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = 
p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveBucket[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveBucket) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.BucketedBy = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.BucketedBy = append(p.BucketedBy, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THiveBucket) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTBucketingMode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BucketMode = tmp + return offset, nil +} + +func (p *THiveBucket) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BucketCount = &v + + } + return offset, nil +} + +func (p *THiveBucket) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SortedBy = 
make([]*TSortedColumn, 0, size) + for i := 0; i < size; i++ { + _elem := NewTSortedColumn() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.SortedBy = append(p.SortedBy, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *THiveBucket) FastWrite(buf []byte) int { + return 0 +} + +func (p *THiveBucket) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THiveBucket") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THiveBucket) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THiveBucket") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THiveBucket) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketedBy() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucketed_by", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.BucketedBy { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveBucket) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket_mode", thrift.STRUCT, 2) + offset += p.BucketMode.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveBucket) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket_count", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BucketCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveBucket) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSortedBy() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sorted_by", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.SortedBy { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveBucket) field1Length() int { + l := 0 + if p.IsSetBucketedBy() { + l += bthrift.Binary.FieldBeginLength("bucketed_by", 
thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.BucketedBy)) + for _, v := range p.BucketedBy { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveBucket) field2Length() int { + l := 0 + if p.IsSetBucketMode() { + l += bthrift.Binary.FieldBeginLength("bucket_mode", thrift.STRUCT, 2) + l += p.BucketMode.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveBucket) field3Length() int { + l := 0 + if p.IsSetBucketCount() { + l += bthrift.Binary.FieldBeginLength("bucket_count", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.BucketCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveBucket) field4Length() int { + l := 0 + if p.IsSetSortedBy() { + l += bthrift.Binary.FieldBeginLength("sorted_by", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.SortedBy)) + for _, v := range p.SortedBy { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveColumn) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveColumn[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveColumn) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v + + } + return offset, nil +} + 
+func (p *THiveColumn) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := THiveColumnType(v) + p.ColumnType = &tmp + + } + return offset, nil +} + +// for compatibility +func (p *THiveColumn) FastWrite(buf []byte) int { + return 0 +} + +func (p *THiveColumn) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THiveColumn") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THiveColumn) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THiveColumn") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THiveColumn) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveColumn) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_type", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.ColumnType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveColumn) field1Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveColumn) field2Length() int { + l := 0 + if p.IsSetColumnType() { + l += bthrift.Binary.FieldBeginLength("column_type", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(*p.ColumnType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartition) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THivePartition[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THivePartition) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Values = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.Values = append(p.Values, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THivePartition) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveLocationParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Location = tmp + return offset, nil +} + +func (p *THivePartition) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileFormatType(v) + p.FileFormat = &tmp + + } + return offset, nil +} + +// for compatibility +func (p *THivePartition) FastWrite(buf []byte) int { + return 0 +} + +func (p *THivePartition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THivePartition") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THivePartition) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THivePartition") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THivePartition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetValues() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "values", thrift.LIST, 1) + 
listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.Values { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLocation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "location", thrift.STRUCT, 2) + offset += p.Location.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_format", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileFormat)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartition) field1Length() int { + l := 0 + if p.IsSetValues() { + l += bthrift.Binary.FieldBeginLength("values", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Values)) + for _, v := range p.Values { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartition) field2Length() int { + l := 0 + if p.IsSetLocation() { + l += bthrift.Binary.FieldBeginLength("location", thrift.STRUCT, 2) + l += p.Location.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartition) field3Length() int { + l := 0 + if p.IsSetFileFormat() { + l += bthrift.Binary.FieldBeginLength("file_format", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.FileFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + 
} else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveSerDeProperties[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveSerDeProperties) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FieldDelim = &v + + } + return offset, nil +} + +func (p *THiveSerDeProperties) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LineDelim = &v + + } + return offset, nil +} + +func (p *THiveSerDeProperties) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CollectionDelim = &v + + } + return offset, nil +} + +func (p *THiveSerDeProperties) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MapkvDelim = &v + + } + return offset, nil +} + +func (p *THiveSerDeProperties) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EscapeChar = &v + + } + return offset, nil +} + +func (p *THiveSerDeProperties) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NullFormat = &v + + } + return offset, nil +} + +// for compatibility +func (p *THiveSerDeProperties) FastWrite(buf []byte) int { + return 0 +} + +func (p *THiveSerDeProperties) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THiveSerDeProperties") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THiveSerDeProperties) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THiveSerDeProperties") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THiveSerDeProperties) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFieldDelim() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "field_delim", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FieldDelim) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLineDelim() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "line_delim", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LineDelim) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCollectionDelim() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "collection_delim", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CollectionDelim) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMapkvDelim() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mapkv_delim", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.MapkvDelim) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEscapeChar() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "escape_char", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.EscapeChar) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNullFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "null_format", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.NullFormat) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveSerDeProperties) field1Length() int { + l := 0 + if p.IsSetFieldDelim() { + l += 
bthrift.Binary.FieldBeginLength("field_delim", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.FieldDelim) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) field2Length() int { + l := 0 + if p.IsSetLineDelim() { + l += bthrift.Binary.FieldBeginLength("line_delim", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.LineDelim) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) field3Length() int { + l := 0 + if p.IsSetCollectionDelim() { + l += bthrift.Binary.FieldBeginLength("collection_delim", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.CollectionDelim) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) field4Length() int { + l := 0 + if p.IsSetMapkvDelim() { + l += bthrift.Binary.FieldBeginLength("mapkv_delim", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.MapkvDelim) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) field5Length() int { + l := 0 + if p.IsSetEscapeChar() { + l += bthrift.Binary.FieldBeginLength("escape_char", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.EscapeChar) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveSerDeProperties) field6Length() int { + l := 0 + if p.IsSetNullFormat() { + l += bthrift.Binary.FieldBeginLength("null_format", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.NullFormat) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l 
+ if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveTableSink[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THiveTableSink) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbName = &v + + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableName = &v + + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Columns = make([]*THiveColumn, 0, size) + for i := 0; i < size; i++ { + _elem := NewTHiveColumn() + if l, err := _elem.FastRead(buf[offset:]); err != 
nil { + return offset, err + } else { + offset += l + } + + p.Columns = append(p.Columns, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Partitions = make([]*THivePartition, 0, size) + for i := 0; i < size; i++ { + _elem := NewTHivePartition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Partitions = append(p.Partitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveBucket() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BucketInfo = tmp + return offset, nil +} + +func (p *THiveTableSink) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileFormatType(v) + p.FileFormat = &tmp + + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileCompressType(v) + p.CompressionType = &tmp + + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveLocationParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Location = tmp + return offset, nil +} + +func (p *THiveTableSink) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HadoopConfig = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.HadoopConfig[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Overwrite = &v + + } + return offset, nil +} + +func (p *THiveTableSink) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveSerDeProperties() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SerdeProperties = tmp + return offset, nil +} + +// for compatibility +func (p *THiveTableSink) FastWrite(buf []byte) int { + return 0 +} + +func (p *THiveTableSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "THiveTableSink") + if p != nil { + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THiveTableSink) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THiveTableSink") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THiveTableSink) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Columns { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Partitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketInfo() { + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket_info", thrift.STRUCT, 5) + offset += p.BucketInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_format", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileFormat)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressionType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compression_type", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressionType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLocation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "location", thrift.STRUCT, 8) + offset += p.Location.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHadoopConfig() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hadoop_config", thrift.MAP, 9) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.HadoopConfig { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOverwrite() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Overwrite) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSerdeProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "serde_properties", thrift.STRUCT, 11) + offset += p.SerdeProperties.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THiveTableSink) field1Length() int { + l := 0 + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field2Length() int { + l := 0 + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field3Length() int { + l := 0 + if p.IsSetColumns() { + l += 
bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) + for _, v := range p.Columns { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field4Length() int { + l := 0 + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field5Length() int { + l := 0 + if p.IsSetBucketInfo() { + l += bthrift.Binary.FieldBeginLength("bucket_info", thrift.STRUCT, 5) + l += p.BucketInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field6Length() int { + l := 0 + if p.IsSetFileFormat() { + l += bthrift.Binary.FieldBeginLength("file_format", thrift.I32, 6) + l += bthrift.Binary.I32Length(int32(*p.FileFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field7Length() int { + l := 0 + if p.IsSetCompressionType() { + l += bthrift.Binary.FieldBeginLength("compression_type", thrift.I32, 7) + l += bthrift.Binary.I32Length(int32(*p.CompressionType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field8Length() int { + l := 0 + if p.IsSetLocation() { + l += bthrift.Binary.FieldBeginLength("location", thrift.STRUCT, 8) + l += p.Location.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field9Length() int { + l := 0 + if p.IsSetHadoopConfig() { + l += bthrift.Binary.FieldBeginLength("hadoop_config", thrift.MAP, 9) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.HadoopConfig)) + for k, v := range p.HadoopConfig { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field10Length() int { + l := 0 + if p.IsSetOverwrite() { + l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.Overwrite) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THiveTableSink) field11Length() int { + l := 0 + if p.IsSetSerdeProperties() { + l += bthrift.Binary.FieldBeginLength("serde_properties", thrift.STRUCT, 11) + l += p.SerdeProperties.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TS3MPUPendingUpload) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset 
+= l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TS3MPUPendingUpload[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TS3MPUPendingUpload) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Bucket = &v + + } + return offset, nil +} + +func (p *TS3MPUPendingUpload) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Key = &v + + } + return offset, nil +} + +func (p *TS3MPUPendingUpload) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UploadId = &v + + } + return offset, nil +} + +func (p *TS3MPUPendingUpload) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Etags = make(map[int32]string, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.Etags[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TS3MPUPendingUpload) FastWrite(buf []byte) int { + return 0 +} + +func (p *TS3MPUPendingUpload) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TS3MPUPendingUpload") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TS3MPUPendingUpload) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TS3MPUPendingUpload") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TS3MPUPendingUpload) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucket() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Bucket) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TS3MPUPendingUpload) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TS3MPUPendingUpload) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUploadId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "upload_id", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UploadId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TS3MPUPendingUpload) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEtags() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "etags", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRING, 0) + var length int + for k, v := range p.Etags { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TS3MPUPendingUpload) field1Length() int { + l := 0 + if p.IsSetBucket() { + l += bthrift.Binary.FieldBeginLength("bucket", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Bucket) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TS3MPUPendingUpload) field2Length() int { + l := 0 + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Key) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TS3MPUPendingUpload) field3Length() int { + l := 0 + if p.IsSetUploadId() { + l += bthrift.Binary.FieldBeginLength("upload_id", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.UploadId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p 
*TS3MPUPendingUpload) field4Length() int { + l := 0 + if p.IsSetEtags() { + l += bthrift.Binary.FieldBeginLength("etags", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRING, len(p.Etags)) + for k, v := range p.Etags { + + l += bthrift.Binary.I32Length(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read 
field %d '%s' error: ", p, fieldId, fieldIDToName_THivePartitionUpdate[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *THivePartitionUpdate) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v + + } + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TUpdateMode(v) + p.UpdateMode = &tmp + + } + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveLocationParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Location = tmp + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FileNames = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.FileNames = append(p.FileNames, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RowCount = &v + + } + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FileSize = &v + + } + return offset, nil +} + +func (p *THivePartitionUpdate) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.S3MpuPendingUploads = make([]*TS3MPUPendingUpload, 0, size) + for i := 0; i < size; i++ { + _elem := NewTS3MPUPendingUpload() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.S3MpuPendingUploads = append(p.S3MpuPendingUploads, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *THivePartitionUpdate) FastWrite(buf []byte) int { + return 0 +} + +func (p *THivePartitionUpdate) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THivePartitionUpdate") + if p != nil { + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset 
+= p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THivePartitionUpdate) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THivePartitionUpdate") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THivePartitionUpdate) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUpdateMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "update_mode", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.UpdateMode)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLocation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "location", thrift.STRUCT, 3) + offset += p.Location.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_names", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.FileNames { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRowCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_count", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FileSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetS3MpuPendingUploads() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "s3_mpu_pending_uploads", 
thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.S3MpuPendingUploads { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THivePartitionUpdate) field1Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field2Length() int { + l := 0 + if p.IsSetUpdateMode() { + l += bthrift.Binary.FieldBeginLength("update_mode", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(*p.UpdateMode)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field3Length() int { + l := 0 + if p.IsSetLocation() { + l += bthrift.Binary.FieldBeginLength("location", thrift.STRUCT, 3) + l += p.Location.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field4Length() int { + l := 0 + if p.IsSetFileNames() { + l += bthrift.Binary.FieldBeginLength("file_names", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.FileNames)) + for _, v := range p.FileNames { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field5Length() int { + l := 0 + if p.IsSetRowCount() { + l += bthrift.Binary.FieldBeginLength("row_count", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.RowCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field6Length() int { + l := 0 + if p.IsSetFileSize() { + l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.FileSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THivePartitionUpdate) field7Length() int { + l := 0 + if p.IsSetS3MpuPendingUploads() { + l += bthrift.Binary.FieldBeginLength("s3_mpu_pending_uploads", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.S3MpuPendingUploads)) + for _, v := range p.S3MpuPendingUploads { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergCommitData[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIcebergCommitData) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FilePath = &v + + } + return offset, nil +} + +func (p *TIcebergCommitData) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RowCount = &v + + } + return offset, nil +} + +func (p *TIcebergCommitData) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FileSize = &v + + } + return offset, nil +} + +func (p *TIcebergCommitData) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TFileContent(v) + p.FileContent = &tmp + + } + return offset, nil +} + +func (p *TIcebergCommitData) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return 
offset, err + } + p.PartitionValues = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.PartitionValues = append(p.PartitionValues, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TIcebergCommitData) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ReferencedDataFiles = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ReferencedDataFiles = append(p.ReferencedDataFiles, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TIcebergCommitData) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIcebergCommitData) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIcebergCommitData") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIcebergCommitData) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIcebergCommitData") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIcebergCommitData) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFilePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_path", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FilePath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRowCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_count", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FileSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileContent() { 
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_content", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileContent)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionValues() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_values", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.PartitionValues { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReferencedDataFiles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "referenced_data_files", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ReferencedDataFiles { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergCommitData) field1Length() int { + l := 0 + if p.IsSetFilePath() { + l += bthrift.Binary.FieldBeginLength("file_path", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.FilePath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) field2Length() int { + l := 0 + if p.IsSetRowCount() { + l += bthrift.Binary.FieldBeginLength("row_count", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.RowCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) field3Length() int { + l := 0 + if p.IsSetFileSize() { + l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.FileSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) field4Length() int { + l := 0 + if p.IsSetFileContent() { + l += bthrift.Binary.FieldBeginLength("file_content", thrift.I32, 4) + l += bthrift.Binary.I32Length(int32(*p.FileContent)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) field5Length() int { + l := 0 + if p.IsSetPartitionValues() { + l += bthrift.Binary.FieldBeginLength("partition_values", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.PartitionValues)) + for _, v := range p.PartitionValues { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergCommitData) field6Length() int { + l := 0 + if p.IsSetReferencedDataFiles() { + l += bthrift.Binary.FieldBeginLength("referenced_data_files", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ReferencedDataFiles)) + for _, v := range p.ReferencedDataFiles { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + 
} + return l +} + +func (p *TSortField) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortField[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSortField) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SourceColumnId = &v + + } + return offset, nil +} + +func (p *TSortField) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Ascending = &v + + } + return offset, nil +} + +func (p *TSortField) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NullFirst = &v + + } + return offset, nil +} + +// for compatibility +func (p *TSortField) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSortField) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSortField") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += 
p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSortField) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSortField") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TSortField) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSourceColumnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "source_column_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SourceColumnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortField) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAscending() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ascending", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Ascending) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortField) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNullFirst() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "null_first", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.NullFirst) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortField) field1Length() int { + l := 0 + if p.IsSetSourceColumnId() { + l += bthrift.Binary.FieldBeginLength("source_column_id", thrift.I32, 1) + l += bthrift.Binary.I32Length(*p.SourceColumnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortField) field2Length() int { + l := 0 + if p.IsSetAscending() { + l += bthrift.Binary.FieldBeginLength("ascending", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(*p.Ascending) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortField) field3Length() int { + l := 0 + if p.IsSetNullFirst() { + l += bthrift.Binary.FieldBeginLength("null_first", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.NullFirst) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + 
offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, 
fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergTableSink[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIcebergTableSink) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbName = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TbName = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SchemaJson = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionSpecsJson = make(map[int32]string, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PartitionSpecsJson[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PartitionSpecId = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SortFields = make([]*TSortField, 0, size) + for i := 0; i < size; i++ { + _elem := NewTSortField() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.SortFields = append(p.SortFields, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileFormatType(v) + p.FileFormat = &tmp + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OutputPath = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField9(buf 
[]byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HadoopConfig = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.HadoopConfig[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Overwrite = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TFileType(v) + p.FileType = &tmp + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OriginalOutputPath = &v + + } + return offset, nil +} + +func (p *TIcebergTableSink) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileCompressType(v) + p.CompressionType = &tmp + + } + return offset, nil +} + +// for compatibility +func (p *TIcebergTableSink) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIcebergTableSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIcebergTableSink") + if p != nil { + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIcebergTableSink) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIcebergTableSink") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + } + l += bthrift.Binary.FieldStopLength() + l += 
bthrift.Binary.StructEndLength() + return l +} + +func (p *TIcebergTableSink) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tb_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSchemaJson() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_json", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SchemaJson) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionSpecsJson() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_specs_json", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRING, 0) + var length int + for k, v := range p.PartitionSpecsJson { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionSpecId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_spec_id", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.PartitionSpecId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSortFields() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_fields", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.SortFields { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_format", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileFormat)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOutputPath() 
{ + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_path", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OutputPath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHadoopConfig() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hadoop_config", thrift.MAP, 9) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.HadoopConfig { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOverwrite() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Overwrite) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOriginalOutputPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "original_output_path", thrift.STRING, 12) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OriginalOutputPath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressionType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compression_type", thrift.I32, 13) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressionType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergTableSink) field1Length() int { + l := 0 + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field2Length() int { + l := 0 + if p.IsSetTbName() { + l += bthrift.Binary.FieldBeginLength("tb_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.TbName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field3Length() int { + l := 0 + if p.IsSetSchemaJson() { + l += bthrift.Binary.FieldBeginLength("schema_json", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.SchemaJson) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field4Length() int { + l := 0 + if p.IsSetPartitionSpecsJson() { + l += 
bthrift.Binary.FieldBeginLength("partition_specs_json", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRING, len(p.PartitionSpecsJson)) + for k, v := range p.PartitionSpecsJson { + + l += bthrift.Binary.I32Length(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field5Length() int { + l := 0 + if p.IsSetPartitionSpecId() { + l += bthrift.Binary.FieldBeginLength("partition_spec_id", thrift.I32, 5) + l += bthrift.Binary.I32Length(*p.PartitionSpecId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field6Length() int { + l := 0 + if p.IsSetSortFields() { + l += bthrift.Binary.FieldBeginLength("sort_fields", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.SortFields)) + for _, v := range p.SortFields { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field7Length() int { + l := 0 + if p.IsSetFileFormat() { + l += bthrift.Binary.FieldBeginLength("file_format", thrift.I32, 7) + l += bthrift.Binary.I32Length(int32(*p.FileFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field8Length() int { + l := 0 + if p.IsSetOutputPath() { + l += bthrift.Binary.FieldBeginLength("output_path", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.OutputPath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field9Length() int { + l := 0 + if p.IsSetHadoopConfig() { + l += bthrift.Binary.FieldBeginLength("hadoop_config", thrift.MAP, 9) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.HadoopConfig)) + for k, v := range p.HadoopConfig { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field10Length() int { + l := 0 + if p.IsSetOverwrite() { + l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.Overwrite) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field11Length() int { + l := 0 + if p.IsSetFileType() { + l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 11) + l += bthrift.Binary.I32Length(int32(*p.FileType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field12Length() int { + l := 0 + if p.IsSetOriginalOutputPath() { + l += bthrift.Binary.FieldBeginLength("original_output_path", thrift.STRING, 12) + l += bthrift.Binary.StringLengthNocopy(*p.OriginalOutputPath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergTableSink) field13Length() int { + l := 0 + if p.IsSetCompressionType() { + l += bthrift.Binary.FieldBeginLength("compression_type", thrift.I32, 13) + l += bthrift.Binary.I32Length(int32(*p.CompressionType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataSink) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = 
bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l if err != nil { goto SkipFieldError } @@ -6322,6 +11892,34 @@ func (p *TDataSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6507,6 +12105,32 @@ func (p *TDataSink) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TDataSink) 
FastReadField13(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHiveTableSink() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.HiveTableSink = tmp + return offset, nil +} + +func (p *TDataSink) FastReadField14(buf []byte) (int, error) { + offset := 0 + + tmp := NewTIcebergTableSink() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.IcebergTableSink = tmp + return offset, nil +} + // for compatibility func (p *TDataSink) FastWrite(buf []byte) int { return 0 @@ -6527,6 +12151,8 @@ func (p *TDataSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -6548,6 +12174,8 @@ func (p *TDataSink) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -6663,6 +12291,26 @@ func (p *TDataSink) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p *TDataSink) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHiveTableSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_table_sink", thrift.STRUCT, 13) + offset += p.HiveTableSink.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataSink) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIcebergTableSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_table_sink", thrift.STRUCT, 14) + offset += p.IcebergTableSink.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TDataSink) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) @@ -6771,3 +12419,23 @@ func (p *TDataSink) field12Length() int { } return l } + +func (p *TDataSink) field13Length() int { + l := 0 + if p.IsSetHiveTableSink() { + l += bthrift.Binary.FieldBeginLength("hive_table_sink", thrift.STRUCT, 13) + l += p.HiveTableSink.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDataSink) field14Length() int { + l := 0 + if p.IsSetIcebergTableSink() { + l += bthrift.Binary.FieldBeginLength("iceberg_table_sink", thrift.STRUCT, 14) + l += p.IcebergTableSink.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} diff --git a/pkg/rpc/kitex_gen/descriptors/Descriptors.go b/pkg/rpc/kitex_gen/descriptors/Descriptors.go index 6758fab8..86bb7248 100644 --- a/pkg/rpc/kitex_gen/descriptors/Descriptors.go +++ b/pkg/rpc/kitex_gen/descriptors/Descriptors.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
package descriptors @@ -130,6 +130,18 @@ const ( TSchemaTableType_SCH_PARAMETERS TSchemaTableType = 38 TSchemaTableType_SCH_METADATA_NAME_IDS TSchemaTableType = 39 TSchemaTableType_SCH_PROFILING TSchemaTableType = 40 + TSchemaTableType_SCH_BACKEND_ACTIVE_TASKS TSchemaTableType = 41 + TSchemaTableType_SCH_ACTIVE_QUERIES TSchemaTableType = 42 + TSchemaTableType_SCH_WORKLOAD_GROUPS TSchemaTableType = 43 + TSchemaTableType_SCH_USER TSchemaTableType = 44 + TSchemaTableType_SCH_PROCS_PRIV TSchemaTableType = 45 + TSchemaTableType_SCH_WORKLOAD_POLICY TSchemaTableType = 46 + TSchemaTableType_SCH_TABLE_OPTIONS TSchemaTableType = 47 + TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES TSchemaTableType = 48 + TSchemaTableType_SCH_WORKLOAD_GROUP_RESOURCE_USAGE TSchemaTableType = 49 + TSchemaTableType_SCH_TABLE_PROPERTIES TSchemaTableType = 50 + TSchemaTableType_SCH_FILE_CACHE_STATISTICS TSchemaTableType = 51 + TSchemaTableType_SCH_CATALOG_META_CACHE_STATISTICS TSchemaTableType = 52 ) func (p TSchemaTableType) String() string { @@ -216,6 +228,30 @@ func (p TSchemaTableType) String() string { return "SCH_METADATA_NAME_IDS" case TSchemaTableType_SCH_PROFILING: return "SCH_PROFILING" + case TSchemaTableType_SCH_BACKEND_ACTIVE_TASKS: + return "SCH_BACKEND_ACTIVE_TASKS" + case TSchemaTableType_SCH_ACTIVE_QUERIES: + return "SCH_ACTIVE_QUERIES" + case TSchemaTableType_SCH_WORKLOAD_GROUPS: + return "SCH_WORKLOAD_GROUPS" + case TSchemaTableType_SCH_USER: + return "SCH_USER" + case TSchemaTableType_SCH_PROCS_PRIV: + return "SCH_PROCS_PRIV" + case TSchemaTableType_SCH_WORKLOAD_POLICY: + return "SCH_WORKLOAD_POLICY" + case TSchemaTableType_SCH_TABLE_OPTIONS: + return "SCH_TABLE_OPTIONS" + case TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES: + return "SCH_WORKLOAD_GROUP_PRIVILEGES" + case TSchemaTableType_SCH_WORKLOAD_GROUP_RESOURCE_USAGE: + return "SCH_WORKLOAD_GROUP_RESOURCE_USAGE" + case TSchemaTableType_SCH_TABLE_PROPERTIES: + return "SCH_TABLE_PROPERTIES" + case TSchemaTableType_SCH_FILE_CACHE_STATISTICS: + return "SCH_FILE_CACHE_STATISTICS" + case TSchemaTableType_SCH_CATALOG_META_CACHE_STATISTICS: + return "SCH_CATALOG_META_CACHE_STATISTICS" } return "" } @@ -304,6 +340,30 @@ func TSchemaTableTypeFromString(s string) (TSchemaTableType, error) { return TSchemaTableType_SCH_METADATA_NAME_IDS, nil case "SCH_PROFILING": return TSchemaTableType_SCH_PROFILING, nil + case "SCH_BACKEND_ACTIVE_TASKS": + return TSchemaTableType_SCH_BACKEND_ACTIVE_TASKS, nil + case "SCH_ACTIVE_QUERIES": + return TSchemaTableType_SCH_ACTIVE_QUERIES, nil + case "SCH_WORKLOAD_GROUPS": + return TSchemaTableType_SCH_WORKLOAD_GROUPS, nil + case "SCH_USER": + return TSchemaTableType_SCH_USER, nil + case "SCH_PROCS_PRIV": + return TSchemaTableType_SCH_PROCS_PRIV, nil + case "SCH_WORKLOAD_POLICY": + return TSchemaTableType_SCH_WORKLOAD_POLICY, nil + case "SCH_TABLE_OPTIONS": + return TSchemaTableType_SCH_TABLE_OPTIONS, nil + case "SCH_WORKLOAD_GROUP_PRIVILEGES": + return TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES, nil + case "SCH_WORKLOAD_GROUP_RESOURCE_USAGE": + return TSchemaTableType_SCH_WORKLOAD_GROUP_RESOURCE_USAGE, nil + case "SCH_TABLE_PROPERTIES": + return TSchemaTableType_SCH_TABLE_PROPERTIES, nil + case "SCH_FILE_CACHE_STATISTICS": + return TSchemaTableType_SCH_FILE_CACHE_STATISTICS, nil + case "SCH_CATALOG_META_CACHE_STATISTICS": + return TSchemaTableType_SCH_CATALOG_META_CACHE_STATISTICS, nil } return TSchemaTableType(0), fmt.Errorf("not a valid TSchemaTableType string") } @@ -461,6 +521,8 @@ type TColumn struct { Aggregation *string 
`thrift:"aggregation,16,optional" frugal:"16,optional,string" json:"aggregation,omitempty"` ResultIsNullable *bool `thrift:"result_is_nullable,17,optional" frugal:"17,optional,bool" json:"result_is_nullable,omitempty"` IsAutoIncrement bool `thrift:"is_auto_increment,18,optional" frugal:"18,optional,bool" json:"is_auto_increment,omitempty"` + ClusterKeyId int32 `thrift:"cluster_key_id,19,optional" frugal:"19,optional,i32" json:"cluster_key_id,omitempty"` + BeExecVersion int32 `thrift:"be_exec_version,20,optional" frugal:"20,optional,i32" json:"be_exec_version,omitempty"` } func NewTColumn() *TColumn { @@ -471,18 +533,19 @@ func NewTColumn() *TColumn { HasBitmapIndex: false, HasNgramBfIndex: false, IsAutoIncrement: false, + ClusterKeyId: -1, + BeExecVersion: -1, } } func (p *TColumn) InitDefault() { - *p = TColumn{ - - Visible: true, - ColUniqueId: -1, - HasBitmapIndex: false, - HasNgramBfIndex: false, - IsAutoIncrement: false, - } + p.Visible = true + p.ColUniqueId = -1 + p.HasBitmapIndex = false + p.HasNgramBfIndex = false + p.IsAutoIncrement = false + p.ClusterKeyId = -1 + p.BeExecVersion = -1 } func (p *TColumn) GetColumnName() (v string) { @@ -641,6 +704,24 @@ func (p *TColumn) GetIsAutoIncrement() (v bool) { } return p.IsAutoIncrement } + +var TColumn_ClusterKeyId_DEFAULT int32 = -1 + +func (p *TColumn) GetClusterKeyId() (v int32) { + if !p.IsSetClusterKeyId() { + return TColumn_ClusterKeyId_DEFAULT + } + return p.ClusterKeyId +} + +var TColumn_BeExecVersion_DEFAULT int32 = -1 + +func (p *TColumn) GetBeExecVersion() (v int32) { + if !p.IsSetBeExecVersion() { + return TColumn_BeExecVersion_DEFAULT + } + return p.BeExecVersion +} func (p *TColumn) SetColumnName(val string) { p.ColumnName = val } @@ -695,6 +776,12 @@ func (p *TColumn) SetResultIsNullable(val *bool) { func (p *TColumn) SetIsAutoIncrement(val bool) { p.IsAutoIncrement = val } +func (p *TColumn) SetClusterKeyId(val int32) { + p.ClusterKeyId = val +} +func (p *TColumn) SetBeExecVersion(val int32) { + p.BeExecVersion = val +} var fieldIDToName_TColumn = map[int16]string{ 1: "column_name", @@ -715,6 +802,8 @@ var fieldIDToName_TColumn = map[int16]string{ 16: "aggregation", 17: "result_is_nullable", 18: "is_auto_increment", + 19: "cluster_key_id", + 20: "be_exec_version", } func (p *TColumn) IsSetColumnType() bool { @@ -785,6 +874,14 @@ func (p *TColumn) IsSetIsAutoIncrement() bool { return p.IsAutoIncrement != TColumn_IsAutoIncrement_DEFAULT } +func (p *TColumn) IsSetClusterKeyId() bool { + return p.ClusterKeyId != TColumn_ClusterKeyId_DEFAULT +} + +func (p *TColumn) IsSetBeExecVersion() bool { + return p.BeExecVersion != TColumn_BeExecVersion_DEFAULT +} + func (p *TColumn) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -812,10 +909,8 @@ func (p *TColumn) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -823,177 +918,158 @@ func (p *TColumn) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - 
goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.BOOL { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.LIST { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I32 { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.STRING { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.BOOL { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.BOOL { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.I32 { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.I32 { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1030,174 +1106,230 @@ RequiredFieldNotSetError: } func (p *TColumn) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnName = v + _field = v } + p.ColumnName = _field return nil } - func (p *TColumn) ReadField2(iprot thrift.TProtocol) error { - p.ColumnType = types.NewTColumnType() - if err := p.ColumnType.Read(iprot); err != nil { + _field := types.NewTColumnType() + if err := _field.Read(iprot); err != nil { return err } + p.ColumnType = _field return nil } - func (p *TColumn) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TAggregationType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TAggregationType(v) - p.AggregationType = &tmp + _field = &tmp } + p.AggregationType = _field return nil } - func (p *TColumn) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsKey = &v + _field = &v } + p.IsKey = _field return nil } - func (p *TColumn) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsAllowNull = &v + _field = &v } + p.IsAllowNull = _field return nil } - func (p *TColumn) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DefaultValue = &v + _field = &v } + p.DefaultValue = _field return nil } - func (p *TColumn) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsBloomFilterColumn = &v + _field = &v } + p.IsBloomFilterColumn = _field return nil } - func (p *TColumn) ReadField8(iprot thrift.TProtocol) error { - p.DefineExpr = exprs.NewTExpr() - if err := p.DefineExpr.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.DefineExpr = _field return nil } - func (p *TColumn) ReadField9(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Visible = v + _field = v } + p.Visible = _field return nil } - func (p *TColumn) ReadField10(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - 
p.ChildrenColumn = make([]*TColumn, 0, size) + _field := make([]*TColumn, 0, size) + values := make([]TColumn, size) for i := 0; i < size; i++ { - _elem := NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ChildrenColumn = append(p.ChildrenColumn, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ChildrenColumn = _field return nil } - func (p *TColumn) ReadField11(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColUniqueId = v + _field = v } + p.ColUniqueId = _field return nil } - func (p *TColumn) ReadField12(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasBitmapIndex = v + _field = v } + p.HasBitmapIndex = _field return nil } - func (p *TColumn) ReadField13(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasNgramBfIndex = v + _field = v } + p.HasNgramBfIndex = _field return nil } - func (p *TColumn) ReadField14(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.GramSize = &v + _field = &v } + p.GramSize = _field return nil } - func (p *TColumn) ReadField15(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.GramBfSize = &v + _field = &v } + p.GramBfSize = _field return nil } - func (p *TColumn) ReadField16(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Aggregation = &v + _field = &v } + p.Aggregation = _field return nil } - func (p *TColumn) ReadField17(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ResultIsNullable = &v + _field = &v } + p.ResultIsNullable = _field return nil } - func (p *TColumn) ReadField18(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsAutoIncrement = v + _field = v + } + p.IsAutoIncrement = _field + return nil +} +func (p *TColumn) ReadField19(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ClusterKeyId = _field + return nil +} +func (p *TColumn) ReadField20(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v } + p.BeExecVersion = _field return nil } @@ -1279,7 +1411,14 @@ func (p *TColumn) Write(oprot thrift.TProtocol) (err error) { fieldId = 18 goto WriteFieldError } - + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1644,11 +1783,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } +func (p *TColumn) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetClusterKeyId() { + if err = oprot.WriteFieldBegin("cluster_key_id", thrift.I32, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ClusterKeyId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + 
} + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TColumn) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetBeExecVersion() { + if err = oprot.WriteFieldBegin("be_exec_version", thrift.I32, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.BeExecVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + func (p *TColumn) String() string { if p == nil { return "" } return fmt.Sprintf("TColumn(%+v)", *p) + } func (p *TColumn) DeepEqual(ano *TColumn) bool { @@ -1711,6 +1889,12 @@ func (p *TColumn) DeepEqual(ano *TColumn) bool { if !p.Field18DeepEqual(ano.IsAutoIncrement) { return false } + if !p.Field19DeepEqual(ano.ClusterKeyId) { + return false + } + if !p.Field20DeepEqual(ano.BeExecVersion) { + return false + } return true } @@ -1891,23 +2075,39 @@ func (p *TColumn) Field18DeepEqual(src bool) bool { } return true } +func (p *TColumn) Field19DeepEqual(src int32) bool { + + if p.ClusterKeyId != src { + return false + } + return true +} +func (p *TColumn) Field20DeepEqual(src int32) bool { + + if p.BeExecVersion != src { + return false + } + return true +} type TSlotDescriptor struct { - Id types.TSlotId `thrift:"id,1,required" frugal:"1,required,i32" json:"id"` - Parent types.TTupleId `thrift:"parent,2,required" frugal:"2,required,i32" json:"parent"` - SlotType *types.TTypeDesc `thrift:"slotType,3,required" frugal:"3,required,types.TTypeDesc" json:"slotType"` - ColumnPos int32 `thrift:"columnPos,4,required" frugal:"4,required,i32" json:"columnPos"` - ByteOffset int32 `thrift:"byteOffset,5,required" frugal:"5,required,i32" json:"byteOffset"` - NullIndicatorByte int32 `thrift:"nullIndicatorByte,6,required" frugal:"6,required,i32" json:"nullIndicatorByte"` - NullIndicatorBit int32 `thrift:"nullIndicatorBit,7,required" frugal:"7,required,i32" json:"nullIndicatorBit"` - ColName string `thrift:"colName,8,required" frugal:"8,required,string" json:"colName"` - SlotIdx int32 `thrift:"slotIdx,9,required" frugal:"9,required,i32" json:"slotIdx"` - IsMaterialized bool `thrift:"isMaterialized,10,required" frugal:"10,required,bool" json:"isMaterialized"` - ColUniqueId int32 `thrift:"col_unique_id,11,optional" frugal:"11,optional,i32" json:"col_unique_id,omitempty"` - IsKey bool `thrift:"is_key,12,optional" frugal:"12,optional,bool" json:"is_key,omitempty"` - NeedMaterialize bool `thrift:"need_materialize,13,optional" frugal:"13,optional,bool" json:"need_materialize,omitempty"` - IsAutoIncrement bool `thrift:"is_auto_increment,14,optional" frugal:"14,optional,bool" json:"is_auto_increment,omitempty"` - ColumnPaths []string `thrift:"column_paths,15,optional" frugal:"15,optional,list" json:"column_paths,omitempty"` + Id types.TSlotId `thrift:"id,1,required" frugal:"1,required,i32" json:"id"` + Parent types.TTupleId `thrift:"parent,2,required" frugal:"2,required,i32" json:"parent"` + SlotType *types.TTypeDesc `thrift:"slotType,3,required" frugal:"3,required,types.TTypeDesc" json:"slotType"` + ColumnPos int32 `thrift:"columnPos,4,required" frugal:"4,required,i32" json:"columnPos"` + ByteOffset 
int32 `thrift:"byteOffset,5,required" frugal:"5,required,i32" json:"byteOffset"` + NullIndicatorByte int32 `thrift:"nullIndicatorByte,6,required" frugal:"6,required,i32" json:"nullIndicatorByte"` + NullIndicatorBit int32 `thrift:"nullIndicatorBit,7,required" frugal:"7,required,i32" json:"nullIndicatorBit"` + ColName string `thrift:"colName,8,required" frugal:"8,required,string" json:"colName"` + SlotIdx int32 `thrift:"slotIdx,9,required" frugal:"9,required,i32" json:"slotIdx"` + IsMaterialized bool `thrift:"isMaterialized,10,required" frugal:"10,required,bool" json:"isMaterialized"` + ColUniqueId int32 `thrift:"col_unique_id,11,optional" frugal:"11,optional,i32" json:"col_unique_id,omitempty"` + IsKey bool `thrift:"is_key,12,optional" frugal:"12,optional,bool" json:"is_key,omitempty"` + NeedMaterialize bool `thrift:"need_materialize,13,optional" frugal:"13,optional,bool" json:"need_materialize,omitempty"` + IsAutoIncrement bool `thrift:"is_auto_increment,14,optional" frugal:"14,optional,bool" json:"is_auto_increment,omitempty"` + ColumnPaths []string `thrift:"column_paths,15,optional" frugal:"15,optional,list" json:"column_paths,omitempty"` + ColDefaultValue *string `thrift:"col_default_value,16,optional" frugal:"16,optional,string" json:"col_default_value,omitempty"` + PrimitiveType types.TPrimitiveType `thrift:"primitive_type,17,optional" frugal:"17,optional,TPrimitiveType" json:"primitive_type,omitempty"` } func NewTSlotDescriptor() *TSlotDescriptor { @@ -1917,17 +2117,16 @@ func NewTSlotDescriptor() *TSlotDescriptor { IsKey: false, NeedMaterialize: true, IsAutoIncrement: false, + PrimitiveType: types.TPrimitiveType_INVALID_TYPE, } } func (p *TSlotDescriptor) InitDefault() { - *p = TSlotDescriptor{ - - ColUniqueId: -1, - IsKey: false, - NeedMaterialize: true, - IsAutoIncrement: false, - } + p.ColUniqueId = -1 + p.IsKey = false + p.NeedMaterialize = true + p.IsAutoIncrement = false + p.PrimitiveType = types.TPrimitiveType_INVALID_TYPE } func (p *TSlotDescriptor) GetId() (v types.TSlotId) { @@ -2019,6 +2218,24 @@ func (p *TSlotDescriptor) GetColumnPaths() (v []string) { } return p.ColumnPaths } + +var TSlotDescriptor_ColDefaultValue_DEFAULT string + +func (p *TSlotDescriptor) GetColDefaultValue() (v string) { + if !p.IsSetColDefaultValue() { + return TSlotDescriptor_ColDefaultValue_DEFAULT + } + return *p.ColDefaultValue +} + +var TSlotDescriptor_PrimitiveType_DEFAULT types.TPrimitiveType = types.TPrimitiveType_INVALID_TYPE + +func (p *TSlotDescriptor) GetPrimitiveType() (v types.TPrimitiveType) { + if !p.IsSetPrimitiveType() { + return TSlotDescriptor_PrimitiveType_DEFAULT + } + return p.PrimitiveType +} func (p *TSlotDescriptor) SetId(val types.TSlotId) { p.Id = val } @@ -2064,6 +2281,12 @@ func (p *TSlotDescriptor) SetIsAutoIncrement(val bool) { func (p *TSlotDescriptor) SetColumnPaths(val []string) { p.ColumnPaths = val } +func (p *TSlotDescriptor) SetColDefaultValue(val *string) { + p.ColDefaultValue = val +} +func (p *TSlotDescriptor) SetPrimitiveType(val types.TPrimitiveType) { + p.PrimitiveType = val +} var fieldIDToName_TSlotDescriptor = map[int16]string{ 1: "id", @@ -2081,6 +2304,8 @@ var fieldIDToName_TSlotDescriptor = map[int16]string{ 13: "need_materialize", 14: "is_auto_increment", 15: "column_paths", + 16: "col_default_value", + 17: "primitive_type", } func (p *TSlotDescriptor) IsSetSlotType() bool { @@ -2107,6 +2332,14 @@ func (p *TSlotDescriptor) IsSetColumnPaths() bool { return p.ColumnPaths != nil } +func (p *TSlotDescriptor) IsSetColDefaultValue() bool { + return 
p.ColDefaultValue != nil +} + +func (p *TSlotDescriptor) IsSetPrimitiveType() bool { + return p.PrimitiveType != TSlotDescriptor_PrimitiveType_DEFAULT +} + func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -2142,10 +2375,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -2153,10 +2384,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetParent = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -2164,10 +2393,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSlotType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -2175,10 +2402,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnPos = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { @@ -2186,10 +2411,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetByteOffset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { @@ -2197,10 +2420,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNullIndicatorByte = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { @@ -2208,10 +2429,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNullIndicatorBit = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { @@ -2219,10 +2438,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { @@ -2230,10 +2447,8 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSlotIdx = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { @@ -2241,67 +2456,70 @@ func (p *TSlotDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsMaterialized = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.BOOL { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.LIST { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.STRING { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.I32 { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2378,137 +2596,164 @@ RequiredFieldNotSetError: } func (p *TSlotDescriptor) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TSlotDescriptor) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Parent = v + _field = v } + p.Parent = _field return nil } - func (p *TSlotDescriptor) ReadField3(iprot thrift.TProtocol) error { - p.SlotType = types.NewTTypeDesc() - if err := p.SlotType.Read(iprot); err != nil { + _field := types.NewTTypeDesc() + if err := _field.Read(iprot); err != nil { return err } + p.SlotType = _field return nil } - func (p *TSlotDescriptor) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnPos = v + _field = v } + p.ColumnPos = _field return nil } - func (p *TSlotDescriptor) ReadField5(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ByteOffset = v + _field = v } + p.ByteOffset = _field return nil } - func (p *TSlotDescriptor) ReadField6(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NullIndicatorByte = v + _field = v } + p.NullIndicatorByte = _field return nil } - func (p *TSlotDescriptor) ReadField7(iprot thrift.TProtocol) error { + + var 
_field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NullIndicatorBit = v + _field = v } + p.NullIndicatorBit = _field return nil } - func (p *TSlotDescriptor) ReadField8(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColName = v + _field = v } + p.ColName = _field return nil } - func (p *TSlotDescriptor) ReadField9(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SlotIdx = v + _field = v } + p.SlotIdx = _field return nil } - func (p *TSlotDescriptor) ReadField10(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMaterialized = v + _field = v } + p.IsMaterialized = _field return nil } - func (p *TSlotDescriptor) ReadField11(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColUniqueId = v + _field = v } + p.ColUniqueId = _field return nil } - func (p *TSlotDescriptor) ReadField12(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsKey = v + _field = v } + p.IsKey = _field return nil } - func (p *TSlotDescriptor) ReadField13(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NeedMaterialize = v + _field = v } + p.NeedMaterialize = _field return nil } - func (p *TSlotDescriptor) ReadField14(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsAutoIncrement = v + _field = v } + p.IsAutoIncrement = _field return nil } - func (p *TSlotDescriptor) ReadField15(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnPaths = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -2516,11 +2761,34 @@ func (p *TSlotDescriptor) ReadField15(iprot thrift.TProtocol) error { _elem = v } - p.ColumnPaths = append(p.ColumnPaths, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnPaths = _field + return nil +} +func (p *TSlotDescriptor) ReadField16(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ColDefaultValue = _field + return nil +} +func (p *TSlotDescriptor) ReadField17(iprot thrift.TProtocol) error { + + var _field types.TPrimitiveType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TPrimitiveType(v) + } + p.PrimitiveType = _field return nil } @@ -2590,7 +2858,14 @@ func (p *TSlotDescriptor) Write(oprot thrift.TProtocol) (err error) { fieldId = 15 goto WriteFieldError } - + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2882,11 +3157,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } +func (p *TSlotDescriptor) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetColDefaultValue() { + if err = oprot.WriteFieldBegin("col_default_value", thrift.STRING, 16); err != nil { 
+ goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ColDefaultValue); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TSlotDescriptor) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetPrimitiveType() { + if err = oprot.WriteFieldBegin("primitive_type", thrift.I32, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.PrimitiveType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + func (p *TSlotDescriptor) String() string { if p == nil { return "" } return fmt.Sprintf("TSlotDescriptor(%+v)", *p) + } func (p *TSlotDescriptor) DeepEqual(ano *TSlotDescriptor) bool { @@ -2940,6 +3254,12 @@ func (p *TSlotDescriptor) DeepEqual(ano *TSlotDescriptor) bool { if !p.Field15DeepEqual(ano.ColumnPaths) { return false } + if !p.Field16DeepEqual(ano.ColDefaultValue) { + return false + } + if !p.Field17DeepEqual(ano.PrimitiveType) { + return false + } return true } @@ -3054,6 +3374,25 @@ func (p *TSlotDescriptor) Field15DeepEqual(src []string) bool { } return true } +func (p *TSlotDescriptor) Field16DeepEqual(src *string) bool { + + if p.ColDefaultValue == src { + return true + } else if p.ColDefaultValue == nil || src == nil { + return false + } + if strings.Compare(*p.ColDefaultValue, *src) != 0 { + return false + } + return true +} +func (p *TSlotDescriptor) Field17DeepEqual(src types.TPrimitiveType) bool { + + if p.PrimitiveType != src { + return false + } + return true +} type TTupleDescriptor struct { Id types.TTupleId `thrift:"id,1,required" frugal:"1,required,i32" json:"id"` @@ -3068,7 +3407,6 @@ func NewTTupleDescriptor() *TTupleDescriptor { } func (p *TTupleDescriptor) InitDefault() { - *p = TTupleDescriptor{} } func (p *TTupleDescriptor) GetId() (v types.TTupleId) { @@ -3160,10 +3498,8 @@ func (p *TTupleDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -3171,10 +3507,8 @@ func (p *TTupleDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetByteSize = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -3182,37 +3516,30 @@ func (p *TTupleDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumNullBytes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3254,47 +3581,58 @@ RequiredFieldNotSetError: } func (p *TTupleDescriptor) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TTupleDescriptor) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ByteSize = v + _field = v } + p.ByteSize = _field return nil } - func (p *TTupleDescriptor) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumNullBytes = v + _field = v } + p.NumNullBytes = _field return nil } - func (p *TTupleDescriptor) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TTableId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = &v + _field = &v } + p.TableId = _field return nil } - func (p *TTupleDescriptor) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumNullSlots = &v + _field = &v } + p.NumNullSlots = _field return nil } @@ -3324,7 +3662,6 @@ func (p *TTupleDescriptor) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3437,6 +3774,7 @@ func (p *TTupleDescriptor) String() string { return "" } return fmt.Sprintf("TTupleDescriptor(%+v)", *p) + } func (p *TTupleDescriptor) DeepEqual(ano *TTupleDescriptor) bool { @@ -3519,7 +3857,6 @@ func NewTOlapTableIndexTablets() *TOlapTableIndexTablets { } func (p *TOlapTableIndexTablets) InitDefault() { - *p = TOlapTableIndexTablets{} } func (p *TOlapTableIndexTablets) GetIndexId() (v int64) { @@ -3568,10 +3905,8 @@ func (p *TOlapTableIndexTablets) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -3579,17 +3914,14 @@ func (p *TOlapTableIndexTablets) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTablets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3626,21 +3958,24 @@ RequiredFieldNotSetError: } func (p *TOlapTableIndexTablets) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IndexId = v + _field = v } + p.IndexId = _field return nil } - func (p *TOlapTableIndexTablets) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Tablets = make([]int64, 
0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -3648,11 +3983,12 @@ func (p *TOlapTableIndexTablets) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.Tablets = append(p.Tablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Tablets = _field return nil } @@ -3670,7 +4006,6 @@ func (p *TOlapTableIndexTablets) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3736,6 +4071,7 @@ func (p *TOlapTableIndexTablets) String() string { return "" } return fmt.Sprintf("TOlapTableIndexTablets(%+v)", *p) + } func (p *TOlapTableIndexTablets) DeepEqual(ano *TOlapTableIndexTablets) bool { @@ -3785,6 +4121,7 @@ type TOlapTablePartition struct { InKeys [][]*exprs.TExprNode `thrift:"in_keys,8,optional" frugal:"8,optional,list>" json:"in_keys,omitempty"` IsMutable bool `thrift:"is_mutable,9,optional" frugal:"9,optional,bool" json:"is_mutable,omitempty"` IsDefaultPartition *bool `thrift:"is_default_partition,10,optional" frugal:"10,optional,bool" json:"is_default_partition,omitempty"` + LoadTabletIdx *int64 `thrift:"load_tablet_idx,11,optional" frugal:"11,optional,i64" json:"load_tablet_idx,omitempty"` } func NewTOlapTablePartition() *TOlapTablePartition { @@ -3795,10 +4132,7 @@ func NewTOlapTablePartition() *TOlapTablePartition { } func (p *TOlapTablePartition) InitDefault() { - *p = TOlapTablePartition{ - - IsMutable: true, - } + p.IsMutable = true } func (p *TOlapTablePartition) GetId() (v int64) { @@ -3875,6 +4209,15 @@ func (p *TOlapTablePartition) GetIsDefaultPartition() (v bool) { } return *p.IsDefaultPartition } + +var TOlapTablePartition_LoadTabletIdx_DEFAULT int64 + +func (p *TOlapTablePartition) GetLoadTabletIdx() (v int64) { + if !p.IsSetLoadTabletIdx() { + return TOlapTablePartition_LoadTabletIdx_DEFAULT + } + return *p.LoadTabletIdx +} func (p *TOlapTablePartition) SetId(val int64) { p.Id = val } @@ -3905,6 +4248,9 @@ func (p *TOlapTablePartition) SetIsMutable(val bool) { func (p *TOlapTablePartition) SetIsDefaultPartition(val *bool) { p.IsDefaultPartition = val } +func (p *TOlapTablePartition) SetLoadTabletIdx(val *int64) { + p.LoadTabletIdx = val +} var fieldIDToName_TOlapTablePartition = map[int16]string{ 1: "id", @@ -3917,6 +4263,7 @@ var fieldIDToName_TOlapTablePartition = map[int16]string{ 8: "in_keys", 9: "is_mutable", 10: "is_default_partition", + 11: "load_tablet_idx", } func (p *TOlapTablePartition) IsSetStartKey() bool { @@ -3947,6 +4294,10 @@ func (p *TOlapTablePartition) IsSetIsDefaultPartition() bool { return p.IsDefaultPartition != nil } +func (p *TOlapTablePartition) IsSetLoadTabletIdx() bool { + return p.LoadTabletIdx != nil +} + func (p *TOlapTablePartition) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3975,30 +4326,24 @@ func (p *TOlapTablePartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if 
fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -4006,10 +4351,8 @@ func (p *TOlapTablePartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumBuckets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { @@ -4017,67 +4360,62 @@ func (p *TOlapTablePartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.BOOL { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I64 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4119,113 +4457,129 @@ RequiredFieldNotSetError: } func (p *TOlapTablePartition) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TOlapTablePartition) ReadField2(iprot thrift.TProtocol) error { - p.StartKey = exprs.NewTExprNode() - if err := p.StartKey.Read(iprot); err != nil { + _field := exprs.NewTExprNode() + if err := _field.Read(iprot); err != nil { return err } + p.StartKey = _field return nil } - func (p *TOlapTablePartition) ReadField3(iprot thrift.TProtocol) error { - p.EndKey = exprs.NewTExprNode() - if err := p.EndKey.Read(iprot); err != nil { + _field := exprs.NewTExprNode() + if err := _field.Read(iprot); err != nil { return err } + p.EndKey = _field return nil } - func (p *TOlapTablePartition) ReadField4(iprot thrift.TProtocol) 
error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumBuckets = v + _field = v } + p.NumBuckets = _field return nil } - func (p *TOlapTablePartition) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Indexes = make([]*TOlapTableIndexTablets, 0, size) + _field := make([]*TOlapTableIndexTablets, 0, size) + values := make([]TOlapTableIndexTablets, size) for i := 0; i < size; i++ { - _elem := NewTOlapTableIndexTablets() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Indexes = append(p.Indexes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Indexes = _field return nil } - func (p *TOlapTablePartition) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.StartKeys = make([]*exprs.TExprNode, 0, size) + _field := make([]*exprs.TExprNode, 0, size) + values := make([]exprs.TExprNode, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExprNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.StartKeys = append(p.StartKeys, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.StartKeys = _field return nil } - func (p *TOlapTablePartition) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.EndKeys = make([]*exprs.TExprNode, 0, size) + _field := make([]*exprs.TExprNode, 0, size) + values := make([]exprs.TExprNode, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExprNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.EndKeys = append(p.EndKeys, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.EndKeys = _field return nil } - func (p *TOlapTablePartition) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.InKeys = make([][]*exprs.TExprNode, 0, size) + _field := make([][]*exprs.TExprNode, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExprNode, 0, size) + values := make([]exprs.TExprNode, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExprNode() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -4236,29 +4590,45 @@ func (p *TOlapTablePartition) ReadField8(iprot thrift.TProtocol) error { return err } - p.InKeys = append(p.InKeys, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.InKeys = _field return nil } - func (p *TOlapTablePartition) ReadField9(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMutable = v + _field = v } + p.IsMutable = _field return nil } - func (p *TOlapTablePartition) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDefaultPartition = &v + _field = &v + } + p.IsDefaultPartition = _field + return nil +} +func (p *TOlapTablePartition) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.LoadTabletIdx = 
_field return nil } @@ -4308,7 +4678,10 @@ func (p *TOlapTablePartition) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4551,11 +4924,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } +func (p *TOlapTablePartition) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadTabletIdx() { + if err = oprot.WriteFieldBegin("load_tablet_idx", thrift.I64, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadTabletIdx); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + func (p *TOlapTablePartition) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapTablePartition(%+v)", *p) + } func (p *TOlapTablePartition) DeepEqual(ano *TOlapTablePartition) bool { @@ -4594,6 +4987,9 @@ func (p *TOlapTablePartition) DeepEqual(ano *TOlapTablePartition) bool { if !p.Field10DeepEqual(ano.IsDefaultPartition) { return false } + if !p.Field11DeepEqual(ano.LoadTabletIdx) { + return false + } return true } @@ -4702,26 +5098,44 @@ func (p *TOlapTablePartition) Field10DeepEqual(src *bool) bool { } return true } +func (p *TOlapTablePartition) Field11DeepEqual(src *int64) bool { + + if p.LoadTabletIdx == src { + return true + } else if p.LoadTabletIdx == nil || src == nil { + return false + } + if *p.LoadTabletIdx != *src { + return false + } + return true +} type TOlapTablePartitionParam struct { - DbId int64 `thrift:"db_id,1,required" frugal:"1,required,i64" json:"db_id"` - TableId int64 `thrift:"table_id,2,required" frugal:"2,required,i64" json:"table_id"` - Version int64 `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` - PartitionColumn *string `thrift:"partition_column,4,optional" frugal:"4,optional,string" json:"partition_column,omitempty"` - DistributedColumns []string `thrift:"distributed_columns,5,optional" frugal:"5,optional,list" json:"distributed_columns,omitempty"` - Partitions []*TOlapTablePartition `thrift:"partitions,6,required" frugal:"6,required,list" json:"partitions"` - PartitionColumns []string `thrift:"partition_columns,7,optional" frugal:"7,optional,list" json:"partition_columns,omitempty"` - PartitionFunctionExprs []*exprs.TExpr `thrift:"partition_function_exprs,8,optional" frugal:"8,optional,list" json:"partition_function_exprs,omitempty"` - EnableAutomaticPartition *bool `thrift:"enable_automatic_partition,9,optional" frugal:"9,optional,bool" json:"enable_automatic_partition,omitempty"` - PartitionType *partitions.TPartitionType `thrift:"partition_type,10,optional" frugal:"10,optional,TPartitionType" json:"partition_type,omitempty"` + DbId int64 `thrift:"db_id,1,required" frugal:"1,required,i64" json:"db_id"` + TableId int64 `thrift:"table_id,2,required" frugal:"2,required,i64" json:"table_id"` + Version int64 `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` + PartitionColumn *string `thrift:"partition_column,4,optional" frugal:"4,optional,string" json:"partition_column,omitempty"` + DistributedColumns []string `thrift:"distributed_columns,5,optional" 
frugal:"5,optional,list" json:"distributed_columns,omitempty"` + Partitions []*TOlapTablePartition `thrift:"partitions,6,required" frugal:"6,required,list" json:"partitions"` + PartitionColumns []string `thrift:"partition_columns,7,optional" frugal:"7,optional,list" json:"partition_columns,omitempty"` + PartitionFunctionExprs []*exprs.TExpr `thrift:"partition_function_exprs,8,optional" frugal:"8,optional,list" json:"partition_function_exprs,omitempty"` + EnableAutomaticPartition *bool `thrift:"enable_automatic_partition,9,optional" frugal:"9,optional,bool" json:"enable_automatic_partition,omitempty"` + PartitionType *partitions.TPartitionType `thrift:"partition_type,10,optional" frugal:"10,optional,TPartitionType" json:"partition_type,omitempty"` + EnableAutoDetectOverwrite *bool `thrift:"enable_auto_detect_overwrite,11,optional" frugal:"11,optional,bool" json:"enable_auto_detect_overwrite,omitempty"` + OverwriteGroupId *int64 `thrift:"overwrite_group_id,12,optional" frugal:"12,optional,i64" json:"overwrite_group_id,omitempty"` + PartitionsIsFake bool `thrift:"partitions_is_fake,13,optional" frugal:"13,optional,bool" json:"partitions_is_fake,omitempty"` } func NewTOlapTablePartitionParam() *TOlapTablePartitionParam { - return &TOlapTablePartitionParam{} + return &TOlapTablePartitionParam{ + + PartitionsIsFake: false, + } } func (p *TOlapTablePartitionParam) InitDefault() { - *p = TOlapTablePartitionParam{} + p.PartitionsIsFake = false } func (p *TOlapTablePartitionParam) GetDbId() (v int64) { @@ -4793,6 +5207,33 @@ func (p *TOlapTablePartitionParam) GetPartitionType() (v partitions.TPartitionTy } return *p.PartitionType } + +var TOlapTablePartitionParam_EnableAutoDetectOverwrite_DEFAULT bool + +func (p *TOlapTablePartitionParam) GetEnableAutoDetectOverwrite() (v bool) { + if !p.IsSetEnableAutoDetectOverwrite() { + return TOlapTablePartitionParam_EnableAutoDetectOverwrite_DEFAULT + } + return *p.EnableAutoDetectOverwrite +} + +var TOlapTablePartitionParam_OverwriteGroupId_DEFAULT int64 + +func (p *TOlapTablePartitionParam) GetOverwriteGroupId() (v int64) { + if !p.IsSetOverwriteGroupId() { + return TOlapTablePartitionParam_OverwriteGroupId_DEFAULT + } + return *p.OverwriteGroupId +} + +var TOlapTablePartitionParam_PartitionsIsFake_DEFAULT bool = false + +func (p *TOlapTablePartitionParam) GetPartitionsIsFake() (v bool) { + if !p.IsSetPartitionsIsFake() { + return TOlapTablePartitionParam_PartitionsIsFake_DEFAULT + } + return p.PartitionsIsFake +} func (p *TOlapTablePartitionParam) SetDbId(val int64) { p.DbId = val } @@ -4823,6 +5264,15 @@ func (p *TOlapTablePartitionParam) SetEnableAutomaticPartition(val *bool) { func (p *TOlapTablePartitionParam) SetPartitionType(val *partitions.TPartitionType) { p.PartitionType = val } +func (p *TOlapTablePartitionParam) SetEnableAutoDetectOverwrite(val *bool) { + p.EnableAutoDetectOverwrite = val +} +func (p *TOlapTablePartitionParam) SetOverwriteGroupId(val *int64) { + p.OverwriteGroupId = val +} +func (p *TOlapTablePartitionParam) SetPartitionsIsFake(val bool) { + p.PartitionsIsFake = val +} var fieldIDToName_TOlapTablePartitionParam = map[int16]string{ 1: "db_id", @@ -4835,6 +5285,9 @@ var fieldIDToName_TOlapTablePartitionParam = map[int16]string{ 8: "partition_function_exprs", 9: "enable_automatic_partition", 10: "partition_type", + 11: "enable_auto_detect_overwrite", + 12: "overwrite_group_id", + 13: "partitions_is_fake", } func (p *TOlapTablePartitionParam) IsSetPartitionColumn() bool { @@ -4861,6 +5314,18 @@ func (p *TOlapTablePartitionParam) 
IsSetPartitionType() bool { return p.PartitionType != nil } +func (p *TOlapTablePartitionParam) IsSetEnableAutoDetectOverwrite() bool { + return p.EnableAutoDetectOverwrite != nil +} + +func (p *TOlapTablePartitionParam) IsSetOverwriteGroupId() bool { + return p.OverwriteGroupId != nil +} + +func (p *TOlapTablePartitionParam) IsSetPartitionsIsFake() bool { + return p.PartitionsIsFake != TOlapTablePartitionParam_PartitionsIsFake_DEFAULT +} + func (p *TOlapTablePartitionParam) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -4890,10 +5355,8 @@ func (p *TOlapTablePartitionParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -4901,10 +5364,8 @@ func (p *TOlapTablePartitionParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -4912,30 +5373,24 @@ func (p *TOlapTablePartitionParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { @@ -4943,57 +5398,70 @@ func (p *TOlapTablePartitionParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartitions = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.BOOL { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I32 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if 
fieldTypeId == thrift.BOOL { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - default: - if err = iprot.Skip(fieldTypeId); err != nil { + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } - + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5040,48 +5508,57 @@ RequiredFieldNotSetError: } func (p *TOlapTablePartitionParam) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = v + _field = v } + p.DbId = _field return nil } - func (p *TOlapTablePartitionParam) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = v + _field = v } + p.TableId = _field return nil } - func (p *TOlapTablePartitionParam) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TOlapTablePartitionParam) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.PartitionColumn = &v + _field = &v } + p.PartitionColumn = _field return nil } - func (p *TOlapTablePartitionParam) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DistributedColumns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -5089,41 +5566,45 @@ func (p *TOlapTablePartitionParam) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.DistributedColumns = append(p.DistributedColumns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DistributedColumns = _field return nil } - func (p *TOlapTablePartitionParam) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Partitions = make([]*TOlapTablePartition, 0, size) + _field := make([]*TOlapTablePartition, 0, size) + values := make([]TOlapTablePartition, size) for i := 0; i < size; i++ { - _elem := NewTOlapTablePartition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Partitions = append(p.Partitions, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Partitions = _field return nil } - func (p *TOlapTablePartitionParam) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionColumns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -5131,50 +5612,91 @@ func (p *TOlapTablePartitionParam) ReadField7(iprot thrift.TProtocol) error { _elem = v } - p.PartitionColumns = 
append(p.PartitionColumns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionColumns = _field return nil } - func (p *TOlapTablePartitionParam) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionFunctionExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionFunctionExprs = append(p.PartitionFunctionExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionFunctionExprs = _field return nil } - func (p *TOlapTablePartitionParam) ReadField9(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EnableAutomaticPartition = &v + _field = &v } + p.EnableAutomaticPartition = _field return nil } - func (p *TOlapTablePartitionParam) ReadField10(iprot thrift.TProtocol) error { + + var _field *partitions.TPartitionType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := partitions.TPartitionType(v) - p.PartitionType = &tmp + _field = &tmp } + p.PartitionType = _field + return nil +} +func (p *TOlapTablePartitionParam) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableAutoDetectOverwrite = _field + return nil +} +func (p *TOlapTablePartitionParam) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.OverwriteGroupId = _field + return nil +} +func (p *TOlapTablePartitionParam) ReadField13(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.PartitionsIsFake = _field return nil } @@ -5224,7 +5746,18 @@ func (p *TOlapTablePartitionParam) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5457,11 +5990,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } +func (p *TOlapTablePartitionParam) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableAutoDetectOverwrite() { + if err = oprot.WriteFieldBegin("enable_auto_detect_overwrite", thrift.BOOL, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableAutoDetectOverwrite); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TOlapTablePartitionParam) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetOverwriteGroupId() { + if err = oprot.WriteFieldBegin("overwrite_group_id", 
thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.OverwriteGroupId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TOlapTablePartitionParam) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionsIsFake() { + if err = oprot.WriteFieldBegin("partitions_is_fake", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.PartitionsIsFake); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + func (p *TOlapTablePartitionParam) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapTablePartitionParam(%+v)", *p) + } func (p *TOlapTablePartitionParam) DeepEqual(ano *TOlapTablePartitionParam) bool { @@ -5500,6 +6091,15 @@ func (p *TOlapTablePartitionParam) DeepEqual(ano *TOlapTablePartitionParam) bool if !p.Field10DeepEqual(ano.PartitionType) { return false } + if !p.Field11DeepEqual(ano.EnableAutoDetectOverwrite) { + return false + } + if !p.Field12DeepEqual(ano.OverwriteGroupId) { + return false + } + if !p.Field13DeepEqual(ano.PartitionsIsFake) { + return false + } return true } @@ -5612,14 +6212,46 @@ func (p *TOlapTablePartitionParam) Field10DeepEqual(src *partitions.TPartitionTy } return true } +func (p *TOlapTablePartitionParam) Field11DeepEqual(src *bool) bool { + + if p.EnableAutoDetectOverwrite == src { + return true + } else if p.EnableAutoDetectOverwrite == nil || src == nil { + return false + } + if *p.EnableAutoDetectOverwrite != *src { + return false + } + return true +} +func (p *TOlapTablePartitionParam) Field12DeepEqual(src *int64) bool { + + if p.OverwriteGroupId == src { + return true + } else if p.OverwriteGroupId == nil || src == nil { + return false + } + if *p.OverwriteGroupId != *src { + return false + } + return true +} +func (p *TOlapTablePartitionParam) Field13DeepEqual(src bool) bool { + + if p.PartitionsIsFake != src { + return false + } + return true +} type TOlapTableIndex struct { - IndexName *string `thrift:"index_name,1,optional" frugal:"1,optional,string" json:"index_name,omitempty"` - Columns []string `thrift:"columns,2,optional" frugal:"2,optional,list" json:"columns,omitempty"` - IndexType *TIndexType `thrift:"index_type,3,optional" frugal:"3,optional,TIndexType" json:"index_type,omitempty"` - Comment *string `thrift:"comment,4,optional" frugal:"4,optional,string" json:"comment,omitempty"` - IndexId *int64 `thrift:"index_id,5,optional" frugal:"5,optional,i64" json:"index_id,omitempty"` - Properties map[string]string `thrift:"properties,6,optional" frugal:"6,optional,map" json:"properties,omitempty"` + IndexName *string `thrift:"index_name,1,optional" frugal:"1,optional,string" json:"index_name,omitempty"` + Columns []string `thrift:"columns,2,optional" frugal:"2,optional,list" json:"columns,omitempty"` + IndexType *TIndexType `thrift:"index_type,3,optional" frugal:"3,optional,TIndexType" json:"index_type,omitempty"` + Comment *string `thrift:"comment,4,optional" 
frugal:"4,optional,string" json:"comment,omitempty"` + IndexId *int64 `thrift:"index_id,5,optional" frugal:"5,optional,i64" json:"index_id,omitempty"` + Properties map[string]string `thrift:"properties,6,optional" frugal:"6,optional,map" json:"properties,omitempty"` + ColumnUniqueIds []int32 `thrift:"column_unique_ids,7,optional" frugal:"7,optional,list" json:"column_unique_ids,omitempty"` } func NewTOlapTableIndex() *TOlapTableIndex { @@ -5627,7 +6259,6 @@ func NewTOlapTableIndex() *TOlapTableIndex { } func (p *TOlapTableIndex) InitDefault() { - *p = TOlapTableIndex{} } var TOlapTableIndex_IndexName_DEFAULT string @@ -5683,6 +6314,15 @@ func (p *TOlapTableIndex) GetProperties() (v map[string]string) { } return p.Properties } + +var TOlapTableIndex_ColumnUniqueIds_DEFAULT []int32 + +func (p *TOlapTableIndex) GetColumnUniqueIds() (v []int32) { + if !p.IsSetColumnUniqueIds() { + return TOlapTableIndex_ColumnUniqueIds_DEFAULT + } + return p.ColumnUniqueIds +} func (p *TOlapTableIndex) SetIndexName(val *string) { p.IndexName = val } @@ -5701,6 +6341,9 @@ func (p *TOlapTableIndex) SetIndexId(val *int64) { func (p *TOlapTableIndex) SetProperties(val map[string]string) { p.Properties = val } +func (p *TOlapTableIndex) SetColumnUniqueIds(val []int32) { + p.ColumnUniqueIds = val +} var fieldIDToName_TOlapTableIndex = map[int16]string{ 1: "index_name", @@ -5709,6 +6352,7 @@ var fieldIDToName_TOlapTableIndex = map[int16]string{ 4: "comment", 5: "index_id", 6: "properties", + 7: "column_unique_ids", } func (p *TOlapTableIndex) IsSetIndexName() bool { @@ -5735,6 +6379,10 @@ func (p *TOlapTableIndex) IsSetProperties() bool { return p.Properties != nil } +func (p *TOlapTableIndex) IsSetColumnUniqueIds() bool { + return p.ColumnUniqueIds != nil +} + func (p *TOlapTableIndex) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -5759,67 +6407,62 @@ func (p *TOlapTableIndex) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.MAP { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if 
fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5845,21 +6488,24 @@ ReadStructEndError: } func (p *TOlapTableIndex) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.IndexName = &v + _field = &v } + p.IndexName = _field return nil } - func (p *TOlapTableIndex) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -5867,48 +6513,54 @@ func (p *TOlapTableIndex) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TOlapTableIndex) ReadField3(iprot thrift.TProtocol) error { + + var _field *TIndexType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TIndexType(v) - p.IndexType = &tmp + _field = &tmp } + p.IndexType = _field return nil } - func (p *TOlapTableIndex) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = &v + _field = &v } + p.Comment = _field return nil } - func (p *TOlapTableIndex) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IndexId = &v + _field = &v } + p.IndexId = _field return nil } - func (p *TOlapTableIndex) ReadField6(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -5924,11 +6576,35 @@ func (p *TOlapTableIndex) ReadField6(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field + return nil +} +func (p *TOlapTableIndex) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnUniqueIds = _field return nil } @@ -5962,7 +6638,10 @@ func (p *TOlapTableIndex) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6093,11 +6772,9 @@ func (p *TOlapTableIndex) writeField6(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -6116,11 +6793,39 @@ WriteFieldEndError: return 
thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } +func (p *TOlapTableIndex) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnUniqueIds() { + if err = oprot.WriteFieldBegin("column_unique_ids", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.ColumnUniqueIds)); err != nil { + return err + } + for _, v := range p.ColumnUniqueIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + func (p *TOlapTableIndex) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapTableIndex(%+v)", *p) + } func (p *TOlapTableIndex) DeepEqual(ano *TOlapTableIndex) bool { @@ -6147,6 +6852,9 @@ func (p *TOlapTableIndex) DeepEqual(ano *TOlapTableIndex) bool { if !p.Field6DeepEqual(ano.Properties) { return false } + if !p.Field7DeepEqual(ano.ColumnUniqueIds) { + return false + } return true } @@ -6224,6 +6932,19 @@ func (p *TOlapTableIndex) Field6DeepEqual(src map[string]string) bool { } return true } +func (p *TOlapTableIndex) Field7DeepEqual(src []int32) bool { + + if len(p.ColumnUniqueIds) != len(src) { + return false + } + for i, v := range p.ColumnUniqueIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} type TOlapTableIndexSchema struct { Id int64 `thrift:"id,1,required" frugal:"1,required,i64" json:"id"` @@ -6239,7 +6960,6 @@ func NewTOlapTableIndexSchema() *TOlapTableIndexSchema { } func (p *TOlapTableIndexSchema) InitDefault() { - *p = TOlapTableIndexSchema{} } func (p *TOlapTableIndexSchema) GetId() (v int64) { @@ -6348,10 +7068,8 @@ func (p *TOlapTableIndexSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -6359,10 +7077,8 @@ func (p *TOlapTableIndexSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -6370,47 +7086,38 @@ func (p *TOlapTableIndexSchema) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = 
p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6452,21 +7159,24 @@ RequiredFieldNotSetError: } func (p *TOlapTableIndexSchema) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TOlapTableIndexSchema) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -6474,68 +7184,77 @@ func (p *TOlapTableIndexSchema) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TOlapTableIndexSchema) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TOlapTableIndexSchema) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnsDesc = make([]*TColumn, 0, size) + _field := make([]*TColumn, 0, size) + values := make([]TColumn, size) for i := 0; i < size; i++ { - _elem := NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnsDesc = append(p.ColumnsDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnsDesc = _field return nil } - func (p *TOlapTableIndexSchema) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.IndexesDesc = make([]*TOlapTableIndex, 0, size) + _field := make([]*TOlapTableIndex, 0, size) + values := make([]TOlapTableIndex, size) for i := 0; i < size; i++ { - _elem := NewTOlapTableIndex() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.IndexesDesc = append(p.IndexesDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.IndexesDesc = _field return nil } - func (p *TOlapTableIndexSchema) ReadField6(iprot thrift.TProtocol) error { - p.WhereClause = exprs.NewTExpr() - if err := p.WhereClause.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.WhereClause = _field return nil } @@ -6569,7 +7288,6 @@ func (p *TOlapTableIndexSchema) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6725,6 +7443,7 @@ func (p *TOlapTableIndexSchema) String() string { return "" } return fmt.Sprintf("TOlapTableIndexSchema(%+v)", *p) + } func (p *TOlapTableIndexSchema) DeepEqual(ano *TOlapTableIndexSchema) bool { @@ -6816,30 +7535,38 @@ func (p *TOlapTableIndexSchema) Field6DeepEqual(src *exprs.TExpr) bool { } type TOlapTableSchemaParam struct { - DbId int64 
`thrift:"db_id,1,required" frugal:"1,required,i64" json:"db_id"` - TableId int64 `thrift:"table_id,2,required" frugal:"2,required,i64" json:"table_id"` - Version int64 `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` - SlotDescs []*TSlotDescriptor `thrift:"slot_descs,4,required" frugal:"4,required,list" json:"slot_descs"` - TupleDesc *TTupleDescriptor `thrift:"tuple_desc,5,required" frugal:"5,required,TTupleDescriptor" json:"tuple_desc"` - Indexes []*TOlapTableIndexSchema `thrift:"indexes,6,required" frugal:"6,required,list" json:"indexes"` - IsDynamicSchema *bool `thrift:"is_dynamic_schema,7,optional" frugal:"7,optional,bool" json:"is_dynamic_schema,omitempty"` - IsPartialUpdate *bool `thrift:"is_partial_update,8,optional" frugal:"8,optional,bool" json:"is_partial_update,omitempty"` - PartialUpdateInputColumns []string `thrift:"partial_update_input_columns,9,optional" frugal:"9,optional,list" json:"partial_update_input_columns,omitempty"` - IsStrictMode bool `thrift:"is_strict_mode,10,optional" frugal:"10,optional,bool" json:"is_strict_mode,omitempty"` + DbId int64 `thrift:"db_id,1,required" frugal:"1,required,i64" json:"db_id"` + TableId int64 `thrift:"table_id,2,required" frugal:"2,required,i64" json:"table_id"` + Version int64 `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` + SlotDescs []*TSlotDescriptor `thrift:"slot_descs,4,required" frugal:"4,required,list" json:"slot_descs"` + TupleDesc *TTupleDescriptor `thrift:"tuple_desc,5,required" frugal:"5,required,TTupleDescriptor" json:"tuple_desc"` + Indexes []*TOlapTableIndexSchema `thrift:"indexes,6,required" frugal:"6,required,list" json:"indexes"` + IsDynamicSchema *bool `thrift:"is_dynamic_schema,7,optional" frugal:"7,optional,bool" json:"is_dynamic_schema,omitempty"` + IsPartialUpdate *bool `thrift:"is_partial_update,8,optional" frugal:"8,optional,bool" json:"is_partial_update,omitempty"` + PartialUpdateInputColumns []string `thrift:"partial_update_input_columns,9,optional" frugal:"9,optional,list" json:"partial_update_input_columns,omitempty"` + IsStrictMode bool `thrift:"is_strict_mode,10,optional" frugal:"10,optional,bool" json:"is_strict_mode,omitempty"` + AutoIncrementColumn *string `thrift:"auto_increment_column,11,optional" frugal:"11,optional,string" json:"auto_increment_column,omitempty"` + AutoIncrementColumnUniqueId int32 `thrift:"auto_increment_column_unique_id,12,optional" frugal:"12,optional,i32" json:"auto_increment_column_unique_id,omitempty"` + InvertedIndexFileStorageFormat types.TInvertedIndexFileStorageFormat `thrift:"inverted_index_file_storage_format,13,optional" frugal:"13,optional,TInvertedIndexFileStorageFormat" json:"inverted_index_file_storage_format,omitempty"` + UniqueKeyUpdateMode *types.TUniqueKeyUpdateMode `thrift:"unique_key_update_mode,14,optional" frugal:"14,optional,TUniqueKeyUpdateMode" json:"unique_key_update_mode,omitempty"` + SequenceMapColUniqueId int32 `thrift:"sequence_map_col_unique_id,15,optional" frugal:"15,optional,i32" json:"sequence_map_col_unique_id,omitempty"` } func NewTOlapTableSchemaParam() *TOlapTableSchemaParam { return &TOlapTableSchemaParam{ - IsStrictMode: false, + IsStrictMode: false, + AutoIncrementColumnUniqueId: -1, + InvertedIndexFileStorageFormat: types.TInvertedIndexFileStorageFormat_V1, + SequenceMapColUniqueId: -1, } } func (p *TOlapTableSchemaParam) InitDefault() { - *p = TOlapTableSchemaParam{ - - IsStrictMode: false, - } + p.IsStrictMode = false + p.AutoIncrementColumnUniqueId = -1 + p.InvertedIndexFileStorageFormat = 
types.TInvertedIndexFileStorageFormat_V1 + p.SequenceMapColUniqueId = -1 } func (p *TOlapTableSchemaParam) GetDbId() (v int64) { @@ -6906,6 +7633,51 @@ func (p *TOlapTableSchemaParam) GetIsStrictMode() (v bool) { } return p.IsStrictMode } + +var TOlapTableSchemaParam_AutoIncrementColumn_DEFAULT string + +func (p *TOlapTableSchemaParam) GetAutoIncrementColumn() (v string) { + if !p.IsSetAutoIncrementColumn() { + return TOlapTableSchemaParam_AutoIncrementColumn_DEFAULT + } + return *p.AutoIncrementColumn +} + +var TOlapTableSchemaParam_AutoIncrementColumnUniqueId_DEFAULT int32 = -1 + +func (p *TOlapTableSchemaParam) GetAutoIncrementColumnUniqueId() (v int32) { + if !p.IsSetAutoIncrementColumnUniqueId() { + return TOlapTableSchemaParam_AutoIncrementColumnUniqueId_DEFAULT + } + return p.AutoIncrementColumnUniqueId +} + +var TOlapTableSchemaParam_InvertedIndexFileStorageFormat_DEFAULT types.TInvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat_V1 + +func (p *TOlapTableSchemaParam) GetInvertedIndexFileStorageFormat() (v types.TInvertedIndexFileStorageFormat) { + if !p.IsSetInvertedIndexFileStorageFormat() { + return TOlapTableSchemaParam_InvertedIndexFileStorageFormat_DEFAULT + } + return p.InvertedIndexFileStorageFormat +} + +var TOlapTableSchemaParam_UniqueKeyUpdateMode_DEFAULT types.TUniqueKeyUpdateMode + +func (p *TOlapTableSchemaParam) GetUniqueKeyUpdateMode() (v types.TUniqueKeyUpdateMode) { + if !p.IsSetUniqueKeyUpdateMode() { + return TOlapTableSchemaParam_UniqueKeyUpdateMode_DEFAULT + } + return *p.UniqueKeyUpdateMode +} + +var TOlapTableSchemaParam_SequenceMapColUniqueId_DEFAULT int32 = -1 + +func (p *TOlapTableSchemaParam) GetSequenceMapColUniqueId() (v int32) { + if !p.IsSetSequenceMapColUniqueId() { + return TOlapTableSchemaParam_SequenceMapColUniqueId_DEFAULT + } + return p.SequenceMapColUniqueId +} func (p *TOlapTableSchemaParam) SetDbId(val int64) { p.DbId = val } @@ -6936,6 +7708,21 @@ func (p *TOlapTableSchemaParam) SetPartialUpdateInputColumns(val []string) { func (p *TOlapTableSchemaParam) SetIsStrictMode(val bool) { p.IsStrictMode = val } +func (p *TOlapTableSchemaParam) SetAutoIncrementColumn(val *string) { + p.AutoIncrementColumn = val +} +func (p *TOlapTableSchemaParam) SetAutoIncrementColumnUniqueId(val int32) { + p.AutoIncrementColumnUniqueId = val +} +func (p *TOlapTableSchemaParam) SetInvertedIndexFileStorageFormat(val types.TInvertedIndexFileStorageFormat) { + p.InvertedIndexFileStorageFormat = val +} +func (p *TOlapTableSchemaParam) SetUniqueKeyUpdateMode(val *types.TUniqueKeyUpdateMode) { + p.UniqueKeyUpdateMode = val +} +func (p *TOlapTableSchemaParam) SetSequenceMapColUniqueId(val int32) { + p.SequenceMapColUniqueId = val +} var fieldIDToName_TOlapTableSchemaParam = map[int16]string{ 1: "db_id", @@ -6948,6 +7735,11 @@ var fieldIDToName_TOlapTableSchemaParam = map[int16]string{ 8: "is_partial_update", 9: "partial_update_input_columns", 10: "is_strict_mode", + 11: "auto_increment_column", + 12: "auto_increment_column_unique_id", + 13: "inverted_index_file_storage_format", + 14: "unique_key_update_mode", + 15: "sequence_map_col_unique_id", } func (p *TOlapTableSchemaParam) IsSetTupleDesc() bool { @@ -6970,6 +7762,26 @@ func (p *TOlapTableSchemaParam) IsSetIsStrictMode() bool { return p.IsStrictMode != TOlapTableSchemaParam_IsStrictMode_DEFAULT } +func (p *TOlapTableSchemaParam) IsSetAutoIncrementColumn() bool { + return p.AutoIncrementColumn != nil +} + +func (p *TOlapTableSchemaParam) IsSetAutoIncrementColumnUniqueId() bool { + return 
p.AutoIncrementColumnUniqueId != TOlapTableSchemaParam_AutoIncrementColumnUniqueId_DEFAULT +} + +func (p *TOlapTableSchemaParam) IsSetInvertedIndexFileStorageFormat() bool { + return p.InvertedIndexFileStorageFormat != TOlapTableSchemaParam_InvertedIndexFileStorageFormat_DEFAULT +} + +func (p *TOlapTableSchemaParam) IsSetUniqueKeyUpdateMode() bool { + return p.UniqueKeyUpdateMode != nil +} + +func (p *TOlapTableSchemaParam) IsSetSequenceMapColUniqueId() bool { + return p.SequenceMapColUniqueId != TOlapTableSchemaParam_SequenceMapColUniqueId_DEFAULT +} + func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -7001,10 +7813,8 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -7012,10 +7822,8 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -7023,10 +7831,8 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -7034,10 +7840,8 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSlotDescs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { @@ -7045,10 +7849,8 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleDesc = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { @@ -7056,57 +7858,86 @@ func (p *TOlapTableSchemaParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL 
{ if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I32 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I32 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I32 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7163,105 +7994,122 @@ RequiredFieldNotSetError: } func (p *TOlapTableSchemaParam) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = v + _field = v } + p.DbId = _field return nil } - func (p *TOlapTableSchemaParam) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = v + _field = v } + p.TableId = _field return nil } - func (p *TOlapTableSchemaParam) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TOlapTableSchemaParam) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SlotDescs = make([]*TSlotDescriptor, 0, size) + _field := make([]*TSlotDescriptor, 0, size) + values := make([]TSlotDescriptor, size) for i := 0; i < size; i++ { - _elem := NewTSlotDescriptor() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SlotDescs = append(p.SlotDescs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SlotDescs = _field return nil } - func (p *TOlapTableSchemaParam) ReadField5(iprot thrift.TProtocol) error { - p.TupleDesc = NewTTupleDescriptor() - if err := p.TupleDesc.Read(iprot); err != nil { + _field := NewTTupleDescriptor() + if err := _field.Read(iprot); err != nil { return err } + p.TupleDesc = _field return nil } - func (p *TOlapTableSchemaParam) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Indexes = make([]*TOlapTableIndexSchema, 0, size) + _field := make([]*TOlapTableIndexSchema, 0, size) + values := make([]TOlapTableIndexSchema, size) for i := 0; i < size; i++ { - _elem := NewTOlapTableIndexSchema() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Indexes = append(p.Indexes, _elem) + _field = append(_field, _elem) } if err := 
iprot.ReadListEnd(); err != nil { return err } + p.Indexes = _field return nil } - func (p *TOlapTableSchemaParam) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDynamicSchema = &v + _field = &v } + p.IsDynamicSchema = _field return nil } - func (p *TOlapTableSchemaParam) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsPartialUpdate = &v + _field = &v } + p.IsPartialUpdate = _field return nil } - func (p *TOlapTableSchemaParam) ReadField9(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartialUpdateInputColumns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -7269,20 +8117,79 @@ func (p *TOlapTableSchemaParam) ReadField9(iprot thrift.TProtocol) error { _elem = v } - p.PartialUpdateInputColumns = append(p.PartialUpdateInputColumns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartialUpdateInputColumns = _field return nil } - func (p *TOlapTableSchemaParam) ReadField10(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsStrictMode = v + _field = v + } + p.IsStrictMode = _field + return nil +} +func (p *TOlapTableSchemaParam) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AutoIncrementColumn = _field + return nil +} +func (p *TOlapTableSchemaParam) ReadField12(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.AutoIncrementColumnUniqueId = _field + return nil +} +func (p *TOlapTableSchemaParam) ReadField13(iprot thrift.TProtocol) error { + + var _field types.TInvertedIndexFileStorageFormat + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TInvertedIndexFileStorageFormat(v) + } + p.InvertedIndexFileStorageFormat = _field + return nil +} +func (p *TOlapTableSchemaParam) ReadField14(iprot thrift.TProtocol) error { + + var _field *types.TUniqueKeyUpdateMode + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TUniqueKeyUpdateMode(v) + _field = &tmp + } + p.UniqueKeyUpdateMode = _field + return nil +} +func (p *TOlapTableSchemaParam) ReadField15(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v } + p.SequenceMapColUniqueId = _field return nil } @@ -7332,7 +8239,26 @@ func (p *TOlapTableSchemaParam) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7553,11 +8479,107 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write 
field 10 end error: ", p), err) } +func (p *TOlapTableSchemaParam) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetAutoIncrementColumn() { + if err = oprot.WriteFieldBegin("auto_increment_column", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AutoIncrementColumn); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TOlapTableSchemaParam) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetAutoIncrementColumnUniqueId() { + if err = oprot.WriteFieldBegin("auto_increment_column_unique_id", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.AutoIncrementColumnUniqueId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TOlapTableSchemaParam) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexFileStorageFormat() { + if err = oprot.WriteFieldBegin("inverted_index_file_storage_format", thrift.I32, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.InvertedIndexFileStorageFormat)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TOlapTableSchemaParam) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetUniqueKeyUpdateMode() { + if err = oprot.WriteFieldBegin("unique_key_update_mode", thrift.I32, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.UniqueKeyUpdateMode)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TOlapTableSchemaParam) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetSequenceMapColUniqueId() { + if err = oprot.WriteFieldBegin("sequence_map_col_unique_id", thrift.I32, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.SequenceMapColUniqueId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + func (p *TOlapTableSchemaParam) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapTableSchemaParam(%+v)", *p) + } func (p *TOlapTableSchemaParam) DeepEqual(ano *TOlapTableSchemaParam) bool { @@ -7596,6 +8618,21 @@ func (p *TOlapTableSchemaParam) 
DeepEqual(ano *TOlapTableSchemaParam) bool { if !p.Field10DeepEqual(ano.IsStrictMode) { return false } + if !p.Field11DeepEqual(ano.AutoIncrementColumn) { + return false + } + if !p.Field12DeepEqual(ano.AutoIncrementColumnUniqueId) { + return false + } + if !p.Field13DeepEqual(ano.InvertedIndexFileStorageFormat) { + return false + } + if !p.Field14DeepEqual(ano.UniqueKeyUpdateMode) { + return false + } + if !p.Field15DeepEqual(ano.SequenceMapColUniqueId) { + return false + } return true } @@ -7697,18 +8734,62 @@ func (p *TOlapTableSchemaParam) Field10DeepEqual(src bool) bool { } return true } +func (p *TOlapTableSchemaParam) Field11DeepEqual(src *string) bool { -type TTabletLocation struct { - TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` - NodeIds []int64 `thrift:"node_ids,2,required" frugal:"2,required,list" json:"node_ids"` -} + if p.AutoIncrementColumn == src { + return true + } else if p.AutoIncrementColumn == nil || src == nil { + return false + } + if strings.Compare(*p.AutoIncrementColumn, *src) != 0 { + return false + } + return true +} +func (p *TOlapTableSchemaParam) Field12DeepEqual(src int32) bool { + + if p.AutoIncrementColumnUniqueId != src { + return false + } + return true +} +func (p *TOlapTableSchemaParam) Field13DeepEqual(src types.TInvertedIndexFileStorageFormat) bool { + + if p.InvertedIndexFileStorageFormat != src { + return false + } + return true +} +func (p *TOlapTableSchemaParam) Field14DeepEqual(src *types.TUniqueKeyUpdateMode) bool { + + if p.UniqueKeyUpdateMode == src { + return true + } else if p.UniqueKeyUpdateMode == nil || src == nil { + return false + } + if *p.UniqueKeyUpdateMode != *src { + return false + } + return true +} +func (p *TOlapTableSchemaParam) Field15DeepEqual(src int32) bool { + + if p.SequenceMapColUniqueId != src { + return false + } + return true +} + +type TTabletLocation struct { + TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` + NodeIds []int64 `thrift:"node_ids,2,required" frugal:"2,required,list" json:"node_ids"` +} func NewTTabletLocation() *TTabletLocation { return &TTabletLocation{} } func (p *TTabletLocation) InitDefault() { - *p = TTabletLocation{} } func (p *TTabletLocation) GetTabletId() (v int64) { @@ -7757,10 +8838,8 @@ func (p *TTabletLocation) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -7768,17 +8847,14 @@ func (p *TTabletLocation) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodeIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7815,21 +8891,24 @@ RequiredFieldNotSetError: } func (p *TTabletLocation) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TTabletLocation) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.NodeIds = make([]int64, 0, size) + _field := make([]int64, 0, 
size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -7837,11 +8916,12 @@ func (p *TTabletLocation) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.NodeIds = append(p.NodeIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.NodeIds = _field return nil } @@ -7859,7 +8939,6 @@ func (p *TTabletLocation) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7925,6 +9004,7 @@ func (p *TTabletLocation) String() string { return "" } return fmt.Sprintf("TTabletLocation(%+v)", *p) + } func (p *TTabletLocation) DeepEqual(ano *TTabletLocation) bool { @@ -7975,7 +9055,6 @@ func NewTOlapTableLocationParam() *TOlapTableLocationParam { } func (p *TOlapTableLocationParam) InitDefault() { - *p = TOlapTableLocationParam{} } func (p *TOlapTableLocationParam) GetDbId() (v int64) { @@ -8042,10 +9121,8 @@ func (p *TOlapTableLocationParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -8053,10 +9130,8 @@ func (p *TOlapTableLocationParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -8064,10 +9139,8 @@ func (p *TOlapTableLocationParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -8075,17 +9148,14 @@ func (p *TOlapTableLocationParam) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTablets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8132,49 +9202,59 @@ RequiredFieldNotSetError: } func (p *TOlapTableLocationParam) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = v + _field = v } + p.DbId = _field return nil } - func (p *TOlapTableLocationParam) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = v + _field = v } + p.TableId = _field return nil } - func (p *TOlapTableLocationParam) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TOlapTableLocationParam) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Tablets = make([]*TTabletLocation, 0, size) + _field := make([]*TTabletLocation, 0, size) + values := make([]TTabletLocation, size) for i := 0; i < size; i++ { - _elem := 
NewTTabletLocation() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Tablets = append(p.Tablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Tablets = _field return nil } @@ -8200,7 +9280,6 @@ func (p *TOlapTableLocationParam) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8300,6 +9379,7 @@ func (p *TOlapTableLocationParam) String() string { return "" } return fmt.Sprintf("TOlapTableLocationParam(%+v)", *p) + } func (p *TOlapTableLocationParam) DeepEqual(ano *TOlapTableLocationParam) bool { @@ -8370,7 +9450,6 @@ func NewTNodeInfo() *TNodeInfo { } func (p *TNodeInfo) InitDefault() { - *p = TNodeInfo{} } func (p *TNodeInfo) GetId() (v int64) { @@ -8437,10 +9516,8 @@ func (p *TNodeInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -8448,10 +9525,8 @@ func (p *TNodeInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOption = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -8459,10 +9534,8 @@ func (p *TNodeInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -8470,17 +9543,14 @@ func (p *TNodeInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAsyncInternalPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8527,38 +9597,47 @@ RequiredFieldNotSetError: } func (p *TNodeInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TNodeInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Option = v + _field = v } + p.Option = _field return nil } - func (p *TNodeInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TNodeInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.AsyncInternalPort = v + _field = v } + p.AsyncInternalPort = _field return nil } @@ -8584,7 +9663,6 @@ func (p *TNodeInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8676,6 +9754,7 @@ func (p *TNodeInfo) String() string { return "" } return fmt.Sprintf("TNodeInfo(%+v)", *p) + } 
func (p *TNodeInfo) DeepEqual(ano *TNodeInfo) bool { @@ -8738,7 +9817,6 @@ func NewTPaloNodesInfo() *TPaloNodesInfo { } func (p *TPaloNodesInfo) InitDefault() { - *p = TPaloNodesInfo{} } func (p *TPaloNodesInfo) GetVersion() (v int64) { @@ -8787,10 +9865,8 @@ func (p *TPaloNodesInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -8798,17 +9874,14 @@ func (p *TPaloNodesInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8845,31 +9918,37 @@ RequiredFieldNotSetError: } func (p *TPaloNodesInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TPaloNodesInfo) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Nodes = make([]*TNodeInfo, 0, size) + _field := make([]*TNodeInfo, 0, size) + values := make([]TNodeInfo, size) for i := 0; i < size; i++ { - _elem := NewTNodeInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Nodes = append(p.Nodes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Nodes = _field return nil } @@ -8887,7 +9966,6 @@ func (p *TPaloNodesInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8953,6 +10031,7 @@ func (p *TPaloNodesInfo) String() string { return "" } return fmt.Sprintf("TPaloNodesInfo(%+v)", *p) + } func (p *TPaloNodesInfo) DeepEqual(ano *TPaloNodesInfo) bool { @@ -9000,7 +10079,6 @@ func NewTOlapTable() *TOlapTable { } func (p *TOlapTable) InitDefault() { - *p = TOlapTable{} } func (p *TOlapTable) GetTableName() (v string) { @@ -9040,17 +10118,14 @@ func (p *TOlapTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9082,11 +10157,14 @@ RequiredFieldNotSetError: } func (p *TOlapTable) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } @@ -9100,7 +10178,6 @@ func (p *TOlapTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9141,6 +10218,7 @@ func (p *TOlapTable) String() string { return "" } return fmt.Sprintf("TOlapTable(%+v)", *p) + } func (p *TOlapTable) DeepEqual(ano *TOlapTable) bool { @@ -9178,7 +10256,6 @@ func 
NewTMySQLTable() *TMySQLTable { } func (p *TMySQLTable) InitDefault() { - *p = TMySQLTable{} } func (p *TMySQLTable) GetHost() (v string) { @@ -9272,10 +10349,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -9283,10 +10358,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -9294,10 +10367,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -9305,10 +10376,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { @@ -9316,10 +10385,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { @@ -9327,10 +10394,8 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { @@ -9338,17 +10403,14 @@ func (p *TMySQLTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCharset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9410,65 +10472,80 @@ RequiredFieldNotSetError: } func (p *TMySQLTable) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TMySQLTable) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Port = v + _field = v } + p.Port = _field return nil } - func (p *TMySQLTable) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TMySQLTable) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Passwd = _field return nil } - func (p *TMySQLTable) 
ReadField5(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Db = _field return nil } - func (p *TMySQLTable) ReadField6(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = v + _field = v } + p.Table = _field return nil } - func (p *TMySQLTable) ReadField7(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Charset = v + _field = v } + p.Charset = _field return nil } @@ -9506,7 +10583,6 @@ func (p *TMySQLTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9649,6 +10725,7 @@ func (p *TMySQLTable) String() string { return "" } return fmt.Sprintf("TMySQLTable(%+v)", *p) + } func (p *TMySQLTable) DeepEqual(ano *TMySQLTable) bool { @@ -9747,7 +10824,6 @@ func NewTOdbcTable() *TOdbcTable { } func (p *TOdbcTable) InitDefault() { - *p = TOdbcTable{} } var TOdbcTable_Host_DEFAULT string @@ -9913,87 +10989,70 @@ func (p *TOdbcTable) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10019,75 
+11078,92 @@ ReadStructEndError: } func (p *TOdbcTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = &v + _field = &v } + p.Host = _field return nil } - func (p *TOdbcTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Port = &v + _field = &v } + p.Port = _field return nil } - func (p *TOdbcTable) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TOdbcTable) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = &v + _field = &v } + p.Passwd = _field return nil } - func (p *TOdbcTable) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TOdbcTable) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = &v + _field = &v } + p.Table = _field return nil } - func (p *TOdbcTable) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Driver = &v + _field = &v } + p.Driver = _field return nil } - func (p *TOdbcTable) ReadField8(iprot thrift.TProtocol) error { + + var _field *types.TOdbcTableType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TOdbcTableType(v) - p.Type = &tmp + _field = &tmp } + p.Type = _field return nil } @@ -10129,7 +11205,6 @@ func (p *TOdbcTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10305,6 +11380,7 @@ func (p *TOdbcTable) String() string { return "" } return fmt.Sprintf("TOdbcTable(%+v)", *p) + } func (p *TOdbcTable) DeepEqual(ano *TOdbcTable) bool { @@ -10445,7 +11521,6 @@ func NewTEsTable() *TEsTable { } func (p *TEsTable) InitDefault() { - *p = TEsTable{} } var fieldIDToName_TEsTable = map[int16]string{} @@ -10470,7 +11545,6 @@ func (p *TEsTable) Read(iprot thrift.TProtocol) (err error) { if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10498,7 +11572,6 @@ func (p *TEsTable) Write(oprot thrift.TProtocol) (err error) { goto WriteStructBeginError } if p != nil { - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10520,6 +11593,7 @@ func (p *TEsTable) String() string { return "" } return fmt.Sprintf("TEsTable(%+v)", *p) + } func (p *TEsTable) DeepEqual(ano *TEsTable) bool { @@ -10540,7 +11614,6 @@ func NewTSchemaTable() *TSchemaTable { } func (p *TSchemaTable) InitDefault() { - *p = TSchemaTable{} } func (p *TSchemaTable) GetTableType() (v TSchemaTableType) { @@ -10580,17 +11653,14 @@ func (p *TSchemaTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); 
err != nil { goto ReadFieldEndError } @@ -10622,11 +11692,14 @@ RequiredFieldNotSetError: } func (p *TSchemaTable) ReadField1(iprot thrift.TProtocol) error { + + var _field TSchemaTableType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TableType = TSchemaTableType(v) + _field = TSchemaTableType(v) } + p.TableType = _field return nil } @@ -10640,7 +11713,6 @@ func (p *TSchemaTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10681,6 +11753,7 @@ func (p *TSchemaTable) String() string { return "" } return fmt.Sprintf("TSchemaTable(%+v)", *p) + } func (p *TSchemaTable) DeepEqual(ano *TSchemaTable) bool { @@ -10711,7 +11784,6 @@ func NewTBrokerTable() *TBrokerTable { } func (p *TBrokerTable) InitDefault() { - *p = TBrokerTable{} } var fieldIDToName_TBrokerTable = map[int16]string{} @@ -10736,7 +11808,6 @@ func (p *TBrokerTable) Read(iprot thrift.TProtocol) (err error) { if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10764,7 +11835,6 @@ func (p *TBrokerTable) Write(oprot thrift.TProtocol) (err error) { goto WriteStructBeginError } if p != nil { - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10786,6 +11856,7 @@ func (p *TBrokerTable) String() string { return "" } return fmt.Sprintf("TBrokerTable(%+v)", *p) + } func (p *TBrokerTable) DeepEqual(ano *TBrokerTable) bool { @@ -10808,7 +11879,6 @@ func NewTHiveTable() *THiveTable { } func (p *THiveTable) InitDefault() { - *p = THiveTable{} } func (p *THiveTable) GetDbName() (v string) { @@ -10866,10 +11936,8 @@ func (p *THiveTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -10877,10 +11945,8 @@ func (p *THiveTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { @@ -10888,17 +11954,14 @@ func (p *THiveTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProperties = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10940,29 +12003,33 @@ RequiredFieldNotSetError: } func (p *THiveTable) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = v + _field = v } + p.DbName = _field return nil } - func (p *THiveTable) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *THiveTable) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, 
size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -10978,11 +12045,12 @@ func (p *THiveTable) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } @@ -11004,7 +12072,6 @@ func (p *THiveTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11065,11 +12132,9 @@ func (p *THiveTable) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -11092,6 +12157,7 @@ func (p *THiveTable) String() string { return "" } return fmt.Sprintf("THiveTable(%+v)", *p) + } func (p *THiveTable) DeepEqual(ano *THiveTable) bool { @@ -11151,7 +12217,6 @@ func NewTIcebergTable() *TIcebergTable { } func (p *TIcebergTable) InitDefault() { - *p = TIcebergTable{} } func (p *TIcebergTable) GetDbName() (v string) { @@ -11209,10 +12274,8 @@ func (p *TIcebergTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -11220,10 +12283,8 @@ func (p *TIcebergTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { @@ -11231,17 +12292,14 @@ func (p *TIcebergTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProperties = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11283,29 +12341,33 @@ RequiredFieldNotSetError: } func (p *TIcebergTable) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = v + _field = v } + p.DbName = _field return nil } - func (p *TIcebergTable) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *TIcebergTable) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -11321,11 +12383,12 @@ func (p *TIcebergTable) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } @@ -11347,7 +12410,6 @@ func (p *TIcebergTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto 
WriteFieldStopError @@ -11408,11 +12470,9 @@ func (p *TIcebergTable) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -11435,6 +12495,7 @@ func (p *TIcebergTable) String() string { return "" } return fmt.Sprintf("TIcebergTable(%+v)", *p) + } func (p *TIcebergTable) DeepEqual(ano *TIcebergTable) bool { @@ -11494,7 +12555,6 @@ func NewTHudiTable() *THudiTable { } func (p *THudiTable) InitDefault() { - *p = THudiTable{} } var THudiTable_DbName_DEFAULT string @@ -11575,37 +12635,30 @@ func (p *THudiTable) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11631,29 +12684,33 @@ ReadStructEndError: } func (p *THudiTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = &v + _field = &v } + p.DbName = _field return nil } - func (p *THudiTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *THudiTable) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -11669,11 +12726,12 @@ func (p *THudiTable) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } @@ -11695,7 +12753,6 @@ func (p *THudiTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11761,11 +12818,9 @@ func (p *THudiTable) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -11789,6 +12844,7 @@ func (p *THudiTable) String() string { return "" } return fmt.Sprintf("THudiTable(%+v)", *p) + } func (p *THudiTable) DeepEqual(ano *THudiTable) bool { @@ -11848,14 +12904,20 @@ func (p *THudiTable) Field3DeepEqual(src map[string]string) bool { } type TJdbcTable struct { - JdbcUrl *string `thrift:"jdbc_url,1,optional" frugal:"1,optional,string" 
json:"jdbc_url,omitempty"` - JdbcTableName *string `thrift:"jdbc_table_name,2,optional" frugal:"2,optional,string" json:"jdbc_table_name,omitempty"` - JdbcUser *string `thrift:"jdbc_user,3,optional" frugal:"3,optional,string" json:"jdbc_user,omitempty"` - JdbcPassword *string `thrift:"jdbc_password,4,optional" frugal:"4,optional,string" json:"jdbc_password,omitempty"` - JdbcDriverUrl *string `thrift:"jdbc_driver_url,5,optional" frugal:"5,optional,string" json:"jdbc_driver_url,omitempty"` - JdbcResourceName *string `thrift:"jdbc_resource_name,6,optional" frugal:"6,optional,string" json:"jdbc_resource_name,omitempty"` - JdbcDriverClass *string `thrift:"jdbc_driver_class,7,optional" frugal:"7,optional,string" json:"jdbc_driver_class,omitempty"` - JdbcDriverChecksum *string `thrift:"jdbc_driver_checksum,8,optional" frugal:"8,optional,string" json:"jdbc_driver_checksum,omitempty"` + JdbcUrl *string `thrift:"jdbc_url,1,optional" frugal:"1,optional,string" json:"jdbc_url,omitempty"` + JdbcTableName *string `thrift:"jdbc_table_name,2,optional" frugal:"2,optional,string" json:"jdbc_table_name,omitempty"` + JdbcUser *string `thrift:"jdbc_user,3,optional" frugal:"3,optional,string" json:"jdbc_user,omitempty"` + JdbcPassword *string `thrift:"jdbc_password,4,optional" frugal:"4,optional,string" json:"jdbc_password,omitempty"` + JdbcDriverUrl *string `thrift:"jdbc_driver_url,5,optional" frugal:"5,optional,string" json:"jdbc_driver_url,omitempty"` + JdbcResourceName *string `thrift:"jdbc_resource_name,6,optional" frugal:"6,optional,string" json:"jdbc_resource_name,omitempty"` + JdbcDriverClass *string `thrift:"jdbc_driver_class,7,optional" frugal:"7,optional,string" json:"jdbc_driver_class,omitempty"` + JdbcDriverChecksum *string `thrift:"jdbc_driver_checksum,8,optional" frugal:"8,optional,string" json:"jdbc_driver_checksum,omitempty"` + ConnectionPoolMinSize *int32 `thrift:"connection_pool_min_size,9,optional" frugal:"9,optional,i32" json:"connection_pool_min_size,omitempty"` + ConnectionPoolMaxSize *int32 `thrift:"connection_pool_max_size,10,optional" frugal:"10,optional,i32" json:"connection_pool_max_size,omitempty"` + ConnectionPoolMaxWaitTime *int32 `thrift:"connection_pool_max_wait_time,11,optional" frugal:"11,optional,i32" json:"connection_pool_max_wait_time,omitempty"` + ConnectionPoolMaxLifeTime *int32 `thrift:"connection_pool_max_life_time,12,optional" frugal:"12,optional,i32" json:"connection_pool_max_life_time,omitempty"` + ConnectionPoolKeepAlive *bool `thrift:"connection_pool_keep_alive,13,optional" frugal:"13,optional,bool" json:"connection_pool_keep_alive,omitempty"` + CatalogId *int64 `thrift:"catalog_id,14,optional" frugal:"14,optional,i64" json:"catalog_id,omitempty"` } func NewTJdbcTable() *TJdbcTable { @@ -11863,7 +12925,6 @@ func NewTJdbcTable() *TJdbcTable { } func (p *TJdbcTable) InitDefault() { - *p = TJdbcTable{} } var TJdbcTable_JdbcUrl_DEFAULT string @@ -11937,6 +12998,60 @@ func (p *TJdbcTable) GetJdbcDriverChecksum() (v string) { } return *p.JdbcDriverChecksum } + +var TJdbcTable_ConnectionPoolMinSize_DEFAULT int32 + +func (p *TJdbcTable) GetConnectionPoolMinSize() (v int32) { + if !p.IsSetConnectionPoolMinSize() { + return TJdbcTable_ConnectionPoolMinSize_DEFAULT + } + return *p.ConnectionPoolMinSize +} + +var TJdbcTable_ConnectionPoolMaxSize_DEFAULT int32 + +func (p *TJdbcTable) GetConnectionPoolMaxSize() (v int32) { + if !p.IsSetConnectionPoolMaxSize() { + return TJdbcTable_ConnectionPoolMaxSize_DEFAULT + } + return *p.ConnectionPoolMaxSize +} + +var 
TJdbcTable_ConnectionPoolMaxWaitTime_DEFAULT int32 + +func (p *TJdbcTable) GetConnectionPoolMaxWaitTime() (v int32) { + if !p.IsSetConnectionPoolMaxWaitTime() { + return TJdbcTable_ConnectionPoolMaxWaitTime_DEFAULT + } + return *p.ConnectionPoolMaxWaitTime +} + +var TJdbcTable_ConnectionPoolMaxLifeTime_DEFAULT int32 + +func (p *TJdbcTable) GetConnectionPoolMaxLifeTime() (v int32) { + if !p.IsSetConnectionPoolMaxLifeTime() { + return TJdbcTable_ConnectionPoolMaxLifeTime_DEFAULT + } + return *p.ConnectionPoolMaxLifeTime +} + +var TJdbcTable_ConnectionPoolKeepAlive_DEFAULT bool + +func (p *TJdbcTable) GetConnectionPoolKeepAlive() (v bool) { + if !p.IsSetConnectionPoolKeepAlive() { + return TJdbcTable_ConnectionPoolKeepAlive_DEFAULT + } + return *p.ConnectionPoolKeepAlive +} + +var TJdbcTable_CatalogId_DEFAULT int64 + +func (p *TJdbcTable) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TJdbcTable_CatalogId_DEFAULT + } + return *p.CatalogId +} func (p *TJdbcTable) SetJdbcUrl(val *string) { p.JdbcUrl = val } @@ -11961,16 +13076,40 @@ func (p *TJdbcTable) SetJdbcDriverClass(val *string) { func (p *TJdbcTable) SetJdbcDriverChecksum(val *string) { p.JdbcDriverChecksum = val } +func (p *TJdbcTable) SetConnectionPoolMinSize(val *int32) { + p.ConnectionPoolMinSize = val +} +func (p *TJdbcTable) SetConnectionPoolMaxSize(val *int32) { + p.ConnectionPoolMaxSize = val +} +func (p *TJdbcTable) SetConnectionPoolMaxWaitTime(val *int32) { + p.ConnectionPoolMaxWaitTime = val +} +func (p *TJdbcTable) SetConnectionPoolMaxLifeTime(val *int32) { + p.ConnectionPoolMaxLifeTime = val +} +func (p *TJdbcTable) SetConnectionPoolKeepAlive(val *bool) { + p.ConnectionPoolKeepAlive = val +} +func (p *TJdbcTable) SetCatalogId(val *int64) { + p.CatalogId = val +} var fieldIDToName_TJdbcTable = map[int16]string{ - 1: "jdbc_url", - 2: "jdbc_table_name", - 3: "jdbc_user", - 4: "jdbc_password", - 5: "jdbc_driver_url", - 6: "jdbc_resource_name", - 7: "jdbc_driver_class", - 8: "jdbc_driver_checksum", + 1: "jdbc_url", + 2: "jdbc_table_name", + 3: "jdbc_user", + 4: "jdbc_password", + 5: "jdbc_driver_url", + 6: "jdbc_resource_name", + 7: "jdbc_driver_class", + 8: "jdbc_driver_checksum", + 9: "connection_pool_min_size", + 10: "connection_pool_max_size", + 11: "connection_pool_max_wait_time", + 12: "connection_pool_max_life_time", + 13: "connection_pool_keep_alive", + 14: "catalog_id", } func (p *TJdbcTable) IsSetJdbcUrl() bool { @@ -12005,14 +13144,38 @@ func (p *TJdbcTable) IsSetJdbcDriverChecksum() bool { return p.JdbcDriverChecksum != nil } -func (p *TJdbcTable) Read(iprot thrift.TProtocol) (err error) { +func (p *TJdbcTable) IsSetConnectionPoolMinSize() bool { + return p.ConnectionPoolMinSize != nil +} - var fieldTypeId thrift.TType - var fieldId int16 +func (p *TJdbcTable) IsSetConnectionPoolMaxSize() bool { + return p.ConnectionPoolMaxSize != nil +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } +func (p *TJdbcTable) IsSetConnectionPoolMaxWaitTime() bool { + return p.ConnectionPoolMaxWaitTime != nil +} + +func (p *TJdbcTable) IsSetConnectionPoolMaxLifeTime() bool { + return p.ConnectionPoolMaxLifeTime != nil +} + +func (p *TJdbcTable) IsSetConnectionPoolKeepAlive() bool { + return p.ConnectionPoolKeepAlive != nil +} + +func (p *TJdbcTable) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TJdbcTable) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto 
ReadStructBeginError + } for { _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() @@ -12029,87 +13192,118 @@ func (p *TJdbcTable) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { 
goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12135,74 +13329,157 @@ ReadStructEndError: } func (p *TJdbcTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcUrl = &v + _field = &v } + p.JdbcUrl = _field return nil } - func (p *TJdbcTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcTableName = &v + _field = &v } + p.JdbcTableName = _field return nil } - func (p *TJdbcTable) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcUser = &v + _field = &v } + p.JdbcUser = _field return nil } - func (p *TJdbcTable) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcPassword = &v + _field = &v } + p.JdbcPassword = _field return nil } - func (p *TJdbcTable) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcDriverUrl = &v + _field = &v } + p.JdbcDriverUrl = _field return nil } - func (p *TJdbcTable) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcResourceName = &v + _field = &v } + p.JdbcResourceName = _field return nil } - func (p *TJdbcTable) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcDriverClass = &v + _field = &v } + p.JdbcDriverClass = _field return nil } - func (p *TJdbcTable) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcDriverChecksum = &v + _field = &v + } + p.JdbcDriverChecksum = _field + return nil +} +func (p *TJdbcTable) ReadField9(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMinSize = _field + return nil +} +func (p *TJdbcTable) ReadField10(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxSize = _field + return nil +} +func (p *TJdbcTable) ReadField11(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxWaitTime = _field + return nil +} +func (p *TJdbcTable) ReadField12(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxLifeTime = _field + return nil +} +func (p *TJdbcTable) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolKeepAlive = _field + return nil +} +func (p *TJdbcTable) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.CatalogId = _field return nil } @@ -12244,7 +13521,30 @@ func (p *TJdbcTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + 
goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12415,11 +13715,126 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } +func (p *TJdbcTable) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMinSize() { + if err = oprot.WriteFieldBegin("connection_pool_min_size", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMinSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TJdbcTable) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxSize() { + if err = oprot.WriteFieldBegin("connection_pool_max_size", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TJdbcTable) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxWaitTime() { + if err = oprot.WriteFieldBegin("connection_pool_max_wait_time", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxWaitTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TJdbcTable) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxLifeTime() { + if err = oprot.WriteFieldBegin("connection_pool_max_life_time", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxLifeTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TJdbcTable) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolKeepAlive() { + if err = oprot.WriteFieldBegin("connection_pool_keep_alive", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.ConnectionPoolKeepAlive); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TJdbcTable) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalog_id", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CatalogId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + func (p *TJdbcTable) String() string { if p == nil { return "" } return fmt.Sprintf("TJdbcTable(%+v)", *p) + } func (p *TJdbcTable) DeepEqual(ano *TJdbcTable) bool { @@ -12452,20 +13867,38 @@ func (p *TJdbcTable) DeepEqual(ano *TJdbcTable) bool { if !p.Field8DeepEqual(ano.JdbcDriverChecksum) { return false } - return true -} - -func (p *TJdbcTable) Field1DeepEqual(src *string) bool { - - if p.JdbcUrl == src { - return true - } else if p.JdbcUrl == nil || src == nil { + if !p.Field9DeepEqual(ano.ConnectionPoolMinSize) { return false } - if strings.Compare(*p.JdbcUrl, *src) != 0 { + if !p.Field10DeepEqual(ano.ConnectionPoolMaxSize) { return false } - return true + if !p.Field11DeepEqual(ano.ConnectionPoolMaxWaitTime) { + return false + } + if !p.Field12DeepEqual(ano.ConnectionPoolMaxLifeTime) { + return false + } + if !p.Field13DeepEqual(ano.ConnectionPoolKeepAlive) { + return false + } + if !p.Field14DeepEqual(ano.CatalogId) { + return false + } + return true +} + +func (p *TJdbcTable) Field1DeepEqual(src *string) bool { + + if p.JdbcUrl == src { + return true + } else if p.JdbcUrl == nil || src == nil { + return false + } + if strings.Compare(*p.JdbcUrl, *src) != 0 { + return false + } + return true } func (p *TJdbcTable) Field2DeepEqual(src *string) bool { @@ -12551,6 +13984,78 @@ func (p *TJdbcTable) Field8DeepEqual(src *string) bool { } return true } +func (p *TJdbcTable) Field9DeepEqual(src *int32) bool { + + if p.ConnectionPoolMinSize == src { + return true + } else if p.ConnectionPoolMinSize == nil || src == nil { + return false + } + if *p.ConnectionPoolMinSize != *src { + return false + } + return true +} +func (p *TJdbcTable) Field10DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxSize == src { + return true + } else if p.ConnectionPoolMaxSize == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxSize != *src { + return false + } + return true +} +func (p *TJdbcTable) Field11DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxWaitTime == src { + return true + } else if p.ConnectionPoolMaxWaitTime == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxWaitTime != *src { + return false + } + return true +} +func (p *TJdbcTable) Field12DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxLifeTime == src { + return true + } else if p.ConnectionPoolMaxLifeTime == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxLifeTime != *src { + return false + } + return true +} +func (p *TJdbcTable) Field13DeepEqual(src *bool) bool { + + if p.ConnectionPoolKeepAlive == src { + return true + } else if p.ConnectionPoolKeepAlive == nil || src == nil { + return false + } + if 
*p.ConnectionPoolKeepAlive != *src { + return false + } + return true +} +func (p *TJdbcTable) Field14DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} type TMCTable struct { Region *string `thrift:"region,1,optional" frugal:"1,optional,string" json:"region,omitempty"` @@ -12559,6 +14064,10 @@ type TMCTable struct { AccessKey *string `thrift:"access_key,4,optional" frugal:"4,optional,string" json:"access_key,omitempty"` SecretKey *string `thrift:"secret_key,5,optional" frugal:"5,optional,string" json:"secret_key,omitempty"` PublicAccess *string `thrift:"public_access,6,optional" frugal:"6,optional,string" json:"public_access,omitempty"` + OdpsUrl *string `thrift:"odps_url,7,optional" frugal:"7,optional,string" json:"odps_url,omitempty"` + TunnelUrl *string `thrift:"tunnel_url,8,optional" frugal:"8,optional,string" json:"tunnel_url,omitempty"` + Endpoint *string `thrift:"endpoint,9,optional" frugal:"9,optional,string" json:"endpoint,omitempty"` + Quota *string `thrift:"quota,10,optional" frugal:"10,optional,string" json:"quota,omitempty"` } func NewTMCTable() *TMCTable { @@ -12566,7 +14075,6 @@ func NewTMCTable() *TMCTable { } func (p *TMCTable) InitDefault() { - *p = TMCTable{} } var TMCTable_Region_DEFAULT string @@ -12622,6 +14130,42 @@ func (p *TMCTable) GetPublicAccess() (v string) { } return *p.PublicAccess } + +var TMCTable_OdpsUrl_DEFAULT string + +func (p *TMCTable) GetOdpsUrl() (v string) { + if !p.IsSetOdpsUrl() { + return TMCTable_OdpsUrl_DEFAULT + } + return *p.OdpsUrl +} + +var TMCTable_TunnelUrl_DEFAULT string + +func (p *TMCTable) GetTunnelUrl() (v string) { + if !p.IsSetTunnelUrl() { + return TMCTable_TunnelUrl_DEFAULT + } + return *p.TunnelUrl +} + +var TMCTable_Endpoint_DEFAULT string + +func (p *TMCTable) GetEndpoint() (v string) { + if !p.IsSetEndpoint() { + return TMCTable_Endpoint_DEFAULT + } + return *p.Endpoint +} + +var TMCTable_Quota_DEFAULT string + +func (p *TMCTable) GetQuota() (v string) { + if !p.IsSetQuota() { + return TMCTable_Quota_DEFAULT + } + return *p.Quota +} func (p *TMCTable) SetRegion(val *string) { p.Region = val } @@ -12640,14 +14184,30 @@ func (p *TMCTable) SetSecretKey(val *string) { func (p *TMCTable) SetPublicAccess(val *string) { p.PublicAccess = val } +func (p *TMCTable) SetOdpsUrl(val *string) { + p.OdpsUrl = val +} +func (p *TMCTable) SetTunnelUrl(val *string) { + p.TunnelUrl = val +} +func (p *TMCTable) SetEndpoint(val *string) { + p.Endpoint = val +} +func (p *TMCTable) SetQuota(val *string) { + p.Quota = val +} var fieldIDToName_TMCTable = map[int16]string{ - 1: "region", - 2: "project", - 3: "table", - 4: "access_key", - 5: "secret_key", - 6: "public_access", + 1: "region", + 2: "project", + 3: "table", + 4: "access_key", + 5: "secret_key", + 6: "public_access", + 7: "odps_url", + 8: "tunnel_url", + 9: "endpoint", + 10: "quota", } func (p *TMCTable) IsSetRegion() bool { @@ -12674,6 +14234,22 @@ func (p *TMCTable) IsSetPublicAccess() bool { return p.PublicAccess != nil } +func (p *TMCTable) IsSetOdpsUrl() bool { + return p.OdpsUrl != nil +} + +func (p *TMCTable) IsSetTunnelUrl() bool { + return p.TunnelUrl != nil +} + +func (p *TMCTable) IsSetEndpoint() bool { + return p.Endpoint != nil +} + +func (p *TMCTable) IsSetQuota() bool { + return p.Quota != nil +} + func (p *TMCTable) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -12698,67 +14274,86 @@ func (p 
*TMCTable) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRING { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRING { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12784,56 +14379,113 @@ ReadStructEndError: } func (p *TMCTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Region = &v + _field = &v } + p.Region = _field return nil } - func (p *TMCTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Project = &v + _field = &v } + p.Project = _field return nil } - func (p *TMCTable) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = &v + _field = &v } + p.Table = _field return nil } - func (p *TMCTable) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.AccessKey = &v + _field = &v } + p.AccessKey = _field return nil } - func (p *TMCTable) 
ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SecretKey = &v + _field = &v } + p.SecretKey = _field return nil } - func (p *TMCTable) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PublicAccess = _field + return nil +} +func (p *TMCTable) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OdpsUrl = _field + return nil +} +func (p *TMCTable) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TunnelUrl = _field + return nil +} +func (p *TMCTable) ReadField9(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Endpoint = _field + return nil +} +func (p *TMCTable) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.PublicAccess = &v + _field = &v } + p.Quota = _field return nil } @@ -12867,7 +14519,22 @@ func (p *TMCTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12967,7 +14634,894 @@ func (p *TMCTable) writeField5(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("secret_key", thrift.STRING, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.SecretKey); err != nil { + if err := oprot.WriteString(*p.SecretKey); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TMCTable) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPublicAccess() { + if err = oprot.WriteFieldBegin("public_access", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PublicAccess); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TMCTable) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetOdpsUrl() { + if err = oprot.WriteFieldBegin("odps_url", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OdpsUrl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TMCTable) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTunnelUrl() { + if err = oprot.WriteFieldBegin("tunnel_url", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TunnelUrl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TMCTable) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetEndpoint() { + if err = oprot.WriteFieldBegin("endpoint", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Endpoint); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMCTable) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetQuota() { + if err = oprot.WriteFieldBegin("quota", thrift.STRING, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Quota); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMCTable) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TMCTable(%+v)", *p) + +} + +func (p *TMCTable) DeepEqual(ano *TMCTable) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Region) { + return false + } + if !p.Field2DeepEqual(ano.Project) { + return false + } + if !p.Field3DeepEqual(ano.Table) { + return false + } + if !p.Field4DeepEqual(ano.AccessKey) { + return false + } + if !p.Field5DeepEqual(ano.SecretKey) { + return false + } + if !p.Field6DeepEqual(ano.PublicAccess) { + return false + } + if !p.Field7DeepEqual(ano.OdpsUrl) { + return false + } + if !p.Field8DeepEqual(ano.TunnelUrl) { + return false + } + if !p.Field9DeepEqual(ano.Endpoint) { + return false + } + if !p.Field10DeepEqual(ano.Quota) { + return false + } + return true +} + +func (p *TMCTable) Field1DeepEqual(src *string) bool { + + if p.Region == src { + return true + } else if p.Region == nil || src == nil { + return false + } + if strings.Compare(*p.Region, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field2DeepEqual(src *string) bool { + + if p.Project == src { + return true + } else if p.Project == nil || src == nil { + return false + } + if strings.Compare(*p.Project, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field3DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field4DeepEqual(src *string) bool { + + if p.AccessKey == src { + return true + } 
else if p.AccessKey == nil || src == nil { + return false + } + if strings.Compare(*p.AccessKey, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field5DeepEqual(src *string) bool { + + if p.SecretKey == src { + return true + } else if p.SecretKey == nil || src == nil { + return false + } + if strings.Compare(*p.SecretKey, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field6DeepEqual(src *string) bool { + + if p.PublicAccess == src { + return true + } else if p.PublicAccess == nil || src == nil { + return false + } + if strings.Compare(*p.PublicAccess, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field7DeepEqual(src *string) bool { + + if p.OdpsUrl == src { + return true + } else if p.OdpsUrl == nil || src == nil { + return false + } + if strings.Compare(*p.OdpsUrl, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field8DeepEqual(src *string) bool { + + if p.TunnelUrl == src { + return true + } else if p.TunnelUrl == nil || src == nil { + return false + } + if strings.Compare(*p.TunnelUrl, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field9DeepEqual(src *string) bool { + + if p.Endpoint == src { + return true + } else if p.Endpoint == nil || src == nil { + return false + } + if strings.Compare(*p.Endpoint, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field10DeepEqual(src *string) bool { + + if p.Quota == src { + return true + } else if p.Quota == nil || src == nil { + return false + } + if strings.Compare(*p.Quota, *src) != 0 { + return false + } + return true +} + +type TTrinoConnectorTable struct { + DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"` + TableName *string `thrift:"table_name,2,optional" frugal:"2,optional,string" json:"table_name,omitempty"` + Properties map[string]string `thrift:"properties,3,optional" frugal:"3,optional,map" json:"properties,omitempty"` +} + +func NewTTrinoConnectorTable() *TTrinoConnectorTable { + return &TTrinoConnectorTable{} +} + +func (p *TTrinoConnectorTable) InitDefault() { +} + +var TTrinoConnectorTable_DbName_DEFAULT string + +func (p *TTrinoConnectorTable) GetDbName() (v string) { + if !p.IsSetDbName() { + return TTrinoConnectorTable_DbName_DEFAULT + } + return *p.DbName +} + +var TTrinoConnectorTable_TableName_DEFAULT string + +func (p *TTrinoConnectorTable) GetTableName() (v string) { + if !p.IsSetTableName() { + return TTrinoConnectorTable_TableName_DEFAULT + } + return *p.TableName +} + +var TTrinoConnectorTable_Properties_DEFAULT map[string]string + +func (p *TTrinoConnectorTable) GetProperties() (v map[string]string) { + if !p.IsSetProperties() { + return TTrinoConnectorTable_Properties_DEFAULT + } + return p.Properties +} +func (p *TTrinoConnectorTable) SetDbName(val *string) { + p.DbName = val +} +func (p *TTrinoConnectorTable) SetTableName(val *string) { + p.TableName = val +} +func (p *TTrinoConnectorTable) SetProperties(val map[string]string) { + p.Properties = val +} + +var fieldIDToName_TTrinoConnectorTable = map[int16]string{ + 1: "db_name", + 2: "table_name", + 3: "properties", +} + +func (p *TTrinoConnectorTable) IsSetDbName() bool { + return p.DbName != nil +} + +func (p *TTrinoConnectorTable) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *TTrinoConnectorTable) IsSetProperties() bool { + return p.Properties != nil +} + +func (p *TTrinoConnectorTable) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var 
fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTrinoConnectorTable[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTrinoConnectorTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DbName = _field + return nil +} +func (p *TTrinoConnectorTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableName = _field + return nil +} +func (p *TTrinoConnectorTable) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.Properties = _field + return nil +} + +func (p *TTrinoConnectorTable) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTrinoConnectorTable"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTrinoConnectorTable) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTrinoConnectorTable) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTrinoConnectorTable) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err = oprot.WriteFieldBegin("properties", thrift.MAP, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return err + } + for k, v := range p.Properties { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTrinoConnectorTable) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTrinoConnectorTable(%+v)", *p) + +} + +func (p *TTrinoConnectorTable) DeepEqual(ano *TTrinoConnectorTable) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DbName) { + return false + } + if !p.Field2DeepEqual(ano.TableName) { + return false + } + if !p.Field3DeepEqual(ano.Properties) { + return false + } + return true +} + +func (p *TTrinoConnectorTable) Field1DeepEqual(src *string) bool { + + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { + return false + } + if strings.Compare(*p.DbName, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorTable) Field2DeepEqual(src *string) bool { + + if p.TableName == src { + return true + } else if p.TableName == nil || src == nil { + return false + } + if strings.Compare(*p.TableName, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorTable) Field3DeepEqual(src map[string]string) bool { + + if 
len(p.Properties) != len(src) { + return false + } + for k, v := range p.Properties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} + +type TLakeSoulTable struct { + DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"` + TableName *string `thrift:"table_name,2,optional" frugal:"2,optional,string" json:"table_name,omitempty"` + Properties map[string]string `thrift:"properties,3,optional" frugal:"3,optional,map" json:"properties,omitempty"` +} + +func NewTLakeSoulTable() *TLakeSoulTable { + return &TLakeSoulTable{} +} + +func (p *TLakeSoulTable) InitDefault() { +} + +var TLakeSoulTable_DbName_DEFAULT string + +func (p *TLakeSoulTable) GetDbName() (v string) { + if !p.IsSetDbName() { + return TLakeSoulTable_DbName_DEFAULT + } + return *p.DbName +} + +var TLakeSoulTable_TableName_DEFAULT string + +func (p *TLakeSoulTable) GetTableName() (v string) { + if !p.IsSetTableName() { + return TLakeSoulTable_TableName_DEFAULT + } + return *p.TableName +} + +var TLakeSoulTable_Properties_DEFAULT map[string]string + +func (p *TLakeSoulTable) GetProperties() (v map[string]string) { + if !p.IsSetProperties() { + return TLakeSoulTable_Properties_DEFAULT + } + return p.Properties +} +func (p *TLakeSoulTable) SetDbName(val *string) { + p.DbName = val +} +func (p *TLakeSoulTable) SetTableName(val *string) { + p.TableName = val +} +func (p *TLakeSoulTable) SetProperties(val map[string]string) { + p.Properties = val +} + +var fieldIDToName_TLakeSoulTable = map[int16]string{ + 1: "db_name", + 2: "table_name", + 3: "properties", +} + +func (p *TLakeSoulTable) IsSetDbName() bool { + return p.DbName != nil +} + +func (p *TLakeSoulTable) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *TLakeSoulTable) IsSetProperties() bool { + return p.Properties != nil +} + +func (p *TLakeSoulTable) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLakeSoulTable[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field 
%d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TLakeSoulTable) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DbName = _field + return nil +} +func (p *TLakeSoulTable) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableName = _field + return nil +} +func (p *TLakeSoulTable) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.Properties = _field + return nil +} + +func (p *TLakeSoulTable) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLakeSoulTable"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLakeSoulTable) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLakeSoulTable) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12976,17 +15530,28 @@ func (p *TMCTable) writeField5(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) 
WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMCTable) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetPublicAccess() { - if err = oprot.WriteFieldBegin("public_access", thrift.STRING, 6); err != nil { +func (p *TLakeSoulTable) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err = oprot.WriteFieldBegin("properties", thrift.MAP, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.PublicAccess); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return err + } + for k, v := range p.Properties { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -12995,136 +15560,95 @@ func (p *TMCTable) writeField6(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMCTable) String() string { +func (p *TLakeSoulTable) String() string { if p == nil { return "" } - return fmt.Sprintf("TMCTable(%+v)", *p) + return fmt.Sprintf("TLakeSoulTable(%+v)", *p) + } -func (p *TMCTable) DeepEqual(ano *TMCTable) bool { +func (p *TLakeSoulTable) DeepEqual(ano *TLakeSoulTable) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Region) { - return false - } - if !p.Field2DeepEqual(ano.Project) { - return false - } - if !p.Field3DeepEqual(ano.Table) { - return false - } - if !p.Field4DeepEqual(ano.AccessKey) { - return false - } - if !p.Field5DeepEqual(ano.SecretKey) { - return false - } - if !p.Field6DeepEqual(ano.PublicAccess) { - return false - } - return true -} - -func (p *TMCTable) Field1DeepEqual(src *string) bool { - - if p.Region == src { - return true - } else if p.Region == nil || src == nil { - return false - } - if strings.Compare(*p.Region, *src) != 0 { + if !p.Field1DeepEqual(ano.DbName) { return false } - return true -} -func (p *TMCTable) Field2DeepEqual(src *string) bool { - - if p.Project == src { - return true - } else if p.Project == nil || src == nil { + if !p.Field2DeepEqual(ano.TableName) { return false } - if strings.Compare(*p.Project, *src) != 0 { + if !p.Field3DeepEqual(ano.Properties) { return false } return true } -func (p *TMCTable) Field3DeepEqual(src *string) bool { - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false - } - if strings.Compare(*p.Table, *src) != 0 { - return false - } - return true -} -func (p *TMCTable) Field4DeepEqual(src *string) bool { +func (p *TLakeSoulTable) Field1DeepEqual(src *string) bool { - if p.AccessKey == src { + if p.DbName == src { return true - } else if p.AccessKey == nil || src == nil { + } else if p.DbName == nil || src == nil { return false } - if strings.Compare(*p.AccessKey, *src) != 0 { + if strings.Compare(*p.DbName, *src) != 0 { return false } return true } -func (p *TMCTable) Field5DeepEqual(src *string) bool { 
+func (p *TLakeSoulTable) Field2DeepEqual(src *string) bool { - if p.SecretKey == src { + if p.TableName == src { return true - } else if p.SecretKey == nil || src == nil { + } else if p.TableName == nil || src == nil { return false } - if strings.Compare(*p.SecretKey, *src) != 0 { + if strings.Compare(*p.TableName, *src) != 0 { return false } return true } -func (p *TMCTable) Field6DeepEqual(src *string) bool { +func (p *TLakeSoulTable) Field3DeepEqual(src map[string]string) bool { - if p.PublicAccess == src { - return true - } else if p.PublicAccess == nil || src == nil { + if len(p.Properties) != len(src) { return false } - if strings.Compare(*p.PublicAccess, *src) != 0 { - return false + for k, v := range p.Properties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } } return true } type TTableDescriptor struct { - Id types.TTableId `thrift:"id,1,required" frugal:"1,required,i64" json:"id"` - TableType types.TTableType `thrift:"tableType,2,required" frugal:"2,required,TTableType" json:"tableType"` - NumCols int32 `thrift:"numCols,3,required" frugal:"3,required,i32" json:"numCols"` - NumClusteringCols int32 `thrift:"numClusteringCols,4,required" frugal:"4,required,i32" json:"numClusteringCols"` - TableName string `thrift:"tableName,7,required" frugal:"7,required,string" json:"tableName"` - DbName string `thrift:"dbName,8,required" frugal:"8,required,string" json:"dbName"` - MysqlTable *TMySQLTable `thrift:"mysqlTable,10,optional" frugal:"10,optional,TMySQLTable" json:"mysqlTable,omitempty"` - OlapTable *TOlapTable `thrift:"olapTable,11,optional" frugal:"11,optional,TOlapTable" json:"olapTable,omitempty"` - SchemaTable *TSchemaTable `thrift:"schemaTable,12,optional" frugal:"12,optional,TSchemaTable" json:"schemaTable,omitempty"` - BrokerTable *TBrokerTable `thrift:"BrokerTable,14,optional" frugal:"14,optional,TBrokerTable" json:"BrokerTable,omitempty"` - EsTable *TEsTable `thrift:"esTable,15,optional" frugal:"15,optional,TEsTable" json:"esTable,omitempty"` - OdbcTable *TOdbcTable `thrift:"odbcTable,16,optional" frugal:"16,optional,TOdbcTable" json:"odbcTable,omitempty"` - HiveTable *THiveTable `thrift:"hiveTable,17,optional" frugal:"17,optional,THiveTable" json:"hiveTable,omitempty"` - IcebergTable *TIcebergTable `thrift:"icebergTable,18,optional" frugal:"18,optional,TIcebergTable" json:"icebergTable,omitempty"` - HudiTable *THudiTable `thrift:"hudiTable,19,optional" frugal:"19,optional,THudiTable" json:"hudiTable,omitempty"` - JdbcTable *TJdbcTable `thrift:"jdbcTable,20,optional" frugal:"20,optional,TJdbcTable" json:"jdbcTable,omitempty"` - McTable *TMCTable `thrift:"mcTable,21,optional" frugal:"21,optional,TMCTable" json:"mcTable,omitempty"` + Id types.TTableId `thrift:"id,1,required" frugal:"1,required,i64" json:"id"` + TableType types.TTableType `thrift:"tableType,2,required" frugal:"2,required,TTableType" json:"tableType"` + NumCols int32 `thrift:"numCols,3,required" frugal:"3,required,i32" json:"numCols"` + NumClusteringCols int32 `thrift:"numClusteringCols,4,required" frugal:"4,required,i32" json:"numClusteringCols"` + TableName string `thrift:"tableName,7,required" frugal:"7,required,string" json:"tableName"` + DbName string `thrift:"dbName,8,required" frugal:"8,required,string" json:"dbName"` + MysqlTable *TMySQLTable `thrift:"mysqlTable,10,optional" frugal:"10,optional,TMySQLTable" json:"mysqlTable,omitempty"` + OlapTable *TOlapTable `thrift:"olapTable,11,optional" frugal:"11,optional,TOlapTable" json:"olapTable,omitempty"` + SchemaTable *TSchemaTable 
`thrift:"schemaTable,12,optional" frugal:"12,optional,TSchemaTable" json:"schemaTable,omitempty"` + BrokerTable *TBrokerTable `thrift:"BrokerTable,14,optional" frugal:"14,optional,TBrokerTable" json:"BrokerTable,omitempty"` + EsTable *TEsTable `thrift:"esTable,15,optional" frugal:"15,optional,TEsTable" json:"esTable,omitempty"` + OdbcTable *TOdbcTable `thrift:"odbcTable,16,optional" frugal:"16,optional,TOdbcTable" json:"odbcTable,omitempty"` + HiveTable *THiveTable `thrift:"hiveTable,17,optional" frugal:"17,optional,THiveTable" json:"hiveTable,omitempty"` + IcebergTable *TIcebergTable `thrift:"icebergTable,18,optional" frugal:"18,optional,TIcebergTable" json:"icebergTable,omitempty"` + HudiTable *THudiTable `thrift:"hudiTable,19,optional" frugal:"19,optional,THudiTable" json:"hudiTable,omitempty"` + JdbcTable *TJdbcTable `thrift:"jdbcTable,20,optional" frugal:"20,optional,TJdbcTable" json:"jdbcTable,omitempty"` + McTable *TMCTable `thrift:"mcTable,21,optional" frugal:"21,optional,TMCTable" json:"mcTable,omitempty"` + TrinoConnectorTable *TTrinoConnectorTable `thrift:"trinoConnectorTable,22,optional" frugal:"22,optional,TTrinoConnectorTable" json:"trinoConnectorTable,omitempty"` + LakesoulTable *TLakeSoulTable `thrift:"lakesoulTable,23,optional" frugal:"23,optional,TLakeSoulTable" json:"lakesoulTable,omitempty"` } func NewTTableDescriptor() *TTableDescriptor { @@ -13132,7 +15656,6 @@ func NewTTableDescriptor() *TTableDescriptor { } func (p *TTableDescriptor) InitDefault() { - *p = TTableDescriptor{} } func (p *TTableDescriptor) GetId() (v types.TTableId) { @@ -13257,6 +15780,24 @@ func (p *TTableDescriptor) GetMcTable() (v *TMCTable) { } return p.McTable } + +var TTableDescriptor_TrinoConnectorTable_DEFAULT *TTrinoConnectorTable + +func (p *TTableDescriptor) GetTrinoConnectorTable() (v *TTrinoConnectorTable) { + if !p.IsSetTrinoConnectorTable() { + return TTableDescriptor_TrinoConnectorTable_DEFAULT + } + return p.TrinoConnectorTable +} + +var TTableDescriptor_LakesoulTable_DEFAULT *TLakeSoulTable + +func (p *TTableDescriptor) GetLakesoulTable() (v *TLakeSoulTable) { + if !p.IsSetLakesoulTable() { + return TTableDescriptor_LakesoulTable_DEFAULT + } + return p.LakesoulTable +} func (p *TTableDescriptor) SetId(val types.TTableId) { p.Id = val } @@ -13308,6 +15849,12 @@ func (p *TTableDescriptor) SetJdbcTable(val *TJdbcTable) { func (p *TTableDescriptor) SetMcTable(val *TMCTable) { p.McTable = val } +func (p *TTableDescriptor) SetTrinoConnectorTable(val *TTrinoConnectorTable) { + p.TrinoConnectorTable = val +} +func (p *TTableDescriptor) SetLakesoulTable(val *TLakeSoulTable) { + p.LakesoulTable = val +} var fieldIDToName_TTableDescriptor = map[int16]string{ 1: "id", @@ -13327,6 +15874,8 @@ var fieldIDToName_TTableDescriptor = map[int16]string{ 19: "hudiTable", 20: "jdbcTable", 21: "mcTable", + 22: "trinoConnectorTable", + 23: "lakesoulTable", } func (p *TTableDescriptor) IsSetMysqlTable() bool { @@ -13373,6 +15922,14 @@ func (p *TTableDescriptor) IsSetMcTable() bool { return p.McTable != nil } +func (p *TTableDescriptor) IsSetTrinoConnectorTable() bool { + return p.TrinoConnectorTable != nil +} + +func (p *TTableDescriptor) IsSetLakesoulTable() bool { + return p.LakesoulTable != nil +} + func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -13404,10 +15961,8 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -13415,10 +15970,8 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -13426,10 +15979,8 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumCols = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -13437,10 +15988,8 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumClusteringCols = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { @@ -13448,10 +15997,8 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { @@ -13459,127 +16006,118 @@ func (p *TTableDescriptor) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.STRUCT { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.STRUCT { if err = p.ReadField17(iprot); 
err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.STRUCT { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRUCT { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.STRUCT { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.STRUCT { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13636,144 +16174,173 @@ RequiredFieldNotSetError: } func (p *TTableDescriptor) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTableId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = v + _field = v } + p.Id = _field return nil } - func (p *TTableDescriptor) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTableType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TableType = types.TTableType(v) + _field = types.TTableType(v) } + p.TableType = _field return nil } - func (p *TTableDescriptor) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumCols = v + _field = v } + p.NumCols = _field return nil } - func (p *TTableDescriptor) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumClusteringCols = v + _field = v } + p.NumClusteringCols = _field return nil } - func (p *TTableDescriptor) ReadField7(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *TTableDescriptor) ReadField8(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = v + _field = v } + p.DbName = _field return nil } - func (p *TTableDescriptor) ReadField10(iprot thrift.TProtocol) error { - p.MysqlTable = NewTMySQLTable() - if err := p.MysqlTable.Read(iprot); err != nil { + _field := NewTMySQLTable() + if err := _field.Read(iprot); err != nil { return err } + 
p.MysqlTable = _field return nil } - func (p *TTableDescriptor) ReadField11(iprot thrift.TProtocol) error { - p.OlapTable = NewTOlapTable() - if err := p.OlapTable.Read(iprot); err != nil { + _field := NewTOlapTable() + if err := _field.Read(iprot); err != nil { return err } + p.OlapTable = _field return nil } - func (p *TTableDescriptor) ReadField12(iprot thrift.TProtocol) error { - p.SchemaTable = NewTSchemaTable() - if err := p.SchemaTable.Read(iprot); err != nil { + _field := NewTSchemaTable() + if err := _field.Read(iprot); err != nil { return err } + p.SchemaTable = _field return nil } - func (p *TTableDescriptor) ReadField14(iprot thrift.TProtocol) error { - p.BrokerTable = NewTBrokerTable() - if err := p.BrokerTable.Read(iprot); err != nil { + _field := NewTBrokerTable() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerTable = _field return nil } - func (p *TTableDescriptor) ReadField15(iprot thrift.TProtocol) error { - p.EsTable = NewTEsTable() - if err := p.EsTable.Read(iprot); err != nil { + _field := NewTEsTable() + if err := _field.Read(iprot); err != nil { return err } + p.EsTable = _field return nil } - func (p *TTableDescriptor) ReadField16(iprot thrift.TProtocol) error { - p.OdbcTable = NewTOdbcTable() - if err := p.OdbcTable.Read(iprot); err != nil { + _field := NewTOdbcTable() + if err := _field.Read(iprot); err != nil { return err } + p.OdbcTable = _field return nil } - func (p *TTableDescriptor) ReadField17(iprot thrift.TProtocol) error { - p.HiveTable = NewTHiveTable() - if err := p.HiveTable.Read(iprot); err != nil { + _field := NewTHiveTable() + if err := _field.Read(iprot); err != nil { return err } + p.HiveTable = _field return nil } - func (p *TTableDescriptor) ReadField18(iprot thrift.TProtocol) error { - p.IcebergTable = NewTIcebergTable() - if err := p.IcebergTable.Read(iprot); err != nil { + _field := NewTIcebergTable() + if err := _field.Read(iprot); err != nil { return err } + p.IcebergTable = _field return nil } - func (p *TTableDescriptor) ReadField19(iprot thrift.TProtocol) error { - p.HudiTable = NewTHudiTable() - if err := p.HudiTable.Read(iprot); err != nil { + _field := NewTHudiTable() + if err := _field.Read(iprot); err != nil { return err } + p.HudiTable = _field return nil } - func (p *TTableDescriptor) ReadField20(iprot thrift.TProtocol) error { - p.JdbcTable = NewTJdbcTable() - if err := p.JdbcTable.Read(iprot); err != nil { + _field := NewTJdbcTable() + if err := _field.Read(iprot); err != nil { return err } + p.JdbcTable = _field return nil } - func (p *TTableDescriptor) ReadField21(iprot thrift.TProtocol) error { - p.McTable = NewTMCTable() - if err := p.McTable.Read(iprot); err != nil { + _field := NewTMCTable() + if err := _field.Read(iprot); err != nil { + return err + } + p.McTable = _field + return nil +} +func (p *TTableDescriptor) ReadField22(iprot thrift.TProtocol) error { + _field := NewTTrinoConnectorTable() + if err := _field.Read(iprot); err != nil { return err } + p.TrinoConnectorTable = _field + return nil +} +func (p *TTableDescriptor) ReadField23(iprot thrift.TProtocol) error { + _field := NewTLakeSoulTable() + if err := _field.Read(iprot); err != nil { + return err + } + p.LakesoulTable = _field return nil } @@ -13851,7 +16418,14 @@ func (p *TTableDescriptor) Write(oprot thrift.TProtocol) (err error) { fieldId = 21 goto WriteFieldError } - + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto 
WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14181,11 +16755,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) } +func (p *TTableDescriptor) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorTable() { + if err = oprot.WriteFieldBegin("trinoConnectorTable", thrift.STRUCT, 22); err != nil { + goto WriteFieldBeginError + } + if err := p.TrinoConnectorTable.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TTableDescriptor) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetLakesoulTable() { + if err = oprot.WriteFieldBegin("lakesoulTable", thrift.STRUCT, 23); err != nil { + goto WriteFieldBeginError + } + if err := p.LakesoulTable.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + func (p *TTableDescriptor) String() string { if p == nil { return "" } return fmt.Sprintf("TTableDescriptor(%+v)", *p) + } func (p *TTableDescriptor) DeepEqual(ano *TTableDescriptor) bool { @@ -14245,6 +16858,12 @@ func (p *TTableDescriptor) DeepEqual(ano *TTableDescriptor) bool { if !p.Field21DeepEqual(ano.McTable) { return false } + if !p.Field22DeepEqual(ano.TrinoConnectorTable) { + return false + } + if !p.Field23DeepEqual(ano.LakesoulTable) { + return false + } return true } @@ -14367,6 +16986,20 @@ func (p *TTableDescriptor) Field21DeepEqual(src *TMCTable) bool { } return true } +func (p *TTableDescriptor) Field22DeepEqual(src *TTrinoConnectorTable) bool { + + if !p.TrinoConnectorTable.DeepEqual(src) { + return false + } + return true +} +func (p *TTableDescriptor) Field23DeepEqual(src *TLakeSoulTable) bool { + + if !p.LakesoulTable.DeepEqual(src) { + return false + } + return true +} type TDescriptorTable struct { SlotDescriptors []*TSlotDescriptor `thrift:"slotDescriptors,1,optional" frugal:"1,optional,list" json:"slotDescriptors,omitempty"` @@ -14379,7 +17012,6 @@ func NewTDescriptorTable() *TDescriptorTable { } func (p *TDescriptorTable) InitDefault() { - *p = TDescriptorTable{} } var TDescriptorTable_SlotDescriptors_DEFAULT []*TSlotDescriptor @@ -14452,10 +17084,8 @@ func (p *TDescriptorTable) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -14463,27 +17093,22 @@ func (p *TDescriptorTable) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleDescriptors = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14519,58 +17144,68 @@ func (p *TDescriptorTable) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.SlotDescriptors = make([]*TSlotDescriptor, 0, size) + _field := make([]*TSlotDescriptor, 0, size) + values := make([]TSlotDescriptor, size) for i := 0; i < size; i++ { - _elem := NewTSlotDescriptor() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SlotDescriptors = append(p.SlotDescriptors, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SlotDescriptors = _field return nil } - func (p *TDescriptorTable) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TupleDescriptors = make([]*TTupleDescriptor, 0, size) + _field := make([]*TTupleDescriptor, 0, size) + values := make([]TTupleDescriptor, size) for i := 0; i < size; i++ { - _elem := NewTTupleDescriptor() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TupleDescriptors = append(p.TupleDescriptors, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TupleDescriptors = _field return nil } - func (p *TDescriptorTable) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TableDescriptors = make([]*TTableDescriptor, 0, size) + _field := make([]*TTableDescriptor, 0, size) + values := make([]TTableDescriptor, size) for i := 0; i < size; i++ { - _elem := NewTTableDescriptor() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TableDescriptors = append(p.TableDescriptors, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TableDescriptors = _field return nil } @@ -14592,7 +17227,6 @@ func (p *TDescriptorTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14695,6 +17329,7 @@ func (p *TDescriptorTable) String() string { return "" } return fmt.Sprintf("TDescriptorTable(%+v)", *p) + } func (p *TDescriptorTable) DeepEqual(ano *TDescriptorTable) bool { diff --git a/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go b/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go index 97ac8852..b05de6ef 100644 --- a/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go +++ b/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package descriptors @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/partitions" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" @@ -307,6 +308,34 @@ func (p *TColumn) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 19: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -609,6 +638,34 @@ func (p *TColumn) FastReadField18(buf []byte) (int, error) { return offset, nil } +func (p *TColumn) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ClusterKeyId = v + + } + return offset, nil +} + +func (p *TColumn) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BeExecVersion = v + + } + return offset, nil +} + // for compatibility func (p *TColumn) FastWrite(buf []byte) int { return 0 @@ -629,6 +686,8 @@ func (p *TColumn) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField17(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -664,6 +723,8 @@ func (p *TColumn) BLength() int { l += p.field16Length() l += p.field17Length() l += p.field18Length() + l += p.field19Length() + l += p.field20Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -869,6 +930,28 @@ func (p *TColumn) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter return offset } +func (p *TColumn) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetClusterKeyId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_key_id", thrift.I32, 19) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ClusterKeyId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TColumn) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeExecVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_exec_version", thrift.I32, 20) + offset += bthrift.Binary.WriteI32(buf[offset:], p.BeExecVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TColumn) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 1) @@ -1064,6 +1147,28 @@ func (p *TColumn) 
field18Length() int { return l } +func (p *TColumn) field19Length() int { + l := 0 + if p.IsSetClusterKeyId() { + l += bthrift.Binary.FieldBeginLength("cluster_key_id", thrift.I32, 19) + l += bthrift.Binary.I32Length(p.ClusterKeyId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TColumn) field20Length() int { + l := 0 + if p.IsSetBeExecVersion() { + l += bthrift.Binary.FieldBeginLength("be_exec_version", thrift.I32, 20) + l += bthrift.Binary.I32Length(p.BeExecVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TSlotDescriptor) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1316,6 +1421,34 @@ func (p *TSlotDescriptor) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 16: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1627,6 +1760,33 @@ func (p *TSlotDescriptor) FastReadField15(buf []byte) (int, error) { return offset, nil } +func (p *TSlotDescriptor) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColDefaultValue = &v + + } + return offset, nil +} + +func (p *TSlotDescriptor) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PrimitiveType = types.TPrimitiveType(v) + + } + return offset, nil +} + // for compatibility func (p *TSlotDescriptor) FastWrite(buf []byte) int { return 0 @@ -1651,6 +1811,8 @@ func (p *TSlotDescriptor) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1676,6 +1838,8 @@ func (p *TSlotDescriptor) BLength() int { l += p.field13Length() l += p.field14Length() l += p.field15Length() + l += p.field16Length() + l += p.field17Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1834,6 +1998,28 @@ func (p *TSlotDescriptor) fastWriteField15(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TSlotDescriptor) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColDefaultValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "col_default_value", thrift.STRING, 16) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColDefaultValue) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSlotDescriptor) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetPrimitiveType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "primitive_type", thrift.I32, 17) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.PrimitiveType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TSlotDescriptor) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("id", thrift.I32, 1) @@ -1982,6 +2168,28 @@ func (p *TSlotDescriptor) field15Length() int { return l } +func (p *TSlotDescriptor) field16Length() int { + l := 0 + if p.IsSetColDefaultValue() { + l += bthrift.Binary.FieldBeginLength("col_default_value", thrift.STRING, 16) + l += bthrift.Binary.StringLengthNocopy(*p.ColDefaultValue) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSlotDescriptor) field17Length() int { + l := 0 + if p.IsSetPrimitiveType() { + l += bthrift.Binary.FieldBeginLength("primitive_type", thrift.I32, 17) + l += bthrift.Binary.I32Length(int32(p.PrimitiveType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTupleDescriptor) FastRead(buf []byte) (int, error) { var err error var offset int @@ -2719,6 +2927,20 @@ func (p *TOlapTablePartition) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 11: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2974,6 +3196,19 @@ func (p *TOlapTablePartition) FastReadField10(buf []byte) (int, error) { return offset, nil } +func (p *TOlapTablePartition) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadTabletIdx = &v + + } + return offset, nil +} + // for compatibility func (p *TOlapTablePartition) FastWrite(buf []byte) int { return 0 @@ -2987,6 +3222,7 @@ func (p *TOlapTablePartition) FastWriteNocopy(buf []byte, binaryWriter bthrift.B offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) @@ -3013,6 +3249,7 @@ func (p *TOlapTablePartition) BLength() int { l += p.field8Length() l += p.field9Length() l += p.field10Length() + l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -3157,6 +3394,17 @@ func (p *TOlapTablePartition) fastWriteField10(buf []byte, binaryWriter bthrift. 
return offset } +func (p *TOlapTablePartition) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadTabletIdx() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_tablet_idx", thrift.I64, 11) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadTabletIdx) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapTablePartition) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) @@ -3275,6 +3523,17 @@ func (p *TOlapTablePartition) field10Length() int { return l } +func (p *TOlapTablePartition) field11Length() int { + l := 0 + if p.IsSetLoadTabletIdx() { + l += bthrift.Binary.FieldBeginLength("load_tablet_idx", thrift.I64, 11) + l += bthrift.Binary.I64Length(*p.LoadTabletIdx) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TOlapTablePartitionParam) FastRead(buf []byte) (int, error) { var err error var offset int @@ -3445,6 +3704,48 @@ func (p *TOlapTablePartitionParam) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 11: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -3698,10 +3999,50 @@ func (p *TOlapTablePartitionParam) FastReadField10(buf []byte) (int, error) { return offset, nil } -// for compatibility -func (p *TOlapTablePartitionParam) FastWrite(buf []byte) int { - return 0 -} +func (p *TOlapTablePartitionParam) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableAutoDetectOverwrite = &v + + } + return offset, nil +} + +func (p *TOlapTablePartitionParam) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OverwriteGroupId = &v + + } + return offset, nil +} + +func (p *TOlapTablePartitionParam) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionsIsFake = v + + } + return offset, nil +} + +// for compatibility +func (p *TOlapTablePartitionParam) FastWrite(buf []byte) int { + return 0 +} func (p *TOlapTablePartitionParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 @@ -3711,6 +4052,9 @@ func (p *TOlapTablePartitionParam) FastWriteNocopy(buf []byte, binaryWriter bthr offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += 
p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) @@ -3737,6 +4081,9 @@ func (p *TOlapTablePartitionParam) BLength() int { l += p.field8Length() l += p.field9Length() l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -3875,6 +4222,39 @@ func (p *TOlapTablePartitionParam) fastWriteField10(buf []byte, binaryWriter bth return offset } +func (p *TOlapTablePartitionParam) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableAutoDetectOverwrite() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_auto_detect_overwrite", thrift.BOOL, 11) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableAutoDetectOverwrite) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTablePartitionParam) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOverwriteGroupId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite_group_id", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.OverwriteGroupId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTablePartitionParam) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionsIsFake() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions_is_fake", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], p.PartitionsIsFake) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapTablePartitionParam) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) @@ -3991,6 +4371,39 @@ func (p *TOlapTablePartitionParam) field10Length() int { return l } +func (p *TOlapTablePartitionParam) field11Length() int { + l := 0 + if p.IsSetEnableAutoDetectOverwrite() { + l += bthrift.Binary.FieldBeginLength("enable_auto_detect_overwrite", thrift.BOOL, 11) + l += bthrift.Binary.BoolLength(*p.EnableAutoDetectOverwrite) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTablePartitionParam) field12Length() int { + l := 0 + if p.IsSetOverwriteGroupId() { + l += bthrift.Binary.FieldBeginLength("overwrite_group_id", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.OverwriteGroupId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTablePartitionParam) field13Length() int { + l := 0 + if p.IsSetPartitionsIsFake() { + l += bthrift.Binary.FieldBeginLength("partitions_is_fake", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(p.PartitionsIsFake) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TOlapTableIndex) FastRead(buf []byte) (int, error) { var err error var offset int @@ -4097,6 +4510,20 @@ func (p *TOlapTableIndex) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4256,6 +4683,36 @@ func (p *TOlapTableIndex) FastReadField6(buf []byte) (int, error) { return offset, nil } +func (p *TOlapTableIndex) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnUniqueIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnUniqueIds = append(p.ColumnUniqueIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TOlapTableIndex) FastWrite(buf []byte) int { return 0 @@ -4271,6 +4728,7 @@ func (p *TOlapTableIndex) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -4287,6 +4745,7 @@ func (p *TOlapTableIndex) BLength() int { l += p.field4Length() l += p.field5Length() l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4378,6 +4837,25 @@ func (p *TOlapTableIndex) fastWriteField6(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TOlapTableIndex) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnUniqueIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_unique_ids", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.ColumnUniqueIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapTableIndex) field1Length() int { l := 0 if p.IsSetIndexName() { @@ -4455,6 +4933,19 @@ func (p *TOlapTableIndex) field6Length() int { return l } +func (p *TOlapTableIndex) field7Length() int { + l := 0 + if p.IsSetColumnUniqueIds() { + l += bthrift.Binary.FieldBeginLength("column_unique_ids", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.ColumnUniqueIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.ColumnUniqueIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TOlapTableIndexSchema) FastRead(buf []byte) (int, error) { var err error var offset int @@ -5104,6 +5595,76 @@ func (p *TOlapTableSchemaParam) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset 
+= l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -5349,36 +5910,111 @@ func (p *TOlapTableSchemaParam) FastReadField10(buf []byte) (int, error) { return offset, nil } -// for compatibility -func (p *TOlapTableSchemaParam) FastWrite(buf []byte) int { - return 0 +func (p *TOlapTableSchemaParam) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AutoIncrementColumn = &v + + } + return offset, nil } -func (p *TOlapTableSchemaParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TOlapTableSchemaParam) FastReadField12(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TOlapTableSchemaParam") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AutoIncrementColumnUniqueId = v + } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + return offset, nil } -func (p *TOlapTableSchemaParam) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TOlapTableSchemaParam") - if p != nil { - l += p.field1Length() +func (p *TOlapTableSchemaParam) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat(v) + + } + return offset, nil +} + +func (p *TOlapTableSchemaParam) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TUniqueKeyUpdateMode(v) + p.UniqueKeyUpdateMode = &tmp + + } + return offset, nil +} + +func (p 
*TOlapTableSchemaParam) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SequenceMapColUniqueId = v + + } + return offset, nil +} + +// for compatibility +func (p *TOlapTableSchemaParam) FastWrite(buf []byte) int { + return 0 +} + +func (p *TOlapTableSchemaParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TOlapTableSchemaParam") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TOlapTableSchemaParam) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TOlapTableSchemaParam") + if p != nil { + l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() @@ -5388,6 +6024,11 @@ func (p *TOlapTableSchemaParam) BLength() int { l += p.field8Length() l += p.field9Length() l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5513,6 +6154,61 @@ func (p *TOlapTableSchemaParam) fastWriteField10(buf []byte, binaryWriter bthrif return offset } +func (p *TOlapTableSchemaParam) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAutoIncrementColumn() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auto_increment_column", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AutoIncrementColumn) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSchemaParam) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAutoIncrementColumnUniqueId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auto_increment_column_unique_id", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], p.AutoIncrementColumnUniqueId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSchemaParam) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInvertedIndexFileStorageFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_file_storage_format", thrift.I32, 13) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.InvertedIndexFileStorageFormat)) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSchemaParam) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUniqueKeyUpdateMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unique_key_update_mode", thrift.I32, 14) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.UniqueKeyUpdateMode)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TOlapTableSchemaParam) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSequenceMapColUniqueId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sequence_map_col_unique_id", thrift.I32, 15) + offset += bthrift.Binary.WriteI32(buf[offset:], p.SequenceMapColUniqueId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapTableSchemaParam) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) @@ -5620,6 +6316,61 @@ func (p *TOlapTableSchemaParam) field10Length() int { return l } +func (p *TOlapTableSchemaParam) field11Length() int { + l := 0 + if p.IsSetAutoIncrementColumn() { + l += bthrift.Binary.FieldBeginLength("auto_increment_column", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.AutoIncrementColumn) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSchemaParam) field12Length() int { + l := 0 + if p.IsSetAutoIncrementColumnUniqueId() { + l += bthrift.Binary.FieldBeginLength("auto_increment_column_unique_id", thrift.I32, 12) + l += bthrift.Binary.I32Length(p.AutoIncrementColumnUniqueId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSchemaParam) field13Length() int { + l := 0 + if p.IsSetInvertedIndexFileStorageFormat() { + l += bthrift.Binary.FieldBeginLength("inverted_index_file_storage_format", thrift.I32, 13) + l += bthrift.Binary.I32Length(int32(p.InvertedIndexFileStorageFormat)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSchemaParam) field14Length() int { + l := 0 + if p.IsSetUniqueKeyUpdateMode() { + l += bthrift.Binary.FieldBeginLength("unique_key_update_mode", thrift.I32, 14) + l += bthrift.Binary.I32Length(int32(*p.UniqueKeyUpdateMode)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TOlapTableSchemaParam) field15Length() int { + l := 0 + if p.IsSetSequenceMapColUniqueId() { + l += bthrift.Binary.FieldBeginLength("sequence_map_col_unique_id", thrift.I32, 15) + l += bthrift.Binary.I32Length(p.SequenceMapColUniqueId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTabletLocation) FastRead(buf []byte) (int, error) { var err error var offset int @@ -7806,7 +8557,7 @@ func (p *TEsTable) FastRead(buf []byte) (int, error) { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l if err != nil { - goto SkipFieldTypeError + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -7826,9 +8577,8 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) 
ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: @@ -8022,7 +8772,7 @@ func (p *TBrokerTable) FastRead(buf []byte) (int, error) { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l if err != nil { - goto SkipFieldTypeError + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -8042,9 +8792,8 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: @@ -9074,6 +9823,90 @@ func (p *TJdbcTable) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 9: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9213,20 +10046,104 @@ func (p *TJdbcTable) FastReadField8(buf []byte) (int, error) { return offset, nil } -// for compatibility -func (p *TJdbcTable) FastWrite(buf []byte) int { - return 0 +func (p *TJdbcTable) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMinSize = &v + + } + return offset, nil } -func (p *TJdbcTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TJdbcTable) FastReadField10(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJdbcTable") - if p != 
nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMaxSize = &v + + } + return offset, nil +} + +func (p *TJdbcTable) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMaxWaitTime = &v + + } + return offset, nil +} + +func (p *TJdbcTable) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMaxLifeTime = &v + + } + return offset, nil +} + +func (p *TJdbcTable) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolKeepAlive = &v + + } + return offset, nil +} + +func (p *TJdbcTable) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CatalogId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TJdbcTable) FastWrite(buf []byte) int { + return 0 +} + +func (p *TJdbcTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJdbcTable") + if p != nil { + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) @@ -9248,6 +10165,12 @@ func (p *TJdbcTable) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -9342,6 +10265,72 @@ func (p *TJdbcTable) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p *TJdbcTable) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMinSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_min_size", thrift.I32, 9) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMinSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcTable) fastWriteField10(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMaxSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_size", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcTable) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMaxWaitTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_wait_time", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxWaitTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcTable) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMaxLifeTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_life_time", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxLifeTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcTable) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolKeepAlive() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_keep_alive", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ConnectionPoolKeepAlive) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcTable) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog_id", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TJdbcTable) field1Length() int { l := 0 if p.IsSetJdbcUrl() { @@ -9430,6 +10419,72 @@ func (p *TJdbcTable) field8Length() int { return l } +func (p *TJdbcTable) field9Length() int { + l := 0 + if p.IsSetConnectionPoolMinSize() { + l += bthrift.Binary.FieldBeginLength("connection_pool_min_size", thrift.I32, 9) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMinSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcTable) field10Length() int { + l := 0 + if p.IsSetConnectionPoolMaxSize() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_size", thrift.I32, 10) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcTable) field11Length() int { + l := 0 + if p.IsSetConnectionPoolMaxWaitTime() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_wait_time", thrift.I32, 11) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxWaitTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcTable) field12Length() int { + l := 0 + if p.IsSetConnectionPoolMaxLifeTime() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_life_time", thrift.I32, 12) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxLifeTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcTable) field13Length() int { + l := 0 + if p.IsSetConnectionPoolKeepAlive() { + l += bthrift.Binary.FieldBeginLength("connection_pool_keep_alive", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.ConnectionPoolKeepAlive) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcTable) field14Length() int { + l := 0 + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalog_id", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.CatalogId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMCTable) FastRead(buf []byte) (int, error) { var err error var offset int @@ -9536,6 +10591,836 @@ func (p *TMCTable) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMCTable[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TMCTable) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Region = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Project = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Table = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, 
err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AccessKey = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SecretKey = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PublicAccess = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OdpsUrl = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TunnelUrl = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Endpoint = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Quota = &v + + } + return offset, nil +} + +// for compatibility +func (p *TMCTable) FastWrite(buf []byte) int { + return 0 +} + +func (p *TMCTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMCTable") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TMCTable) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMCTable") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TMCTable) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRegion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "region", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Region) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + 
offset := 0 + if p.IsSetProject() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "project", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Project) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAccessKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "access_key", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AccessKey) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSecretKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "secret_key", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SecretKey) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPublicAccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "public_access", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PublicAccess) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOdpsUrl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "odps_url", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OdpsUrl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTunnelUrl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tunnel_url", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TunnelUrl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "endpoint", thrift.STRING, 9) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Endpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQuota() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "quota", thrift.STRING, 10) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Quota) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) field1Length() int { + l := 0 + if p.IsSetRegion() { + l += bthrift.Binary.FieldBeginLength("region", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Region) + 
+ l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field2Length() int { + l := 0 + if p.IsSetProject() { + l += bthrift.Binary.FieldBeginLength("project", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Project) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field3Length() int { + l := 0 + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Table) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field4Length() int { + l := 0 + if p.IsSetAccessKey() { + l += bthrift.Binary.FieldBeginLength("access_key", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.AccessKey) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field5Length() int { + l := 0 + if p.IsSetSecretKey() { + l += bthrift.Binary.FieldBeginLength("secret_key", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.SecretKey) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field6Length() int { + l := 0 + if p.IsSetPublicAccess() { + l += bthrift.Binary.FieldBeginLength("public_access", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.PublicAccess) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field7Length() int { + l := 0 + if p.IsSetOdpsUrl() { + l += bthrift.Binary.FieldBeginLength("odps_url", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.OdpsUrl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field8Length() int { + l := 0 + if p.IsSetTunnelUrl() { + l += bthrift.Binary.FieldBeginLength("tunnel_url", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.TunnelUrl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field9Length() int { + l := 0 + if p.IsSetEndpoint() { + l += bthrift.Binary.FieldBeginLength("endpoint", thrift.STRING, 9) + l += bthrift.Binary.StringLengthNocopy(*p.Endpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field10Length() int { + l := 0 + if p.IsSetQuota() { + l += bthrift.Binary.FieldBeginLength("quota", thrift.STRING, 10) + l += bthrift.Binary.StringLengthNocopy(*p.Quota) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorTable) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != 
nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTrinoConnectorTable[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTrinoConnectorTable) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbName = &v + + } + return offset, nil +} + +func (p *TTrinoConnectorTable) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableName = &v + + } + return offset, nil +} + +func (p *TTrinoConnectorTable) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Properties = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.Properties[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TTrinoConnectorTable) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTrinoConnectorTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTrinoConnectorTable") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTrinoConnectorTable) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTrinoConnectorTable") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p 
*TTrinoConnectorTable) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorTable) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorTable) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 3) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.Properties { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorTable) field1Length() int { + l := 0 + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorTable) field2Length() int { + l := 0 + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorTable) field3Length() int { + l := 0 + if p.IsSetProperties() { + l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) + for k, v := range p.Properties { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulTable) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) 
+ offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9562,7 +11447,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMCTable[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLakeSoulTable[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9571,248 +11456,182 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TMCTable) FastReadField1(buf []byte) (int, error) { +func (p *TLakeSoulTable) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Region = &v + p.DbName = &v } return offset, nil } -func (p *TMCTable) FastReadField2(buf []byte) (int, error) { +func (p *TLakeSoulTable) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Project = &v + p.TableName = &v } return offset, nil } -func (p *TMCTable) FastReadField3(buf []byte) (int, error) { +func (p *TLakeSoulTable) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - p.Table = &v - } - return offset, nil -} + p.Properties = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TMCTable) FastReadField4(buf []byte) (int, error) { - offset := 0 + _key = v - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.AccessKey = &v + } - } - return offset, nil -} + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TMCTable) FastReadField5(buf []byte) (int, error) { - offset := 0 + _val = v - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.SecretKey = &v + } + p.Properties[_key] = _val } - return offset, nil -} - -func (p *TMCTable) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PublicAccess = &v - } return offset, nil } // for compatibility -func (p *TMCTable) FastWrite(buf []byte) int { +func (p *TLakeSoulTable) FastWrite(buf []byte) int { return 0 } -func (p *TMCTable) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { +func (p *TLakeSoulTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMCTable") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLakeSoulTable") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMCTable) BLength() int { +func (p *TLakeSoulTable) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMCTable") + l += bthrift.Binary.StructBeginLength("TLakeSoulTable") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMCTable) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRegion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "region", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Region) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TMCTable) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetProject() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "project", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Project) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TMCTable) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLakeSoulTable) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMCTable) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLakeSoulTable) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAccessKey() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "access_key", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AccessKey) + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMCTable) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLakeSoulTable) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSecretKey() { - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "secret_key", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SecretKey) + if p.IsSetProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 3) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.Properties { + length++ - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) -func (p *TMCTable) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPublicAccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "public_access", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PublicAccess) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMCTable) field1Length() int { - l := 0 - if p.IsSetRegion() { - l += bthrift.Binary.FieldBeginLength("region", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Region) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TMCTable) field2Length() int { - l := 0 - if p.IsSetProject() { - l += bthrift.Binary.FieldBeginLength("project", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Project) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TMCTable) field3Length() int { +func (p *TLakeSoulTable) field1Length() int { l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Table) + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMCTable) field4Length() int { +func (p *TLakeSoulTable) field2Length() int { l := 0 - if p.IsSetAccessKey() { - l += bthrift.Binary.FieldBeginLength("access_key", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.AccessKey) + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMCTable) field5Length() int { +func (p *TLakeSoulTable) field3Length() int { l := 0 - if p.IsSetSecretKey() { - l += bthrift.Binary.FieldBeginLength("secret_key", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.SecretKey) + if p.IsSetProperties() { + l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) + for k, v := range p.Properties { - l += bthrift.Binary.FieldEndLength() - } - return l -} + l += bthrift.Binary.StringLengthNocopy(k) -func (p *TMCTable) field6Length() int { - l := 0 - if p.IsSetPublicAccess() { - l += bthrift.Binary.FieldBeginLength("public_access", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.PublicAccess) + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l @@ -10090,6 +11909,34 @@ func (p *TTableDescriptor) 
FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 22: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 23: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10383,6 +12230,32 @@ func (p *TTableDescriptor) FastReadField21(buf []byte) (int, error) { return offset, nil } +func (p *TTableDescriptor) FastReadField22(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTrinoConnectorTable() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TrinoConnectorTable = tmp + return offset, nil +} + +func (p *TTableDescriptor) FastReadField23(buf []byte) (int, error) { + offset := 0 + + tmp := NewTLakeSoulTable() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LakesoulTable = tmp + return offset, nil +} + // for compatibility func (p *TTableDescriptor) FastWrite(buf []byte) int { return 0 @@ -10409,6 +12282,8 @@ func (p *TTableDescriptor) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField19(buf[offset:], binaryWriter) offset += p.fastWriteField20(buf[offset:], binaryWriter) offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -10436,6 +12311,8 @@ func (p *TTableDescriptor) BLength() int { l += p.field19Length() l += p.field20Length() l += p.field21Length() + l += p.field22Length() + l += p.field23Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -10606,6 +12483,26 @@ func (p *TTableDescriptor) fastWriteField21(buf []byte, binaryWriter bthrift.Bin return offset } +func (p *TTableDescriptor) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trinoConnectorTable", thrift.STRUCT, 22) + offset += p.TrinoConnectorTable.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableDescriptor) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLakesoulTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lakesoulTable", thrift.STRUCT, 23) + offset += p.LakesoulTable.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTableDescriptor) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) @@ -10770,6 +12667,26 @@ func (p *TTableDescriptor) field21Length() int { return l } +func (p *TTableDescriptor) field22Length() int { + l := 0 + if p.IsSetTrinoConnectorTable() { + l += bthrift.Binary.FieldBeginLength("trinoConnectorTable", thrift.STRUCT, 22) + l += p.TrinoConnectorTable.BLength() + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableDescriptor) field23Length() int { + l := 0 + if p.IsSetLakesoulTable() { + l += bthrift.Binary.FieldBeginLength("lakesoulTable", thrift.STRUCT, 23) + l += p.LakesoulTable.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TDescriptorTable) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/DorisExternalService.go b/pkg/rpc/kitex_gen/dorisexternalservice/DorisExternalService.go index 68db2fc0..83ee256e 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/DorisExternalService.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/DorisExternalService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package dorisexternalservice @@ -33,7 +33,6 @@ func NewTScanOpenParams() *TScanOpenParams { } func (p *TScanOpenParams) InitDefault() { - *p = TScanOpenParams{} } func (p *TScanOpenParams) GetCluster() (v string) { @@ -245,10 +244,8 @@ func (p *TScanOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCluster = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -256,10 +253,8 @@ func (p *TScanOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDatabase = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -267,10 +262,8 @@ func (p *TScanOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -278,10 +271,8 @@ func (p *TScanOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { @@ -289,97 +280,78 @@ func (p *TScanOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOpaquedQueryPlan = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.MAP { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == 
thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I16 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -431,39 +403,46 @@ RequiredFieldNotSetError: } func (p *TScanOpenParams) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Cluster = v + _field = v } + p.Cluster = _field return nil } - func (p *TScanOpenParams) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Database = v + _field = v } + p.Database = _field return nil } - func (p *TScanOpenParams) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = v + _field = v } + p.Table = _field return nil } - func (p *TScanOpenParams) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TabletIds = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -471,38 +450,42 @@ func (p *TScanOpenParams) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.TabletIds = append(p.TabletIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletIds = _field return nil } - func (p *TScanOpenParams) ReadField5(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.OpaquedQueryPlan = v + _field = v } + p.OpaquedQueryPlan = _field return nil } - func (p *TScanOpenParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BatchSize = &v + _field = &v } + p.BatchSize = _field return nil } - func (p *TScanOpenParams) ReadField7(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := 
iprot.ReadString(); err != nil { @@ -518,65 +501,78 @@ func (p *TScanOpenParams) ReadField7(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } - func (p *TScanOpenParams) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Limit = &v + _field = &v } + p.Limit = _field return nil } - func (p *TScanOpenParams) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TScanOpenParams) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = &v + _field = &v } + p.Passwd = _field return nil } - func (p *TScanOpenParams) ReadField11(iprot thrift.TProtocol) error { + + var _field *int16 if v, err := iprot.ReadI16(); err != nil { return err } else { - p.KeepAliveMin = &v + _field = &v } + p.KeepAliveMin = _field return nil } - func (p *TScanOpenParams) ReadField12(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ExecutionTimeout = &v + _field = &v } + p.ExecutionTimeout = _field return nil } - func (p *TScanOpenParams) ReadField13(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MemLimit = &v + _field = &v } + p.MemLimit = _field return nil } @@ -638,7 +634,6 @@ func (p *TScanOpenParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 13 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -778,11 +773,9 @@ func (p *TScanOpenParams) writeField7(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -920,6 +913,7 @@ func (p *TScanOpenParams) String() string { return "" } return fmt.Sprintf("TScanOpenParams(%+v)", *p) + } func (p *TScanOpenParams) DeepEqual(ano *TScanOpenParams) bool { @@ -1119,7 +1113,6 @@ func NewTScanColumnDesc() *TScanColumnDesc { } func (p *TScanColumnDesc) InitDefault() { - *p = TScanColumnDesc{} } var TScanColumnDesc_Name_DEFAULT string @@ -1183,27 +1176,22 @@ func (p *TScanColumnDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1229,21 +1217,26 @@ ReadStructEndError: } func (p *TScanColumnDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = &v + _field = &v } + p.Name = _field return nil } - func (p *TScanColumnDesc) ReadField2(iprot thrift.TProtocol) 
error { + + var _field *types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TPrimitiveType(v) - p.Type = &tmp + _field = &tmp } + p.Type = _field return nil } @@ -1261,7 +1254,6 @@ func (p *TScanColumnDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1323,6 +1315,7 @@ func (p *TScanColumnDesc) String() string { return "" } return fmt.Sprintf("TScanColumnDesc(%+v)", *p) + } func (p *TScanColumnDesc) DeepEqual(ano *TScanColumnDesc) bool { @@ -1376,7 +1369,6 @@ func NewTScanOpenResult_() *TScanOpenResult_ { } func (p *TScanOpenResult_) InitDefault() { - *p = TScanOpenResult_{} } var TScanOpenResult__Status_DEFAULT *status.TStatus @@ -1459,37 +1451,30 @@ func (p *TScanOpenResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1521,39 +1506,45 @@ RequiredFieldNotSetError: } func (p *TScanOpenResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } - func (p *TScanOpenResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ContextId = &v + _field = &v } + p.ContextId = _field return nil } - func (p *TScanOpenResult_) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SelectedColumns = make([]*TScanColumnDesc, 0, size) + _field := make([]*TScanColumnDesc, 0, size) + values := make([]TScanColumnDesc, size) for i := 0; i < size; i++ { - _elem := NewTScanColumnDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SelectedColumns = append(p.SelectedColumns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SelectedColumns = _field return nil } @@ -1575,7 +1566,6 @@ func (p *TScanOpenResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1662,6 +1652,7 @@ func (p *TScanOpenResult_) String() string { return "" } return fmt.Sprintf("TScanOpenResult_(%+v)", *p) + } func (p *TScanOpenResult_) DeepEqual(ano *TScanOpenResult_) bool { @@ -1725,7 +1716,6 @@ func NewTScanNextBatchParams() *TScanNextBatchParams { } func (p *TScanNextBatchParams) InitDefault() { - *p = TScanNextBatchParams{} } var 
TScanNextBatchParams_ContextId_DEFAULT string @@ -1789,27 +1779,22 @@ func (p *TScanNextBatchParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1835,20 +1820,25 @@ ReadStructEndError: } func (p *TScanNextBatchParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ContextId = &v + _field = &v } + p.ContextId = _field return nil } - func (p *TScanNextBatchParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Offset = &v + _field = &v } + p.Offset = _field return nil } @@ -1866,7 +1856,6 @@ func (p *TScanNextBatchParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1928,6 +1917,7 @@ func (p *TScanNextBatchParams) String() string { return "" } return fmt.Sprintf("TScanNextBatchParams(%+v)", *p) + } func (p *TScanNextBatchParams) DeepEqual(ano *TScanNextBatchParams) bool { @@ -1981,7 +1971,6 @@ func NewTScanBatchResult_() *TScanBatchResult_ { } func (p *TScanBatchResult_) InitDefault() { - *p = TScanBatchResult_{} } var TScanBatchResult__Status_DEFAULT *status.TStatus @@ -2064,37 +2053,30 @@ func (p *TScanBatchResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2126,28 +2108,33 @@ RequiredFieldNotSetError: } func (p *TScanBatchResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } - func (p *TScanBatchResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Eos = &v + _field = &v } + p.Eos = _field return nil } - func (p *TScanBatchResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field 
[]byte if v, err := iprot.ReadBinary(); err != nil { return err } else { - p.Rows = []byte(v) + _field = []byte(v) } + p.Rows = _field return nil } @@ -2169,7 +2156,6 @@ func (p *TScanBatchResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2248,6 +2234,7 @@ func (p *TScanBatchResult_) String() string { return "" } return fmt.Sprintf("TScanBatchResult_(%+v)", *p) + } func (p *TScanBatchResult_) DeepEqual(ano *TScanBatchResult_) bool { @@ -2304,7 +2291,6 @@ func NewTScanCloseParams() *TScanCloseParams { } func (p *TScanCloseParams) InitDefault() { - *p = TScanCloseParams{} } var TScanCloseParams_ContextId_DEFAULT string @@ -2351,17 +2337,14 @@ func (p *TScanCloseParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2387,11 +2370,14 @@ ReadStructEndError: } func (p *TScanCloseParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ContextId = &v + _field = &v } + p.ContextId = _field return nil } @@ -2405,7 +2391,6 @@ func (p *TScanCloseParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2448,6 +2433,7 @@ func (p *TScanCloseParams) String() string { return "" } return fmt.Sprintf("TScanCloseParams(%+v)", *p) + } func (p *TScanCloseParams) DeepEqual(ano *TScanCloseParams) bool { @@ -2484,7 +2470,6 @@ func NewTScanCloseResult_() *TScanCloseResult_ { } func (p *TScanCloseResult_) InitDefault() { - *p = TScanCloseResult_{} } var TScanCloseResult__Status_DEFAULT *status.TStatus @@ -2533,17 +2518,14 @@ func (p *TScanCloseResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2575,10 +2557,11 @@ RequiredFieldNotSetError: } func (p *TScanCloseResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } @@ -2592,7 +2575,6 @@ func (p *TScanCloseResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2633,6 +2615,7 @@ func (p *TScanCloseResult_) String() string { return "" } return fmt.Sprintf("TScanCloseResult_(%+v)", *p) + } func (p *TScanCloseResult_) DeepEqual(ano *TScanCloseResult_) bool { @@ -2913,7 +2896,6 @@ func NewTDorisExternalServiceOpenScannerArgs() *TDorisExternalServiceOpenScanner } func (p *TDorisExternalServiceOpenScannerArgs) InitDefault() { - *p = TDorisExternalServiceOpenScannerArgs{} } var TDorisExternalServiceOpenScannerArgs_Params_DEFAULT 
*TScanOpenParams @@ -2960,17 +2942,14 @@ func (p *TDorisExternalServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2996,10 +2975,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceOpenScannerArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTScanOpenParams() - if err := p.Params.Read(iprot); err != nil { + _field := NewTScanOpenParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } @@ -3013,7 +2993,6 @@ func (p *TDorisExternalServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3054,6 +3033,7 @@ func (p *TDorisExternalServiceOpenScannerArgs) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceOpenScannerArgs(%+v)", *p) + } func (p *TDorisExternalServiceOpenScannerArgs) DeepEqual(ano *TDorisExternalServiceOpenScannerArgs) bool { @@ -3085,7 +3065,6 @@ func NewTDorisExternalServiceOpenScannerResult() *TDorisExternalServiceOpenScann } func (p *TDorisExternalServiceOpenScannerResult) InitDefault() { - *p = TDorisExternalServiceOpenScannerResult{} } var TDorisExternalServiceOpenScannerResult_Success_DEFAULT *TScanOpenResult_ @@ -3132,17 +3111,14 @@ func (p *TDorisExternalServiceOpenScannerResult) Read(iprot thrift.TProtocol) (e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3168,10 +3144,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceOpenScannerResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTScanOpenResult_() - if err := p.Success.Read(iprot); err != nil { + _field := NewTScanOpenResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } @@ -3185,7 +3162,6 @@ func (p *TDorisExternalServiceOpenScannerResult) Write(oprot thrift.TProtocol) ( fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3228,6 +3204,7 @@ func (p *TDorisExternalServiceOpenScannerResult) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceOpenScannerResult(%+v)", *p) + } func (p *TDorisExternalServiceOpenScannerResult) DeepEqual(ano *TDorisExternalServiceOpenScannerResult) bool { @@ -3259,7 +3236,6 @@ func NewTDorisExternalServiceGetNextArgs() *TDorisExternalServiceGetNextArgs { } func (p *TDorisExternalServiceGetNextArgs) InitDefault() { - *p = TDorisExternalServiceGetNextArgs{} } var TDorisExternalServiceGetNextArgs_Params_DEFAULT *TScanNextBatchParams @@ -3306,17 +3282,14 @@ func (p *TDorisExternalServiceGetNextArgs) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != 
nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3342,10 +3315,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceGetNextArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTScanNextBatchParams() - if err := p.Params.Read(iprot); err != nil { + _field := NewTScanNextBatchParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } @@ -3359,7 +3333,6 @@ func (p *TDorisExternalServiceGetNextArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3400,6 +3373,7 @@ func (p *TDorisExternalServiceGetNextArgs) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceGetNextArgs(%+v)", *p) + } func (p *TDorisExternalServiceGetNextArgs) DeepEqual(ano *TDorisExternalServiceGetNextArgs) bool { @@ -3431,7 +3405,6 @@ func NewTDorisExternalServiceGetNextResult() *TDorisExternalServiceGetNextResult } func (p *TDorisExternalServiceGetNextResult) InitDefault() { - *p = TDorisExternalServiceGetNextResult{} } var TDorisExternalServiceGetNextResult_Success_DEFAULT *TScanBatchResult_ @@ -3478,17 +3451,14 @@ func (p *TDorisExternalServiceGetNextResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3514,10 +3484,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceGetNextResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTScanBatchResult_() - if err := p.Success.Read(iprot); err != nil { + _field := NewTScanBatchResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } @@ -3531,7 +3502,6 @@ func (p *TDorisExternalServiceGetNextResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3574,6 +3544,7 @@ func (p *TDorisExternalServiceGetNextResult) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceGetNextResult(%+v)", *p) + } func (p *TDorisExternalServiceGetNextResult) DeepEqual(ano *TDorisExternalServiceGetNextResult) bool { @@ -3605,7 +3576,6 @@ func NewTDorisExternalServiceCloseScannerArgs() *TDorisExternalServiceCloseScann } func (p *TDorisExternalServiceCloseScannerArgs) InitDefault() { - *p = TDorisExternalServiceCloseScannerArgs{} } var TDorisExternalServiceCloseScannerArgs_Params_DEFAULT *TScanCloseParams @@ -3652,17 +3622,14 @@ func (p *TDorisExternalServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3688,10 +3655,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceCloseScannerArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = 
NewTScanCloseParams() - if err := p.Params.Read(iprot); err != nil { + _field := NewTScanCloseParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } @@ -3705,7 +3673,6 @@ func (p *TDorisExternalServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3746,6 +3713,7 @@ func (p *TDorisExternalServiceCloseScannerArgs) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceCloseScannerArgs(%+v)", *p) + } func (p *TDorisExternalServiceCloseScannerArgs) DeepEqual(ano *TDorisExternalServiceCloseScannerArgs) bool { @@ -3777,7 +3745,6 @@ func NewTDorisExternalServiceCloseScannerResult() *TDorisExternalServiceCloseSca } func (p *TDorisExternalServiceCloseScannerResult) InitDefault() { - *p = TDorisExternalServiceCloseScannerResult{} } var TDorisExternalServiceCloseScannerResult_Success_DEFAULT *TScanCloseResult_ @@ -3824,17 +3791,14 @@ func (p *TDorisExternalServiceCloseScannerResult) Read(iprot thrift.TProtocol) ( if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3860,10 +3824,11 @@ ReadStructEndError: } func (p *TDorisExternalServiceCloseScannerResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTScanCloseResult_() - if err := p.Success.Read(iprot); err != nil { + _field := NewTScanCloseResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } @@ -3877,7 +3842,6 @@ func (p *TDorisExternalServiceCloseScannerResult) Write(oprot thrift.TProtocol) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3920,6 +3884,7 @@ func (p *TDorisExternalServiceCloseScannerResult) String() string { return "" } return fmt.Sprintf("TDorisExternalServiceCloseScannerResult(%+v)", *p) + } func (p *TDorisExternalServiceCloseScannerResult) DeepEqual(ano *TDorisExternalServiceCloseScannerResult) bool { diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/k-DorisExternalService.go b/pkg/rpc/kitex_gen/dorisexternalservice/k-DorisExternalService.go index 7e6159b9..3230f3b8 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/k-DorisExternalService.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/k-DorisExternalService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package dorisexternalservice @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/client.go b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/client.go index 7b97df5a..7ccaa340 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/client.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/client.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package tdorisexternalservice diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/invoker.go b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/invoker.go index f5f30df9..f72e67ff 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/invoker.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/invoker.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package tdorisexternalservice diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/server.go b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/server.go index 0990ec66..317c0a10 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/server.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/server.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package tdorisexternalservice import ( diff --git a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/tdorisexternalservice.go b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/tdorisexternalservice.go index d65534c9..1ddd6a08 100644 --- a/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/tdorisexternalservice.go +++ b/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/tdorisexternalservice.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package tdorisexternalservice @@ -24,14 +24,15 @@ func NewServiceInfo() *kitex.ServiceInfo { "close_scanner": kitex.NewMethodInfo(closeScannerHandler, newTDorisExternalServiceCloseScannerArgs, newTDorisExternalServiceCloseScannerResult, false), } extra := map[string]interface{}{ - "PackageName": "dorisexternalservice", + "PackageName": "dorisexternalservice", + "ServiceFilePath": `thrift/DorisExternalService.thrift`, } svcInfo := &kitex.ServiceInfo{ ServiceName: serviceName, HandlerType: handlerType, Methods: methods, PayloadCodec: kitex.Thrift, - KiteXGenVersion: "v0.4.4", + KiteXGenVersion: "v0.8.0", Extra: extra, } return svcInfo diff --git a/pkg/rpc/kitex_gen/exprs/Exprs.go b/pkg/rpc/kitex_gen/exprs/Exprs.go index 0bd31c3b..6762676a 100644 --- a/pkg/rpc/kitex_gen/exprs/Exprs.go +++ b/pkg/rpc/kitex_gen/exprs/Exprs.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
package exprs @@ -49,6 +49,10 @@ const ( TExprNodeType_LAMBDA_FUNCTION_EXPR TExprNodeType = 31 TExprNodeType_LAMBDA_FUNCTION_CALL_EXPR TExprNodeType = 32 TExprNodeType_COLUMN_REF TExprNodeType = 33 + TExprNodeType_IPV4_LITERAL TExprNodeType = 34 + TExprNodeType_IPV6_LITERAL TExprNodeType = 35 + TExprNodeType_NULL_AWARE_IN_PRED TExprNodeType = 36 + TExprNodeType_NULL_AWARE_BINARY_PRED TExprNodeType = 37 ) func (p TExprNodeType) String() string { @@ -121,6 +125,14 @@ func (p TExprNodeType) String() string { return "LAMBDA_FUNCTION_CALL_EXPR" case TExprNodeType_COLUMN_REF: return "COLUMN_REF" + case TExprNodeType_IPV4_LITERAL: + return "IPV4_LITERAL" + case TExprNodeType_IPV6_LITERAL: + return "IPV6_LITERAL" + case TExprNodeType_NULL_AWARE_IN_PRED: + return "NULL_AWARE_IN_PRED" + case TExprNodeType_NULL_AWARE_BINARY_PRED: + return "NULL_AWARE_BINARY_PRED" } return "" } @@ -195,6 +207,14 @@ func TExprNodeTypeFromString(s string) (TExprNodeType, error) { return TExprNodeType_LAMBDA_FUNCTION_CALL_EXPR, nil case "COLUMN_REF": return TExprNodeType_COLUMN_REF, nil + case "IPV4_LITERAL": + return TExprNodeType_IPV4_LITERAL, nil + case "IPV6_LITERAL": + return TExprNodeType_IPV6_LITERAL, nil + case "NULL_AWARE_IN_PRED": + return TExprNodeType_NULL_AWARE_IN_PRED, nil + case "NULL_AWARE_BINARY_PRED": + return TExprNodeType_NULL_AWARE_BINARY_PRED, nil } return TExprNodeType(0), fmt.Errorf("not a valid TExprNodeType string") } @@ -266,7 +286,6 @@ func NewTAggregateExpr() *TAggregateExpr { } func (p *TAggregateExpr) InitDefault() { - *p = TAggregateExpr{} } func (p *TAggregateExpr) GetIsMergeAgg() (v bool) { @@ -323,27 +342,22 @@ func (p *TAggregateExpr) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsMergeAgg = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -375,31 +389,37 @@ RequiredFieldNotSetError: } func (p *TAggregateExpr) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMergeAgg = v + _field = v } + p.IsMergeAgg = _field return nil } - func (p *TAggregateExpr) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ParamTypes = make([]*types.TTypeDesc, 0, size) + _field := make([]*types.TTypeDesc, 0, size) + values := make([]types.TTypeDesc, size) for i := 0; i < size; i++ { - _elem := types.NewTTypeDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ParamTypes = append(p.ParamTypes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ParamTypes = _field return nil } @@ -417,7 +437,6 @@ func (p *TAggregateExpr) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -485,6 +504,7 @@ func (p *TAggregateExpr) String() string { return "" } return fmt.Sprintf("TAggregateExpr(%+v)", *p) + } 
func (p *TAggregateExpr) DeepEqual(ano *TAggregateExpr) bool { @@ -532,7 +552,6 @@ func NewTBoolLiteral() *TBoolLiteral { } func (p *TBoolLiteral) InitDefault() { - *p = TBoolLiteral{} } func (p *TBoolLiteral) GetValue() (v bool) { @@ -572,17 +591,14 @@ func (p *TBoolLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -614,11 +630,14 @@ RequiredFieldNotSetError: } func (p *TBoolLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -632,7 +651,6 @@ func (p *TBoolLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -673,6 +691,7 @@ func (p *TBoolLiteral) String() string { return "" } return fmt.Sprintf("TBoolLiteral(%+v)", *p) + } func (p *TBoolLiteral) DeepEqual(ano *TBoolLiteral) bool { @@ -705,7 +724,6 @@ func NewTCaseExpr() *TCaseExpr { } func (p *TCaseExpr) InitDefault() { - *p = TCaseExpr{} } func (p *TCaseExpr) GetHasCaseExpr() (v bool) { @@ -754,10 +772,8 @@ func (p *TCaseExpr) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHasCaseExpr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { @@ -765,17 +781,14 @@ func (p *TCaseExpr) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHasElseExpr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -812,20 +825,25 @@ RequiredFieldNotSetError: } func (p *TCaseExpr) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasCaseExpr = v + _field = v } + p.HasCaseExpr = _field return nil } - func (p *TCaseExpr) ReadField2(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasElseExpr = v + _field = v } + p.HasElseExpr = _field return nil } @@ -843,7 +861,6 @@ func (p *TCaseExpr) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -901,6 +918,7 @@ func (p *TCaseExpr) String() string { return "" } return fmt.Sprintf("TCaseExpr(%+v)", *p) + } func (p *TCaseExpr) DeepEqual(ano *TCaseExpr) bool { @@ -942,7 +960,6 @@ func NewTDateLiteral() *TDateLiteral { } func (p *TDateLiteral) InitDefault() { - *p = TDateLiteral{} } func (p *TDateLiteral) GetValue() (v string) { @@ -982,17 +999,14 @@ func (p *TDateLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { 
+ goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1024,11 +1038,14 @@ RequiredFieldNotSetError: } func (p *TDateLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -1042,7 +1059,6 @@ func (p *TDateLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1083,6 +1099,7 @@ func (p *TDateLiteral) String() string { return "" } return fmt.Sprintf("TDateLiteral(%+v)", *p) + } func (p *TDateLiteral) DeepEqual(ano *TDateLiteral) bool { @@ -1114,7 +1131,6 @@ func NewTFloatLiteral() *TFloatLiteral { } func (p *TFloatLiteral) InitDefault() { - *p = TFloatLiteral{} } func (p *TFloatLiteral) GetValue() (v float64) { @@ -1154,17 +1170,14 @@ func (p *TFloatLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1196,11 +1209,14 @@ RequiredFieldNotSetError: } func (p *TFloatLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field float64 if v, err := iprot.ReadDouble(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -1214,7 +1230,6 @@ func (p *TFloatLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1255,6 +1270,7 @@ func (p *TFloatLiteral) String() string { return "" } return fmt.Sprintf("TFloatLiteral(%+v)", *p) + } func (p *TFloatLiteral) DeepEqual(ano *TFloatLiteral) bool { @@ -1286,7 +1302,6 @@ func NewTDecimalLiteral() *TDecimalLiteral { } func (p *TDecimalLiteral) InitDefault() { - *p = TDecimalLiteral{} } func (p *TDecimalLiteral) GetValue() (v string) { @@ -1326,17 +1341,14 @@ func (p *TDecimalLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1368,11 +1380,14 @@ RequiredFieldNotSetError: } func (p *TDecimalLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -1386,7 +1401,6 @@ func (p *TDecimalLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1427,6 +1441,7 @@ func (p *TDecimalLiteral) String() string { return "" } return fmt.Sprintf("TDecimalLiteral(%+v)", *p) + } func (p *TDecimalLiteral) DeepEqual(ano *TDecimalLiteral) bool { @@ -1458,7 +1473,6 @@ func NewTIntLiteral() *TIntLiteral { } func (p *TIntLiteral) InitDefault() { - *p = TIntLiteral{} } func (p *TIntLiteral) 
GetValue() (v int64) { @@ -1498,17 +1512,14 @@ func (p *TIntLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1540,11 +1551,14 @@ RequiredFieldNotSetError: } func (p *TIntLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -1558,7 +1572,6 @@ func (p *TIntLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1599,6 +1612,7 @@ func (p *TIntLiteral) String() string { return "" } return fmt.Sprintf("TIntLiteral(%+v)", *p) + } func (p *TIntLiteral) DeepEqual(ano *TIntLiteral) bool { @@ -1630,7 +1644,6 @@ func NewTLargeIntLiteral() *TLargeIntLiteral { } func (p *TLargeIntLiteral) InitDefault() { - *p = TLargeIntLiteral{} } func (p *TLargeIntLiteral) GetValue() (v string) { @@ -1670,17 +1683,14 @@ func (p *TLargeIntLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1712,11 +1722,14 @@ RequiredFieldNotSetError: } func (p *TLargeIntLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -1730,7 +1743,6 @@ func (p *TLargeIntLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1771,6 +1783,7 @@ func (p *TLargeIntLiteral) String() string { return "" } return fmt.Sprintf("TLargeIntLiteral(%+v)", *p) + } func (p *TLargeIntLiteral) DeepEqual(ano *TLargeIntLiteral) bool { @@ -1793,34 +1806,33 @@ func (p *TLargeIntLiteral) Field1DeepEqual(src string) bool { return true } -type TInPredicate struct { - IsNotIn bool `thrift:"is_not_in,1,required" frugal:"1,required,bool" json:"is_not_in"` +type TIPv4Literal struct { + Value int64 `thrift:"value,1,required" frugal:"1,required,i64" json:"value"` } -func NewTInPredicate() *TInPredicate { - return &TInPredicate{} +func NewTIPv4Literal() *TIPv4Literal { + return &TIPv4Literal{} } -func (p *TInPredicate) InitDefault() { - *p = TInPredicate{} +func (p *TIPv4Literal) InitDefault() { } -func (p *TInPredicate) GetIsNotIn() (v bool) { - return p.IsNotIn +func (p *TIPv4Literal) GetValue() (v int64) { + return p.Value } -func (p *TInPredicate) SetIsNotIn(val bool) { - p.IsNotIn = val +func (p *TIPv4Literal) SetValue(val int64) { + p.Value = val } -var fieldIDToName_TInPredicate = map[int16]string{ - 1: "is_not_in", +var fieldIDToName_TIPv4Literal = map[int16]string{ + 1: "value", } -func (p *TInPredicate) Read(iprot thrift.TProtocol) (err error) { +func (p *TIPv4Literal) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var 
fieldId int16 - var issetIsNotIn bool = false + var issetValue bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -1837,22 +1849,19 @@ func (p *TInPredicate) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetIsNotIn = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetValue = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1861,7 +1870,7 @@ func (p *TInPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetIsNotIn { + if !issetValue { fieldId = 1 goto RequiredFieldNotSetError } @@ -1871,7 +1880,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInPredicate[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIPv4Literal[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -1880,21 +1889,24 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TInPredicate[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIPv4Literal[fieldId])) } -func (p *TInPredicate) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TIPv4Literal) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IsNotIn = v + _field = v } + p.Value = _field return nil } -func (p *TInPredicate) Write(oprot thrift.TProtocol) (err error) { +func (p *TIPv4Literal) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TInPredicate"); err != nil { + if err = oprot.WriteStructBegin("TIPv4Literal"); err != nil { goto WriteStructBeginError } if p != nil { @@ -1902,7 +1914,6 @@ func (p *TInPredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1921,11 +1932,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TInPredicate) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("is_not_in", thrift.BOOL, 1); err != nil { +func (p *TIPv4Literal) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("value", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.IsNotIn); err != nil { + if err := oprot.WriteI64(p.Value); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -1938,61 +1949,61 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), 
err) } -func (p *TInPredicate) String() string { +func (p *TIPv4Literal) String() string { if p == nil { return "" } - return fmt.Sprintf("TInPredicate(%+v)", *p) + return fmt.Sprintf("TIPv4Literal(%+v)", *p) + } -func (p *TInPredicate) DeepEqual(ano *TInPredicate) bool { +func (p *TIPv4Literal) DeepEqual(ano *TIPv4Literal) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.IsNotIn) { + if !p.Field1DeepEqual(ano.Value) { return false } return true } -func (p *TInPredicate) Field1DeepEqual(src bool) bool { +func (p *TIPv4Literal) Field1DeepEqual(src int64) bool { - if p.IsNotIn != src { + if p.Value != src { return false } return true } -type TIsNullPredicate struct { - IsNotNull bool `thrift:"is_not_null,1,required" frugal:"1,required,bool" json:"is_not_null"` +type TIPv6Literal struct { + Value string `thrift:"value,1,required" frugal:"1,required,string" json:"value"` } -func NewTIsNullPredicate() *TIsNullPredicate { - return &TIsNullPredicate{} +func NewTIPv6Literal() *TIPv6Literal { + return &TIPv6Literal{} } -func (p *TIsNullPredicate) InitDefault() { - *p = TIsNullPredicate{} +func (p *TIPv6Literal) InitDefault() { } -func (p *TIsNullPredicate) GetIsNotNull() (v bool) { - return p.IsNotNull +func (p *TIPv6Literal) GetValue() (v string) { + return p.Value } -func (p *TIsNullPredicate) SetIsNotNull(val bool) { - p.IsNotNull = val +func (p *TIPv6Literal) SetValue(val string) { + p.Value = val } -var fieldIDToName_TIsNullPredicate = map[int16]string{ - 1: "is_not_null", +var fieldIDToName_TIPv6Literal = map[int16]string{ + 1: "value", } -func (p *TIsNullPredicate) Read(iprot thrift.TProtocol) (err error) { +func (p *TIPv6Literal) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetIsNotNull bool = false + var issetValue bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -2009,22 +2020,19 @@ func (p *TIsNullPredicate) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetIsNotNull = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetValue = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2033,7 +2041,7 @@ func (p *TIsNullPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetIsNotNull { + if !issetValue { fieldId = 1 goto RequiredFieldNotSetError } @@ -2043,7 +2051,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIsNullPredicate[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIPv6Literal[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -2052,21 +2060,24 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIsNullPredicate[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIPv6Literal[fieldId])) } -func (p *TIsNullPredicate) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TIPv6Literal) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.IsNotNull = v + _field = v } + p.Value = _field return nil } -func (p *TIsNullPredicate) Write(oprot thrift.TProtocol) (err error) { +func (p *TIPv6Literal) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TIsNullPredicate"); err != nil { + if err = oprot.WriteStructBegin("TIPv6Literal"); err != nil { goto WriteStructBeginError } if p != nil { @@ -2074,7 +2085,6 @@ func (p *TIsNullPredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2093,11 +2103,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TIsNullPredicate) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("is_not_null", thrift.BOOL, 1); err != nil { +func (p *TIPv6Literal) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.IsNotNull); err != nil { + if err := oprot.WriteString(p.Value); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -2110,61 +2120,61 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TIsNullPredicate) String() string { +func (p *TIPv6Literal) String() string { if p == nil { return "" } - return fmt.Sprintf("TIsNullPredicate(%+v)", *p) + return fmt.Sprintf("TIPv6Literal(%+v)", *p) + } -func (p *TIsNullPredicate) DeepEqual(ano *TIsNullPredicate) bool { +func (p *TIPv6Literal) DeepEqual(ano *TIPv6Literal) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.IsNotNull) { + if !p.Field1DeepEqual(ano.Value) { return false } return true } -func (p *TIsNullPredicate) Field1DeepEqual(src bool) bool { +func (p *TIPv6Literal) Field1DeepEqual(src string) bool { - if p.IsNotNull != src { + if strings.Compare(p.Value, src) != 0 { return false } return true } -type TLikePredicate struct { - EscapeChar string `thrift:"escape_char,1,required" frugal:"1,required,string" json:"escape_char"` +type TInPredicate struct { + IsNotIn bool `thrift:"is_not_in,1,required" frugal:"1,required,bool" json:"is_not_in"` } -func NewTLikePredicate() *TLikePredicate { - return &TLikePredicate{} +func NewTInPredicate() *TInPredicate { + return &TInPredicate{} } -func (p *TLikePredicate) InitDefault() { - *p = TLikePredicate{} +func (p *TInPredicate) InitDefault() { } -func (p *TLikePredicate) GetEscapeChar() (v string) { - return p.EscapeChar +func (p *TInPredicate) GetIsNotIn() (v bool) { + return p.IsNotIn } -func (p *TLikePredicate) SetEscapeChar(val string) { - p.EscapeChar = val +func (p *TInPredicate) SetIsNotIn(val bool) { + p.IsNotIn = val } -var fieldIDToName_TLikePredicate = map[int16]string{ - 1: "escape_char", +var fieldIDToName_TInPredicate = 
map[int16]string{ + 1: "is_not_in", } -func (p *TLikePredicate) Read(iprot thrift.TProtocol) (err error) { +func (p *TInPredicate) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetEscapeChar bool = false + var issetIsNotIn bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -2181,22 +2191,19 @@ func (p *TLikePredicate) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetEscapeChar = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetIsNotIn = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2205,7 +2212,7 @@ func (p *TLikePredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetEscapeChar { + if !issetIsNotIn { fieldId = 1 goto RequiredFieldNotSetError } @@ -2215,7 +2222,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLikePredicate[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInPredicate[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -2224,21 +2231,24 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLikePredicate[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TInPredicate[fieldId])) } -func (p *TLikePredicate) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TInPredicate) ReadField1(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EscapeChar = v + _field = v } + p.IsNotIn = _field return nil } -func (p *TLikePredicate) Write(oprot thrift.TProtocol) (err error) { +func (p *TInPredicate) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLikePredicate"); err != nil { + if err = oprot.WriteStructBegin("TInPredicate"); err != nil { goto WriteStructBeginError } if p != nil { @@ -2246,7 +2256,6 @@ func (p *TLikePredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2265,11 +2274,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLikePredicate) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("escape_char", thrift.STRING, 1); err != nil { +func (p *TInPredicate) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("is_not_in", thrift.BOOL, 1); err != nil { goto WriteFieldBeginError } - if err := 
oprot.WriteString(p.EscapeChar); err != nil { + if err := oprot.WriteBool(p.IsNotIn); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -2282,63 +2291,431 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLikePredicate) String() string { +func (p *TInPredicate) String() string { if p == nil { return "" } - return fmt.Sprintf("TLikePredicate(%+v)", *p) + return fmt.Sprintf("TInPredicate(%+v)", *p) + } -func (p *TLikePredicate) DeepEqual(ano *TLikePredicate) bool { +func (p *TInPredicate) DeepEqual(ano *TInPredicate) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.EscapeChar) { + if !p.Field1DeepEqual(ano.IsNotIn) { return false } return true } -func (p *TLikePredicate) Field1DeepEqual(src string) bool { +func (p *TInPredicate) Field1DeepEqual(src bool) bool { - if strings.Compare(p.EscapeChar, src) != 0 { + if p.IsNotIn != src { return false } return true } -type TMatchPredicate struct { - ParserType string `thrift:"parser_type,1,required" frugal:"1,required,string" json:"parser_type"` - ParserMode string `thrift:"parser_mode,2,required" frugal:"2,required,string" json:"parser_mode"` - CharFilterMap map[string]string `thrift:"char_filter_map,3,optional" frugal:"3,optional,map" json:"char_filter_map,omitempty"` +type TIsNullPredicate struct { + IsNotNull bool `thrift:"is_not_null,1,required" frugal:"1,required,bool" json:"is_not_null"` } -func NewTMatchPredicate() *TMatchPredicate { - return &TMatchPredicate{} +func NewTIsNullPredicate() *TIsNullPredicate { + return &TIsNullPredicate{} } -func (p *TMatchPredicate) InitDefault() { - *p = TMatchPredicate{} +func (p *TIsNullPredicate) InitDefault() { } -func (p *TMatchPredicate) GetParserType() (v string) { - return p.ParserType +func (p *TIsNullPredicate) GetIsNotNull() (v bool) { + return p.IsNotNull +} +func (p *TIsNullPredicate) SetIsNotNull(val bool) { + p.IsNotNull = val } -func (p *TMatchPredicate) GetParserMode() (v string) { - return p.ParserMode +var fieldIDToName_TIsNullPredicate = map[int16]string{ + 1: "is_not_null", } -var TMatchPredicate_CharFilterMap_DEFAULT map[string]string +func (p *TIsNullPredicate) Read(iprot thrift.TProtocol) (err error) { -func (p *TMatchPredicate) GetCharFilterMap() (v map[string]string) { - if !p.IsSetCharFilterMap() { - return TMatchPredicate_CharFilterMap_DEFAULT + var fieldTypeId thrift.TType + var fieldId int16 + var issetIsNotNull bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return p.CharFilterMap -} + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetIsNotNull = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetIsNotNull { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T 
read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIsNullPredicate[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIsNullPredicate[fieldId])) +} + +func (p *TIsNullPredicate) ReadField1(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsNotNull = _field + return nil +} + +func (p *TIsNullPredicate) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TIsNullPredicate"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TIsNullPredicate) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("is_not_null", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNotNull); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TIsNullPredicate) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TIsNullPredicate(%+v)", *p) + +} + +func (p *TIsNullPredicate) DeepEqual(ano *TIsNullPredicate) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.IsNotNull) { + return false + } + return true +} + +func (p *TIsNullPredicate) Field1DeepEqual(src bool) bool { + + if p.IsNotNull != src { + return false + } + return true +} + +type TLikePredicate struct { + EscapeChar string `thrift:"escape_char,1,required" frugal:"1,required,string" json:"escape_char"` +} + +func NewTLikePredicate() *TLikePredicate { + return &TLikePredicate{} +} + +func (p *TLikePredicate) InitDefault() { +} + +func (p *TLikePredicate) GetEscapeChar() (v string) { + return p.EscapeChar +} +func (p *TLikePredicate) SetEscapeChar(val string) { + p.EscapeChar = val +} + +var fieldIDToName_TLikePredicate = map[int16]string{ + 1: "escape_char", +} + +func (p *TLikePredicate) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetEscapeChar bool = false + + if _, err = 
iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetEscapeChar = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetEscapeChar { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLikePredicate[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLikePredicate[fieldId])) +} + +func (p *TLikePredicate) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.EscapeChar = _field + return nil +} + +func (p *TLikePredicate) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLikePredicate"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLikePredicate) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("escape_char", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.EscapeChar); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLikePredicate) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLikePredicate(%+v)", *p) + +} + +func (p *TLikePredicate) DeepEqual(ano *TLikePredicate) 
bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.EscapeChar) { + return false + } + return true +} + +func (p *TLikePredicate) Field1DeepEqual(src string) bool { + + if strings.Compare(p.EscapeChar, src) != 0 { + return false + } + return true +} + +type TMatchPredicate struct { + ParserType string `thrift:"parser_type,1,required" frugal:"1,required,string" json:"parser_type"` + ParserMode string `thrift:"parser_mode,2,required" frugal:"2,required,string" json:"parser_mode"` + CharFilterMap map[string]string `thrift:"char_filter_map,3,optional" frugal:"3,optional,map" json:"char_filter_map,omitempty"` + ParserLowercase bool `thrift:"parser_lowercase,4,optional" frugal:"4,optional,bool" json:"parser_lowercase,omitempty"` + ParserStopwords string `thrift:"parser_stopwords,5,optional" frugal:"5,optional,string" json:"parser_stopwords,omitempty"` +} + +func NewTMatchPredicate() *TMatchPredicate { + return &TMatchPredicate{ + + ParserLowercase: true, + ParserStopwords: "", + } +} + +func (p *TMatchPredicate) InitDefault() { + p.ParserLowercase = true + p.ParserStopwords = "" +} + +func (p *TMatchPredicate) GetParserType() (v string) { + return p.ParserType +} + +func (p *TMatchPredicate) GetParserMode() (v string) { + return p.ParserMode +} + +var TMatchPredicate_CharFilterMap_DEFAULT map[string]string + +func (p *TMatchPredicate) GetCharFilterMap() (v map[string]string) { + if !p.IsSetCharFilterMap() { + return TMatchPredicate_CharFilterMap_DEFAULT + } + return p.CharFilterMap +} + +var TMatchPredicate_ParserLowercase_DEFAULT bool = true + +func (p *TMatchPredicate) GetParserLowercase() (v bool) { + if !p.IsSetParserLowercase() { + return TMatchPredicate_ParserLowercase_DEFAULT + } + return p.ParserLowercase +} + +var TMatchPredicate_ParserStopwords_DEFAULT string = "" + +func (p *TMatchPredicate) GetParserStopwords() (v string) { + if !p.IsSetParserStopwords() { + return TMatchPredicate_ParserStopwords_DEFAULT + } + return p.ParserStopwords +} func (p *TMatchPredicate) SetParserType(val string) { p.ParserType = val } @@ -2348,17 +2725,33 @@ func (p *TMatchPredicate) SetParserMode(val string) { func (p *TMatchPredicate) SetCharFilterMap(val map[string]string) { p.CharFilterMap = val } +func (p *TMatchPredicate) SetParserLowercase(val bool) { + p.ParserLowercase = val +} +func (p *TMatchPredicate) SetParserStopwords(val string) { + p.ParserStopwords = val +} var fieldIDToName_TMatchPredicate = map[int16]string{ 1: "parser_type", 2: "parser_mode", 3: "char_filter_map", + 4: "parser_lowercase", + 5: "parser_stopwords", } func (p *TMatchPredicate) IsSetCharFilterMap() bool { return p.CharFilterMap != nil } +func (p *TMatchPredicate) IsSetParserLowercase() bool { + return p.ParserLowercase != TMatchPredicate_ParserLowercase_DEFAULT +} + +func (p *TMatchPredicate) IsSetParserStopwords() bool { + return p.ParserStopwords != TMatchPredicate_ParserStopwords_DEFAULT +} + func (p *TMatchPredicate) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -2386,10 +2779,8 @@ func (p *TMatchPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetParserType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -2397,27 +2788,38 @@ func (p *TMatchPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } 
issetParserMode = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2454,29 +2856,33 @@ RequiredFieldNotSetError: } func (p *TMatchPredicate) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ParserType = v + _field = v } + p.ParserType = _field return nil } - func (p *TMatchPredicate) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ParserMode = v + _field = v } + p.ParserMode = _field return nil } - func (p *TMatchPredicate) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.CharFilterMap = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -2492,11 +2898,34 @@ func (p *TMatchPredicate) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.CharFilterMap[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.CharFilterMap = _field + return nil +} +func (p *TMatchPredicate) ReadField4(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.ParserLowercase = _field + return nil +} +func (p *TMatchPredicate) ReadField5(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.ParserStopwords = _field return nil } @@ -2518,7 +2947,14 @@ func (p *TMatchPredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2580,11 +3016,9 @@ func (p *TMatchPredicate) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.CharFilterMap { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -2603,11 +3037,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TMatchPredicate) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetParserLowercase() { + if err = oprot.WriteFieldBegin("parser_lowercase", thrift.BOOL, 4); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteBool(p.ParserLowercase); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TMatchPredicate) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetParserStopwords() { + if err = oprot.WriteFieldBegin("parser_stopwords", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.ParserStopwords); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + func (p *TMatchPredicate) String() string { if p == nil { return "" } return fmt.Sprintf("TMatchPredicate(%+v)", *p) + } func (p *TMatchPredicate) DeepEqual(ano *TMatchPredicate) bool { @@ -2625,6 +3098,12 @@ func (p *TMatchPredicate) DeepEqual(ano *TMatchPredicate) bool { if !p.Field3DeepEqual(ano.CharFilterMap) { return false } + if !p.Field4DeepEqual(ano.ParserLowercase) { + return false + } + if !p.Field5DeepEqual(ano.ParserStopwords) { + return false + } return true } @@ -2655,6 +3134,20 @@ func (p *TMatchPredicate) Field3DeepEqual(src map[string]string) bool { } return true } +func (p *TMatchPredicate) Field4DeepEqual(src bool) bool { + + if p.ParserLowercase != src { + return false + } + return true +} +func (p *TMatchPredicate) Field5DeepEqual(src string) bool { + + if strings.Compare(p.ParserStopwords, src) != 0 { + return false + } + return true +} type TLiteralPredicate struct { Value bool `thrift:"value,1,required" frugal:"1,required,bool" json:"value"` @@ -2666,7 +3159,6 @@ func NewTLiteralPredicate() *TLiteralPredicate { } func (p *TLiteralPredicate) InitDefault() { - *p = TLiteralPredicate{} } func (p *TLiteralPredicate) GetValue() (v bool) { @@ -2715,10 +3207,8 @@ func (p *TLiteralPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { @@ -2726,17 +3216,14 @@ func (p *TLiteralPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsNull = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2773,20 +3260,25 @@ RequiredFieldNotSetError: } func (p *TLiteralPredicate) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } - func (p *TLiteralPredicate) ReadField2(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsNull = v + _field = v } + p.IsNull = _field return nil } @@ -2804,7 +3296,6 @@ func (p *TLiteralPredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 
goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2862,6 +3353,7 @@ func (p *TLiteralPredicate) String() string { return "" } return fmt.Sprintf("TLiteralPredicate(%+v)", *p) + } func (p *TLiteralPredicate) DeepEqual(ano *TLiteralPredicate) bool { @@ -2904,7 +3396,6 @@ func NewTTupleIsNullPredicate() *TTupleIsNullPredicate { } func (p *TTupleIsNullPredicate) InitDefault() { - *p = TTupleIsNullPredicate{} } func (p *TTupleIsNullPredicate) GetTupleIds() (v []types.TTupleId) { @@ -2961,27 +3452,22 @@ func (p *TTupleIsNullPredicate) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3017,8 +3503,9 @@ func (p *TTupleIsNullPredicate) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TupleIds = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -3026,21 +3513,24 @@ func (p *TTupleIsNullPredicate) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.TupleIds = append(p.TupleIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TupleIds = _field return nil } - func (p *TTupleIsNullPredicate) ReadField2(iprot thrift.TProtocol) error { + + var _field *TNullSide if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TNullSide(v) - p.NullSide = &tmp + _field = &tmp } + p.NullSide = _field return nil } @@ -3058,7 +3548,6 @@ func (p *TTupleIsNullPredicate) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3126,6 +3615,7 @@ func (p *TTupleIsNullPredicate) String() string { return "" } return fmt.Sprintf("TTupleIsNullPredicate(%+v)", *p) + } func (p *TTupleIsNullPredicate) DeepEqual(ano *TTupleIsNullPredicate) bool { @@ -3180,7 +3670,6 @@ func NewTSlotRef() *TSlotRef { } func (p *TSlotRef) InitDefault() { - *p = TSlotRef{} } func (p *TSlotRef) GetSlotId() (v types.TSlotId) { @@ -3246,10 +3735,8 @@ func (p *TSlotRef) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSlotId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -3257,27 +3744,22 @@ func (p *TSlotRef) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3314,29 +3796,36 @@ RequiredFieldNotSetError: } func (p *TSlotRef) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SlotId = v + _field = v } + p.SlotId = _field return nil } - func (p *TSlotRef) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TSlotRef) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColUniqueId = &v + _field = &v } + p.ColUniqueId = _field return nil } @@ -3358,7 +3847,6 @@ func (p *TSlotRef) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3435,6 +3923,7 @@ func (p *TSlotRef) String() string { return "" } return fmt.Sprintf("TSlotRef(%+v)", *p) + } func (p *TSlotRef) DeepEqual(ano *TSlotRef) bool { @@ -3492,7 +3981,6 @@ func NewTColumnRef() *TColumnRef { } func (p *TColumnRef) InitDefault() { - *p = TColumnRef{} } var TColumnRef_ColumnId_DEFAULT types.TSlotId @@ -3556,27 +4044,22 @@ func (p *TColumnRef) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3602,20 +4085,25 @@ ReadStructEndError: } func (p *TColumnRef) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnId = &v + _field = &v } + p.ColumnId = _field return nil } - func (p *TColumnRef) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnName = &v + _field = &v } + p.ColumnName = _field return nil } @@ -3633,7 +4121,6 @@ func (p *TColumnRef) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3695,6 +4182,7 @@ func (p *TColumnRef) String() string { return "" } return fmt.Sprintf("TColumnRef(%+v)", *p) + } func (p *TColumnRef) DeepEqual(ano *TColumnRef) bool { @@ -3746,7 +4234,6 @@ func NewTStringLiteral() *TStringLiteral { } func (p *TStringLiteral) InitDefault() { - *p = TStringLiteral{} } func (p *TStringLiteral) GetValue() (v string) { @@ -3786,17 +4273,14 @@ func (p *TStringLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } 
default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3828,11 +4312,14 @@ RequiredFieldNotSetError: } func (p *TStringLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -3846,7 +4333,6 @@ func (p *TStringLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3887,6 +4373,7 @@ func (p *TStringLiteral) String() string { return "" } return fmt.Sprintf("TStringLiteral(%+v)", *p) + } func (p *TStringLiteral) DeepEqual(ano *TStringLiteral) bool { @@ -3909,6 +4396,259 @@ func (p *TStringLiteral) Field1DeepEqual(src string) bool { return true } +type TNullableStringLiteral struct { + Value *string `thrift:"value,1,optional" frugal:"1,optional,string" json:"value,omitempty"` + IsNull bool `thrift:"is_null,2,optional" frugal:"2,optional,bool" json:"is_null,omitempty"` +} + +func NewTNullableStringLiteral() *TNullableStringLiteral { + return &TNullableStringLiteral{ + + IsNull: false, + } +} + +func (p *TNullableStringLiteral) InitDefault() { + p.IsNull = false +} + +var TNullableStringLiteral_Value_DEFAULT string + +func (p *TNullableStringLiteral) GetValue() (v string) { + if !p.IsSetValue() { + return TNullableStringLiteral_Value_DEFAULT + } + return *p.Value +} + +var TNullableStringLiteral_IsNull_DEFAULT bool = false + +func (p *TNullableStringLiteral) GetIsNull() (v bool) { + if !p.IsSetIsNull() { + return TNullableStringLiteral_IsNull_DEFAULT + } + return p.IsNull +} +func (p *TNullableStringLiteral) SetValue(val *string) { + p.Value = val +} +func (p *TNullableStringLiteral) SetIsNull(val bool) { + p.IsNull = val +} + +var fieldIDToName_TNullableStringLiteral = map[int16]string{ + 1: "value", + 2: "is_null", +} + +func (p *TNullableStringLiteral) IsSetValue() bool { + return p.Value != nil +} + +func (p *TNullableStringLiteral) IsSetIsNull() bool { + return p.IsNull != TNullableStringLiteral_IsNull_DEFAULT +} + +func (p *TNullableStringLiteral) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field 
%d '%s' error: ", p, fieldId, fieldIDToName_TNullableStringLiteral[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TNullableStringLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Value = _field + return nil +} +func (p *TNullableStringLiteral) ReadField2(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsNull = _field + return nil +} + +func (p *TNullableStringLiteral) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TNullableStringLiteral"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TNullableStringLiteral) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetValue() { + if err = oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Value); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TNullableStringLiteral) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNull() { + if err = oprot.WriteFieldBegin("is_null", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNull); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TNullableStringLiteral) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TNullableStringLiteral(%+v)", *p) + +} + +func (p *TNullableStringLiteral) DeepEqual(ano *TNullableStringLiteral) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Value) { + return false + } + if !p.Field2DeepEqual(ano.IsNull) { + return false + } + return true +} + +func (p *TNullableStringLiteral) Field1DeepEqual(src *string) bool { + 
+ if p.Value == src { + return true + } else if p.Value == nil || src == nil { + return false + } + if strings.Compare(*p.Value, *src) != 0 { + return false + } + return true +} +func (p *TNullableStringLiteral) Field2DeepEqual(src bool) bool { + + if p.IsNull != src { + return false + } + return true +} + type TJsonLiteral struct { Value string `thrift:"value,1,required" frugal:"1,required,string" json:"value"` } @@ -3918,7 +4658,6 @@ func NewTJsonLiteral() *TJsonLiteral { } func (p *TJsonLiteral) InitDefault() { - *p = TJsonLiteral{} } func (p *TJsonLiteral) GetValue() (v string) { @@ -3958,17 +4697,14 @@ func (p *TJsonLiteral) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4000,11 +4736,14 @@ RequiredFieldNotSetError: } func (p *TJsonLiteral) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -4018,7 +4757,6 @@ func (p *TJsonLiteral) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4059,6 +4797,7 @@ func (p *TJsonLiteral) String() string { return "" } return fmt.Sprintf("TJsonLiteral(%+v)", *p) + } func (p *TJsonLiteral) DeepEqual(ano *TJsonLiteral) bool { @@ -4091,7 +4830,6 @@ func NewTInfoFunc() *TInfoFunc { } func (p *TInfoFunc) InitDefault() { - *p = TInfoFunc{} } func (p *TInfoFunc) GetIntValue() (v int64) { @@ -4140,10 +4878,8 @@ func (p *TInfoFunc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIntValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -4151,17 +4887,14 @@ func (p *TInfoFunc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStrValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4198,20 +4931,25 @@ RequiredFieldNotSetError: } func (p *TInfoFunc) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IntValue = v + _field = v } + p.IntValue = _field return nil } - func (p *TInfoFunc) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.StrValue = v + _field = v } + p.StrValue = _field return nil } @@ -4229,7 +4967,6 @@ func (p *TInfoFunc) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4287,6 +5024,7 @@ func (p *TInfoFunc) String() string { return "" } return fmt.Sprintf("TInfoFunc(%+v)", *p) + } func (p *TInfoFunc) DeepEqual(ano *TInfoFunc) bool { @@ -4329,7 +5067,6 @@ func NewTFunctionCallExpr() *TFunctionCallExpr { } 
func (p *TFunctionCallExpr) InitDefault() { - *p = TFunctionCallExpr{} } var TFunctionCallExpr_Fn_DEFAULT *types.TFunction @@ -4395,27 +5132,22 @@ func (p *TFunctionCallExpr) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFn = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4447,19 +5179,22 @@ RequiredFieldNotSetError: } func (p *TFunctionCallExpr) ReadField1(iprot thrift.TProtocol) error { - p.Fn = types.NewTFunction() - if err := p.Fn.Read(iprot); err != nil { + _field := types.NewTFunction() + if err := _field.Read(iprot); err != nil { return err } + p.Fn = _field return nil } - func (p *TFunctionCallExpr) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VarargStartIdx = &v + _field = &v } + p.VarargStartIdx = _field return nil } @@ -4477,7 +5212,6 @@ func (p *TFunctionCallExpr) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4537,6 +5271,7 @@ func (p *TFunctionCallExpr) String() string { return "" } return fmt.Sprintf("TFunctionCallExpr(%+v)", *p) + } func (p *TFunctionCallExpr) DeepEqual(ano *TFunctionCallExpr) bool { @@ -4583,7 +5318,6 @@ func NewTSchemaChangeExpr() *TSchemaChangeExpr { } func (p *TSchemaChangeExpr) InitDefault() { - *p = TSchemaChangeExpr{} } var TSchemaChangeExpr_TableId_DEFAULT int64 @@ -4630,17 +5364,14 @@ func (p *TSchemaChangeExpr) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4666,11 +5397,14 @@ ReadStructEndError: } func (p *TSchemaChangeExpr) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = &v + _field = &v } + p.TableId = _field return nil } @@ -4684,7 +5418,6 @@ func (p *TSchemaChangeExpr) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4727,6 +5460,7 @@ func (p *TSchemaChangeExpr) String() string { return "" } return fmt.Sprintf("TSchemaChangeExpr(%+v)", *p) + } func (p *TSchemaChangeExpr) DeepEqual(ano *TSchemaChangeExpr) bool { @@ -4788,6 +5522,9 @@ type TExprNode struct { SchemaChangeExpr *TSchemaChangeExpr `thrift:"schema_change_expr,31,optional" frugal:"31,optional,TSchemaChangeExpr" json:"schema_change_expr,omitempty"` ColumnRef *TColumnRef `thrift:"column_ref,32,optional" frugal:"32,optional,TColumnRef" json:"column_ref,omitempty"` MatchPredicate *TMatchPredicate `thrift:"match_predicate,33,optional" 
frugal:"33,optional,TMatchPredicate" json:"match_predicate,omitempty"` + Ipv4Literal *TIPv4Literal `thrift:"ipv4_literal,34,optional" frugal:"34,optional,TIPv4Literal" json:"ipv4_literal,omitempty"` + Ipv6Literal *TIPv6Literal `thrift:"ipv6_literal,35,optional" frugal:"35,optional,TIPv6Literal" json:"ipv6_literal,omitempty"` + Label *string `thrift:"label,36,optional" frugal:"36,optional,string" json:"label,omitempty"` } func NewTExprNode() *TExprNode { @@ -4795,7 +5532,6 @@ func NewTExprNode() *TExprNode { } func (p *TExprNode) InitDefault() { - *p = TExprNode{} } func (p *TExprNode) GetNodeType() (v TExprNodeType) { @@ -5079,6 +5815,33 @@ func (p *TExprNode) GetMatchPredicate() (v *TMatchPredicate) { } return p.MatchPredicate } + +var TExprNode_Ipv4Literal_DEFAULT *TIPv4Literal + +func (p *TExprNode) GetIpv4Literal() (v *TIPv4Literal) { + if !p.IsSetIpv4Literal() { + return TExprNode_Ipv4Literal_DEFAULT + } + return p.Ipv4Literal +} + +var TExprNode_Ipv6Literal_DEFAULT *TIPv6Literal + +func (p *TExprNode) GetIpv6Literal() (v *TIPv6Literal) { + if !p.IsSetIpv6Literal() { + return TExprNode_Ipv6Literal_DEFAULT + } + return p.Ipv6Literal +} + +var TExprNode_Label_DEFAULT string + +func (p *TExprNode) GetLabel() (v string) { + if !p.IsSetLabel() { + return TExprNode_Label_DEFAULT + } + return *p.Label +} func (p *TExprNode) SetNodeType(val TExprNodeType) { p.NodeType = val } @@ -5178,6 +5941,15 @@ func (p *TExprNode) SetColumnRef(val *TColumnRef) { func (p *TExprNode) SetMatchPredicate(val *TMatchPredicate) { p.MatchPredicate = val } +func (p *TExprNode) SetIpv4Literal(val *TIPv4Literal) { + p.Ipv4Literal = val +} +func (p *TExprNode) SetIpv6Literal(val *TIPv6Literal) { + p.Ipv6Literal = val +} +func (p *TExprNode) SetLabel(val *string) { + p.Label = val +} var fieldIDToName_TExprNode = map[int16]string{ 1: "node_type", @@ -5213,6 +5985,9 @@ var fieldIDToName_TExprNode = map[int16]string{ 31: "schema_change_expr", 32: "column_ref", 33: "match_predicate", + 34: "ipv4_literal", + 35: "ipv6_literal", + 36: "label", } func (p *TExprNode) IsSetType() bool { @@ -5335,6 +6110,18 @@ func (p *TExprNode) IsSetMatchPredicate() bool { return p.MatchPredicate != nil } +func (p *TExprNode) IsSetIpv4Literal() bool { + return p.Ipv4Literal != nil +} + +func (p *TExprNode) IsSetIpv6Literal() bool { + return p.Ipv6Literal != nil +} + +func (p *TExprNode) IsSetLabel() bool { + return p.Label != nil +} + func (p *TExprNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -5364,10 +6151,8 @@ func (p *TExprNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodeType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -5375,20 +6160,16 @@ func (p *TExprNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -5396,160 +6177,128 @@ func (p *TExprNode) Read(iprot thrift.TProtocol) (err error) { goto 
ReadFieldError } issetNumChildren = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRUCT { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.STRUCT { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.STRUCT { if err = 
p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.STRUCT { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRUCT { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.I32 { @@ -5557,147 +6306,142 @@ func (p *TExprNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputScale = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.STRUCT { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: if fieldTypeId == thrift.STRUCT { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: if fieldTypeId == thrift.I32 { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: if fieldTypeId == thrift.STRUCT { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: if fieldTypeId == thrift.I32 { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 26: if fieldTypeId == thrift.STRUCT { if err = p.ReadField26(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 27: if fieldTypeId == thrift.I32 { if err = p.ReadField27(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 28: if fieldTypeId == thrift.I32 { if err = p.ReadField28(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 29: if fieldTypeId == thrift.BOOL { if err = p.ReadField29(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError } case 30: if fieldTypeId == thrift.STRUCT { if err = p.ReadField30(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 31: if fieldTypeId == thrift.STRUCT { if err = p.ReadField31(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 32: if fieldTypeId == thrift.STRUCT { if err = p.ReadField32(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 33: if fieldTypeId == thrift.STRUCT { if err = p.ReadField33(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 34: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField34(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 35: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField35(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 36: + if fieldTypeId == thrift.STRING { + if err = p.ReadField36(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5744,278 +6488,324 @@ RequiredFieldNotSetError: } func (p *TExprNode) ReadField1(iprot thrift.TProtocol) error { + + var _field TExprNodeType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NodeType = TExprNodeType(v) + _field = TExprNodeType(v) } + p.NodeType = _field return nil } - func (p *TExprNode) ReadField2(iprot thrift.TProtocol) error { - p.Type = types.NewTTypeDesc() - if err := p.Type.Read(iprot); err != nil { + _field := types.NewTTypeDesc() + if err := _field.Read(iprot); err != nil { return err } + p.Type = _field return nil } - func (p *TExprNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *opcodes.TExprOpcode if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := opcodes.TExprOpcode(v) - p.Opcode = &tmp + _field = &tmp } + p.Opcode = _field return nil } - func (p *TExprNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumChildren = v + _field = v } + p.NumChildren = _field return nil } - func (p *TExprNode) ReadField5(iprot thrift.TProtocol) error { - p.AggExpr = NewTAggregateExpr() - if err := p.AggExpr.Read(iprot); err != nil { + _field := NewTAggregateExpr() + if err := _field.Read(iprot); err != nil { return err } + p.AggExpr = _field return nil } - func (p *TExprNode) ReadField6(iprot thrift.TProtocol) error { - p.BoolLiteral = NewTBoolLiteral() - if err := p.BoolLiteral.Read(iprot); err != nil { + _field := NewTBoolLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.BoolLiteral = _field return nil } - func (p *TExprNode) ReadField7(iprot thrift.TProtocol) error { - p.CaseExpr = 
NewTCaseExpr() - if err := p.CaseExpr.Read(iprot); err != nil { + _field := NewTCaseExpr() + if err := _field.Read(iprot); err != nil { return err } + p.CaseExpr = _field return nil } - func (p *TExprNode) ReadField8(iprot thrift.TProtocol) error { - p.DateLiteral = NewTDateLiteral() - if err := p.DateLiteral.Read(iprot); err != nil { + _field := NewTDateLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.DateLiteral = _field return nil } - func (p *TExprNode) ReadField9(iprot thrift.TProtocol) error { - p.FloatLiteral = NewTFloatLiteral() - if err := p.FloatLiteral.Read(iprot); err != nil { + _field := NewTFloatLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.FloatLiteral = _field return nil } - func (p *TExprNode) ReadField10(iprot thrift.TProtocol) error { - p.IntLiteral = NewTIntLiteral() - if err := p.IntLiteral.Read(iprot); err != nil { + _field := NewTIntLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.IntLiteral = _field return nil } - func (p *TExprNode) ReadField11(iprot thrift.TProtocol) error { - p.InPredicate = NewTInPredicate() - if err := p.InPredicate.Read(iprot); err != nil { + _field := NewTInPredicate() + if err := _field.Read(iprot); err != nil { return err } + p.InPredicate = _field return nil } - func (p *TExprNode) ReadField12(iprot thrift.TProtocol) error { - p.IsNullPred = NewTIsNullPredicate() - if err := p.IsNullPred.Read(iprot); err != nil { + _field := NewTIsNullPredicate() + if err := _field.Read(iprot); err != nil { return err } + p.IsNullPred = _field return nil } - func (p *TExprNode) ReadField13(iprot thrift.TProtocol) error { - p.LikePred = NewTLikePredicate() - if err := p.LikePred.Read(iprot); err != nil { + _field := NewTLikePredicate() + if err := _field.Read(iprot); err != nil { return err } + p.LikePred = _field return nil } - func (p *TExprNode) ReadField14(iprot thrift.TProtocol) error { - p.LiteralPred = NewTLiteralPredicate() - if err := p.LiteralPred.Read(iprot); err != nil { + _field := NewTLiteralPredicate() + if err := _field.Read(iprot); err != nil { return err } + p.LiteralPred = _field return nil } - func (p *TExprNode) ReadField15(iprot thrift.TProtocol) error { - p.SlotRef = NewTSlotRef() - if err := p.SlotRef.Read(iprot); err != nil { + _field := NewTSlotRef() + if err := _field.Read(iprot); err != nil { return err } + p.SlotRef = _field return nil } - func (p *TExprNode) ReadField16(iprot thrift.TProtocol) error { - p.StringLiteral = NewTStringLiteral() - if err := p.StringLiteral.Read(iprot); err != nil { + _field := NewTStringLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.StringLiteral = _field return nil } - func (p *TExprNode) ReadField17(iprot thrift.TProtocol) error { - p.TupleIsNullPred = NewTTupleIsNullPredicate() - if err := p.TupleIsNullPred.Read(iprot); err != nil { + _field := NewTTupleIsNullPredicate() + if err := _field.Read(iprot); err != nil { return err } + p.TupleIsNullPred = _field return nil } - func (p *TExprNode) ReadField18(iprot thrift.TProtocol) error { - p.InfoFunc = NewTInfoFunc() - if err := p.InfoFunc.Read(iprot); err != nil { + _field := NewTInfoFunc() + if err := _field.Read(iprot); err != nil { return err } + p.InfoFunc = _field return nil } - func (p *TExprNode) ReadField19(iprot thrift.TProtocol) error { - p.DecimalLiteral = NewTDecimalLiteral() - if err := p.DecimalLiteral.Read(iprot); err != nil { + _field := NewTDecimalLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.DecimalLiteral = 
_field return nil } - func (p *TExprNode) ReadField20(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputScale = v + _field = v } + p.OutputScale = _field return nil } - func (p *TExprNode) ReadField21(iprot thrift.TProtocol) error { - p.FnCallExpr = NewTFunctionCallExpr() - if err := p.FnCallExpr.Read(iprot); err != nil { + _field := NewTFunctionCallExpr() + if err := _field.Read(iprot); err != nil { return err } + p.FnCallExpr = _field return nil } - func (p *TExprNode) ReadField22(iprot thrift.TProtocol) error { - p.LargeIntLiteral = NewTLargeIntLiteral() - if err := p.LargeIntLiteral.Read(iprot); err != nil { + _field := NewTLargeIntLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.LargeIntLiteral = _field return nil } - func (p *TExprNode) ReadField23(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputColumn = &v + _field = &v } + p.OutputColumn = _field return nil } - func (p *TExprNode) ReadField24(iprot thrift.TProtocol) error { - p.OutputType = types.NewTColumnType() - if err := p.OutputType.Read(iprot); err != nil { + _field := types.NewTColumnType() + if err := _field.Read(iprot); err != nil { return err } + p.OutputType = _field return nil } - func (p *TExprNode) ReadField25(iprot thrift.TProtocol) error { + + var _field *opcodes.TExprOpcode if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := opcodes.TExprOpcode(v) - p.VectorOpcode = &tmp + _field = &tmp } + p.VectorOpcode = _field return nil } - func (p *TExprNode) ReadField26(iprot thrift.TProtocol) error { - p.Fn = types.NewTFunction() - if err := p.Fn.Read(iprot); err != nil { + _field := types.NewTFunction() + if err := _field.Read(iprot); err != nil { return err } + p.Fn = _field return nil } - func (p *TExprNode) ReadField27(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VarargStartIdx = &v + _field = &v } + p.VarargStartIdx = _field return nil } - func (p *TExprNode) ReadField28(iprot thrift.TProtocol) error { + + var _field *types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TPrimitiveType(v) - p.ChildType = &tmp + _field = &tmp } + p.ChildType = _field return nil } - func (p *TExprNode) ReadField29(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsNullable = &v + _field = &v } + p.IsNullable = _field return nil } - func (p *TExprNode) ReadField30(iprot thrift.TProtocol) error { - p.JsonLiteral = NewTJsonLiteral() - if err := p.JsonLiteral.Read(iprot); err != nil { + _field := NewTJsonLiteral() + if err := _field.Read(iprot); err != nil { return err } + p.JsonLiteral = _field return nil } - func (p *TExprNode) ReadField31(iprot thrift.TProtocol) error { - p.SchemaChangeExpr = NewTSchemaChangeExpr() - if err := p.SchemaChangeExpr.Read(iprot); err != nil { + _field := NewTSchemaChangeExpr() + if err := _field.Read(iprot); err != nil { return err } + p.SchemaChangeExpr = _field return nil } - func (p *TExprNode) ReadField32(iprot thrift.TProtocol) error { - p.ColumnRef = NewTColumnRef() - if err := p.ColumnRef.Read(iprot); err != nil { + _field := NewTColumnRef() + if err := _field.Read(iprot); err != nil { return err } + p.ColumnRef = _field return nil } - func (p *TExprNode) ReadField33(iprot thrift.TProtocol) error { - p.MatchPredicate = 
NewTMatchPredicate() - if err := p.MatchPredicate.Read(iprot); err != nil { + _field := NewTMatchPredicate() + if err := _field.Read(iprot); err != nil { + return err + } + p.MatchPredicate = _field + return nil +} +func (p *TExprNode) ReadField34(iprot thrift.TProtocol) error { + _field := NewTIPv4Literal() + if err := _field.Read(iprot); err != nil { + return err + } + p.Ipv4Literal = _field + return nil +} +func (p *TExprNode) ReadField35(iprot thrift.TProtocol) error { + _field := NewTIPv6Literal() + if err := _field.Read(iprot); err != nil { + return err + } + p.Ipv6Literal = _field + return nil +} +func (p *TExprNode) ReadField36(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Label = _field return nil } @@ -6157,7 +6947,18 @@ func (p *TExprNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 33 goto WriteFieldError } - + if err = p.writeField34(oprot); err != nil { + fieldId = 34 + goto WriteFieldError + } + if err = p.writeField35(oprot); err != nil { + fieldId = 35 + goto WriteFieldError + } + if err = p.writeField36(oprot); err != nil { + fieldId = 36 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6795,11 +7596,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) } +func (p *TExprNode) writeField34(oprot thrift.TProtocol) (err error) { + if p.IsSetIpv4Literal() { + if err = oprot.WriteFieldBegin("ipv4_literal", thrift.STRUCT, 34); err != nil { + goto WriteFieldBeginError + } + if err := p.Ipv4Literal.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) +} + +func (p *TExprNode) writeField35(oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6Literal() { + if err = oprot.WriteFieldBegin("ipv6_literal", thrift.STRUCT, 35); err != nil { + goto WriteFieldBeginError + } + if err := p.Ipv6Literal.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err) +} + +func (p *TExprNode) writeField36(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 36); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err) +} + func (p *TExprNode) String() string { if p == nil { return "" } return fmt.Sprintf("TExprNode(%+v)", *p) + } func (p *TExprNode) DeepEqual(ano *TExprNode) bool { @@ -6907,6 +7766,15 @@ func (p *TExprNode) DeepEqual(ano *TExprNode) bool { if !p.Field33DeepEqual(ano.MatchPredicate) { return false } + if !p.Field34DeepEqual(ano.Ipv4Literal) { + return false + } + if 
!p.Field35DeepEqual(ano.Ipv6Literal) { + return false + } + if !p.Field36DeepEqual(ano.Label) { + return false + } return true } @@ -7171,6 +8039,32 @@ func (p *TExprNode) Field33DeepEqual(src *TMatchPredicate) bool { } return true } +func (p *TExprNode) Field34DeepEqual(src *TIPv4Literal) bool { + + if !p.Ipv4Literal.DeepEqual(src) { + return false + } + return true +} +func (p *TExprNode) Field35DeepEqual(src *TIPv6Literal) bool { + + if !p.Ipv6Literal.DeepEqual(src) { + return false + } + return true +} +func (p *TExprNode) Field36DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return false + } + return true +} type TExpr struct { Nodes []*TExprNode `thrift:"nodes,1,required" frugal:"1,required,list" json:"nodes"` @@ -7181,7 +8075,6 @@ func NewTExpr() *TExpr { } func (p *TExpr) InitDefault() { - *p = TExpr{} } func (p *TExpr) GetNodes() (v []*TExprNode) { @@ -7221,17 +8114,14 @@ func (p *TExpr) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7267,18 +8157,22 @@ func (p *TExpr) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Nodes = make([]*TExprNode, 0, size) + _field := make([]*TExprNode, 0, size) + values := make([]TExprNode, size) for i := 0; i < size; i++ { - _elem := NewTExprNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Nodes = append(p.Nodes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Nodes = _field return nil } @@ -7292,7 +8186,6 @@ func (p *TExpr) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7341,6 +8234,7 @@ func (p *TExpr) String() string { return "" } return fmt.Sprintf("TExpr(%+v)", *p) + } func (p *TExpr) DeepEqual(ano *TExpr) bool { @@ -7378,7 +8272,6 @@ func NewTExprList() *TExprList { } func (p *TExprList) InitDefault() { - *p = TExprList{} } func (p *TExprList) GetExprs() (v []*TExpr) { @@ -7418,17 +8311,14 @@ func (p *TExprList) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7464,18 +8354,22 @@ func (p *TExprList) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Exprs = make([]*TExpr, 0, size) + _field := make([]*TExpr, 0, size) + values := make([]TExpr, size) for i := 0; i < size; i++ { - _elem := NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Exprs = append(p.Exprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Exprs = _field return nil } @@ -7489,7 +8383,6 @@ func (p *TExprList) Write(oprot thrift.TProtocol) (err error) { fieldId 
= 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7538,6 +8431,7 @@ func (p *TExprList) String() string { return "" } return fmt.Sprintf("TExprList(%+v)", *p) + } func (p *TExprList) DeepEqual(ano *TExprList) bool { diff --git a/pkg/rpc/kitex_gen/exprs/k-Exprs.go b/pkg/rpc/kitex_gen/exprs/k-Exprs.go index c5ab8ac0..75f86c8b 100644 --- a/pkg/rpc/kitex_gen/exprs/k-Exprs.go +++ b/pkg/rpc/kitex_gen/exprs/k-Exprs.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package exprs @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/opcodes" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) @@ -1261,6 +1262,282 @@ func (p *TLargeIntLiteral) field1Length() int { return l } +func (p *TIPv4Literal) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetValue bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetValue = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetValue { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIPv4Literal[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIPv4Literal[fieldId])) +} + +func (p *TIPv4Literal) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Value = v + + } + return offset, nil +} + +// for compatibility +func (p *TIPv4Literal) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIPv4Literal) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset 
:= 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIPv4Literal") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIPv4Literal) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIPv4Literal") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIPv4Literal) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Value) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TIPv4Literal) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("value", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.Value) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TIPv6Literal) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetValue bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetValue = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetValue { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIPv6Literal[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIPv6Literal[fieldId])) +} + +func (p *TIPv6Literal) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Value = v + + } + return offset, nil +} + +// for compatibility +func (p 
*TIPv6Literal) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIPv6Literal) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIPv6Literal") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIPv6Literal) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TIPv6Literal") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIPv6Literal) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Value) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TIPv6Literal) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.Value) + + l += bthrift.Binary.FieldEndLength() + return l +} + func (p *TInPredicate) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1743,6 +2020,34 @@ func (p *TMatchPredicate) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1857,6 +2162,34 @@ func (p *TMatchPredicate) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TMatchPredicate) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParserLowercase = v + + } + return offset, nil +} + +func (p *TMatchPredicate) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParserStopwords = v + + } + return offset, nil +} + // for compatibility func (p *TMatchPredicate) FastWrite(buf []byte) int { return 0 @@ -1866,9 +2199,11 @@ func (p *TMatchPredicate) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset := 0 offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMatchPredicate") if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1882,6 +2217,8 @@ func (p *TMatchPredicate) BLength() int { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += 
p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1928,6 +2265,28 @@ func (p *TMatchPredicate) fastWriteField3(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TMatchPredicate) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParserLowercase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parser_lowercase", thrift.BOOL, 4) + offset += bthrift.Binary.WriteBool(buf[offset:], p.ParserLowercase) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMatchPredicate) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParserStopwords() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parser_stopwords", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ParserStopwords) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMatchPredicate) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("parser_type", thrift.STRING, 1) @@ -1964,6 +2323,28 @@ func (p *TMatchPredicate) field3Length() int { return l } +func (p *TMatchPredicate) field4Length() int { + l := 0 + if p.IsSetParserLowercase() { + l += bthrift.Binary.FieldBeginLength("parser_lowercase", thrift.BOOL, 4) + l += bthrift.Binary.BoolLength(p.ParserLowercase) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMatchPredicate) field5Length() int { + l := 0 + if p.IsSetParserStopwords() { + l += bthrift.Binary.FieldBeginLength("parser_stopwords", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(p.ParserStopwords) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TLiteralPredicate) FastRead(buf []byte) (int, error) { var err error var offset int @@ -2694,121 +3075,258 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnRef[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnRef[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TColumnRef) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColumnId = &v + + } + return offset, nil +} + +func (p *TColumnRef) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColumnName = &v + + } + return offset, nil +} + +// for compatibility +func (p *TColumnRef) FastWrite(buf []byte) int { + return 0 +} + +func (p *TColumnRef) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TColumnRef") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += 
p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TColumnRef) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TColumnRef") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TColumnRef) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ColumnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TColumnRef) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColumnName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TColumnRef) field1Length() int { + l := 0 + if p.IsSetColumnId() { + l += bthrift.Binary.FieldBeginLength("column_id", thrift.I32, 1) + l += bthrift.Binary.I32Length(*p.ColumnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TColumnRef) field2Length() int { + l := 0 + if p.IsSetColumnName() { + l += bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.ColumnName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStringLiteral) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetValue bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetValue = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetValue { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStringLiteral[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStringLiteral[fieldId])) } -func (p *TColumnRef) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ColumnId = &v - - } - return offset, nil -} - -func (p *TColumnRef) FastReadField2(buf []byte) (int, error) { +func (p *TStringLiteral) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ColumnName = &v + + p.Value = v } return offset, nil } // for compatibility -func (p *TColumnRef) FastWrite(buf []byte) int { +func (p *TStringLiteral) FastWrite(buf []byte) int { return 0 } -func (p *TColumnRef) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TStringLiteral) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TColumnRef") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStringLiteral") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TColumnRef) BLength() int { +func (p *TStringLiteral) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TColumnRef") + l += bthrift.Binary.StructBeginLength("TStringLiteral") if p != nil { l += p.field1Length() - l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TColumnRef) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetColumnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_id", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.ColumnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TColumnRef) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TStringLiteral) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_name", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColumnName) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Value) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TColumnRef) field1Length() int { - l := 0 - if p.IsSetColumnId() { - l += bthrift.Binary.FieldBeginLength("column_id", thrift.I32, 1) - l += bthrift.Binary.I32Length(*p.ColumnId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TColumnRef) field2Length() int { +func (p *TStringLiteral) field1Length() int { l := 0 - if p.IsSetColumnName() { - l += bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 2) - l += 
bthrift.Binary.StringLengthNocopy(*p.ColumnName) + l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.Value) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TStringLiteral) FastRead(buf []byte) (int, error) { +func (p *TNullableStringLiteral) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetValue bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -2832,7 +3350,20 @@ func (p *TStringLiteral) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetValue = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2860,50 +3391,58 @@ func (p *TStringLiteral) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetValue { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStringLiteral[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TNullableStringLiteral[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStringLiteral[fieldId])) } -func (p *TStringLiteral) FastReadField1(buf []byte) (int, error) { +func (p *TNullableStringLiteral) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Value = &v - p.Value = v + } + return offset, nil +} + +func (p *TNullableStringLiteral) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsNull = v } return offset, nil } // for compatibility -func (p *TStringLiteral) FastWrite(buf []byte) int { +func (p *TNullableStringLiteral) FastWrite(buf []byte) int { return 0 } -func (p *TStringLiteral) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TNullableStringLiteral) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStringLiteral") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TNullableStringLiteral") if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) } 
offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -2911,32 +3450,59 @@ func (p *TStringLiteral) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TStringLiteral) BLength() int { +func (p *TNullableStringLiteral) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TStringLiteral") + l += bthrift.Binary.StructBeginLength("TNullableStringLiteral") if p != nil { l += p.field1Length() + l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TStringLiteral) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TNullableStringLiteral) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Value) + if p.IsSetValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Value) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TStringLiteral) field1Length() int { +func (p *TNullableStringLiteral) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNull() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_null", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsNull) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TNullableStringLiteral) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.Value) + if p.IsSetValue() { + l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Value) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TNullableStringLiteral) field2Length() int { + l := 0 + if p.IsSetIsNull() { + l += bthrift.Binary.FieldBeginLength("is_null", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(p.IsNull) + + l += bthrift.Binary.FieldEndLength() + } return l } @@ -4082,6 +4648,48 @@ func (p *TExprNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 34: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField34(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 35: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField35(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 36: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField36(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4576,6 +5184,45 @@ func (p *TExprNode) FastReadField33(buf []byte) (int, error) { return offset, nil } +func (p *TExprNode) FastReadField34(buf []byte) (int, error) { + offset := 0 + + tmp := NewTIPv4Literal() 
+ if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Ipv4Literal = tmp + return offset, nil +} + +func (p *TExprNode) FastReadField35(buf []byte) (int, error) { + offset := 0 + + tmp := NewTIPv6Literal() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Ipv6Literal = tmp + return offset, nil +} + +func (p *TExprNode) FastReadField36(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + // for compatibility func (p *TExprNode) FastWrite(buf []byte) int { return 0 @@ -4618,6 +5265,9 @@ func (p *TExprNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField31(buf[offset:], binaryWriter) offset += p.fastWriteField32(buf[offset:], binaryWriter) offset += p.fastWriteField33(buf[offset:], binaryWriter) + offset += p.fastWriteField34(buf[offset:], binaryWriter) + offset += p.fastWriteField35(buf[offset:], binaryWriter) + offset += p.fastWriteField36(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -4661,6 +5311,9 @@ func (p *TExprNode) BLength() int { l += p.field31Length() l += p.field32Length() l += p.field33Length() + l += p.field34Length() + l += p.field35Length() + l += p.field36Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4998,6 +5651,37 @@ func (p *TExprNode) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p *TExprNode) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIpv4Literal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ipv4_literal", thrift.STRUCT, 34) + offset += p.Ipv4Literal.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExprNode) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIpv6Literal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ipv6_literal", thrift.STRUCT, 35) + offset += p.Ipv6Literal.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExprNode) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 36) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TExprNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("node_type", thrift.I32, 1) @@ -5329,6 +6013,37 @@ func (p *TExprNode) field33Length() int { return l } +func (p *TExprNode) field34Length() int { + l := 0 + if p.IsSetIpv4Literal() { + l += bthrift.Binary.FieldBeginLength("ipv4_literal", thrift.STRUCT, 34) + l += p.Ipv4Literal.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExprNode) field35Length() int { + l := 0 + if p.IsSetIpv6Literal() { + l += bthrift.Binary.FieldBeginLength("ipv6_literal", thrift.STRUCT, 35) + l += p.Ipv6Literal.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExprNode) field36Length() int { + l := 0 
+ if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 36) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TExpr) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go index 486335a2..15268b66 100644 --- a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package frontendservice @@ -10,6 +10,7 @@ import ( "fmt" "github.com/apache/thrift/lib/go/thrift" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/data" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/datasinks" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/masterservice" @@ -220,6 +221,48 @@ func (p *FrontendServiceVersion) Value() (driver.Value, error) { return int64(*p), nil } +type TSubTxnType int64 + +const ( + TSubTxnType_INSERT TSubTxnType = 0 + TSubTxnType_DELETE TSubTxnType = 1 +) + +func (p TSubTxnType) String() string { + switch p { + case TSubTxnType_INSERT: + return "INSERT" + case TSubTxnType_DELETE: + return "DELETE" + } + return "" +} + +func TSubTxnTypeFromString(s string) (TSubTxnType, error) { + switch s { + case "INSERT": + return TSubTxnType_INSERT, nil + case "DELETE": + return TSubTxnType_DELETE, nil + } + return TSubTxnType(0), fmt.Errorf("not a valid TSubTxnType string") +} + +func TSubTxnTypePtr(v TSubTxnType) *TSubTxnType { return &v } +func (p *TSubTxnType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TSubTxnType(result.Int64) + return +} + +func (p *TSubTxnType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TFrontendPingFrontendStatusCode int64 const ( @@ -267,13 +310,40 @@ func (p *TFrontendPingFrontendStatusCode) Value() (driver.Value, error) { type TSchemaTableName int64 const ( - TSchemaTableName_METADATA_TABLE TSchemaTableName = 1 + TSchemaTableName_METADATA_TABLE TSchemaTableName = 1 + TSchemaTableName_ACTIVE_QUERIES TSchemaTableName = 2 + TSchemaTableName_WORKLOAD_GROUPS TSchemaTableName = 3 + TSchemaTableName_ROUTINES_INFO TSchemaTableName = 4 + TSchemaTableName_WORKLOAD_SCHEDULE_POLICY TSchemaTableName = 5 + TSchemaTableName_TABLE_OPTIONS TSchemaTableName = 6 + TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES TSchemaTableName = 7 + TSchemaTableName_TABLE_PROPERTIES TSchemaTableName = 8 + TSchemaTableName_CATALOG_META_CACHE_STATS TSchemaTableName = 9 + TSchemaTableName_PARTITIONS TSchemaTableName = 10 ) func (p TSchemaTableName) String() string { switch p { case TSchemaTableName_METADATA_TABLE: return "METADATA_TABLE" + case TSchemaTableName_ACTIVE_QUERIES: + return "ACTIVE_QUERIES" + case TSchemaTableName_WORKLOAD_GROUPS: + return "WORKLOAD_GROUPS" + case TSchemaTableName_ROUTINES_INFO: + return "ROUTINES_INFO" + case TSchemaTableName_WORKLOAD_SCHEDULE_POLICY: + return "WORKLOAD_SCHEDULE_POLICY" + case TSchemaTableName_TABLE_OPTIONS: + return "TABLE_OPTIONS" + case TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES: + return "WORKLOAD_GROUP_PRIVILEGES" + case TSchemaTableName_TABLE_PROPERTIES: + return "TABLE_PROPERTIES" + case 
TSchemaTableName_CATALOG_META_CACHE_STATS: + return "CATALOG_META_CACHE_STATS" + case TSchemaTableName_PARTITIONS: + return "PARTITIONS" } return "" } @@ -282,6 +352,24 @@ func TSchemaTableNameFromString(s string) (TSchemaTableName, error) { switch s { case "METADATA_TABLE": return TSchemaTableName_METADATA_TABLE, nil + case "ACTIVE_QUERIES": + return TSchemaTableName_ACTIVE_QUERIES, nil + case "WORKLOAD_GROUPS": + return TSchemaTableName_WORKLOAD_GROUPS, nil + case "ROUTINES_INFO": + return TSchemaTableName_ROUTINES_INFO, nil + case "WORKLOAD_SCHEDULE_POLICY": + return TSchemaTableName_WORKLOAD_SCHEDULE_POLICY, nil + case "TABLE_OPTIONS": + return TSchemaTableName_TABLE_OPTIONS, nil + case "WORKLOAD_GROUP_PRIVILEGES": + return TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES, nil + case "TABLE_PROPERTIES": + return TSchemaTableName_TABLE_PROPERTIES, nil + case "CATALOG_META_CACHE_STATS": + return TSchemaTableName_CATALOG_META_CACHE_STATS, nil + case "PARTITIONS": + return TSchemaTableName_PARTITIONS, nil } return TSchemaTableName(0), fmt.Errorf("not a valid TSchemaTableName string") } @@ -525,20 +613,123 @@ func (p *TQueryStatsType) Value() (driver.Value, error) { type TBinlogType int64 const ( - TBinlogType_UPSERT TBinlogType = 0 - TBinlogType_ADD_PARTITION TBinlogType = 1 - TBinlogType_CREATE_TABLE TBinlogType = 2 - TBinlogType_DROP_PARTITION TBinlogType = 3 - TBinlogType_DROP_TABLE TBinlogType = 4 - TBinlogType_ALTER_JOB TBinlogType = 5 - TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS TBinlogType = 6 - TBinlogType_DUMMY TBinlogType = 7 - TBinlogType_ALTER_DATABASE_PROPERTY TBinlogType = 8 - TBinlogType_MODIFY_TABLE_PROPERTY TBinlogType = 9 - TBinlogType_BARRIER TBinlogType = 10 - TBinlogType_MODIFY_PARTITIONS TBinlogType = 11 - TBinlogType_REPLACE_PARTITIONS TBinlogType = 12 - TBinlogType_TRUNCATE_TABLE TBinlogType = 13 + TBinlogType_UPSERT TBinlogType = 0 + TBinlogType_ADD_PARTITION TBinlogType = 1 + TBinlogType_CREATE_TABLE TBinlogType = 2 + TBinlogType_DROP_PARTITION TBinlogType = 3 + TBinlogType_DROP_TABLE TBinlogType = 4 + TBinlogType_ALTER_JOB TBinlogType = 5 + TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS TBinlogType = 6 + TBinlogType_DUMMY TBinlogType = 7 + TBinlogType_ALTER_DATABASE_PROPERTY TBinlogType = 8 + TBinlogType_MODIFY_TABLE_PROPERTY TBinlogType = 9 + TBinlogType_BARRIER TBinlogType = 10 + TBinlogType_MODIFY_PARTITIONS TBinlogType = 11 + TBinlogType_REPLACE_PARTITIONS TBinlogType = 12 + TBinlogType_TRUNCATE_TABLE TBinlogType = 13 + TBinlogType_RENAME_TABLE TBinlogType = 14 + TBinlogType_RENAME_COLUMN TBinlogType = 15 + TBinlogType_MODIFY_COMMENT TBinlogType = 16 + TBinlogType_MODIFY_VIEW_DEF TBinlogType = 17 + TBinlogType_REPLACE_TABLE TBinlogType = 18 + TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES TBinlogType = 19 + TBinlogType_INDEX_CHANGE_JOB TBinlogType = 20 + TBinlogType_RENAME_ROLLUP TBinlogType = 21 + TBinlogType_RENAME_PARTITION TBinlogType = 22 + TBinlogType_DROP_ROLLUP TBinlogType = 23 + TBinlogType_RECOVER_INFO TBinlogType = 24 + TBinlogType_RESTORE_INFO TBinlogType = 25 + TBinlogType_MIN_UNKNOWN TBinlogType = 26 + TBinlogType_UNKNOWN_11 TBinlogType = 27 + TBinlogType_UNKNOWN_12 TBinlogType = 28 + TBinlogType_UNKNOWN_13 TBinlogType = 29 + TBinlogType_UNKNOWN_14 TBinlogType = 30 + TBinlogType_UNKNOWN_15 TBinlogType = 31 + TBinlogType_UNKNOWN_16 TBinlogType = 32 + TBinlogType_UNKNOWN_17 TBinlogType = 33 + TBinlogType_UNKNOWN_18 TBinlogType = 34 + TBinlogType_UNKNOWN_19 TBinlogType = 35 + TBinlogType_UNKNOWN_20 TBinlogType = 36 + TBinlogType_UNKNOWN_21 
TBinlogType = 37 + TBinlogType_UNKNOWN_22 TBinlogType = 38 + TBinlogType_UNKNOWN_23 TBinlogType = 39 + TBinlogType_UNKNOWN_24 TBinlogType = 40 + TBinlogType_UNKNOWN_25 TBinlogType = 41 + TBinlogType_UNKNOWN_26 TBinlogType = 42 + TBinlogType_UNKNOWN_27 TBinlogType = 43 + TBinlogType_UNKNOWN_28 TBinlogType = 44 + TBinlogType_UNKNOWN_29 TBinlogType = 45 + TBinlogType_UNKNOWN_30 TBinlogType = 46 + TBinlogType_UNKNOWN_31 TBinlogType = 47 + TBinlogType_UNKNOWN_32 TBinlogType = 48 + TBinlogType_UNKNOWN_33 TBinlogType = 49 + TBinlogType_UNKNOWN_34 TBinlogType = 50 + TBinlogType_UNKNOWN_35 TBinlogType = 51 + TBinlogType_UNKNOWN_36 TBinlogType = 52 + TBinlogType_UNKNOWN_37 TBinlogType = 53 + TBinlogType_UNKNOWN_38 TBinlogType = 54 + TBinlogType_UNKNOWN_39 TBinlogType = 55 + TBinlogType_UNKNOWN_40 TBinlogType = 56 + TBinlogType_UNKNOWN_41 TBinlogType = 57 + TBinlogType_UNKNOWN_42 TBinlogType = 58 + TBinlogType_UNKNOWN_43 TBinlogType = 59 + TBinlogType_UNKNOWN_44 TBinlogType = 60 + TBinlogType_UNKNOWN_45 TBinlogType = 61 + TBinlogType_UNKNOWN_46 TBinlogType = 62 + TBinlogType_UNKNOWN_47 TBinlogType = 63 + TBinlogType_UNKNOWN_48 TBinlogType = 64 + TBinlogType_UNKNOWN_49 TBinlogType = 65 + TBinlogType_UNKNOWN_50 TBinlogType = 66 + TBinlogType_UNKNOWN_51 TBinlogType = 67 + TBinlogType_UNKNOWN_52 TBinlogType = 68 + TBinlogType_UNKNOWN_53 TBinlogType = 69 + TBinlogType_UNKNOWN_54 TBinlogType = 70 + TBinlogType_UNKNOWN_55 TBinlogType = 71 + TBinlogType_UNKNOWN_56 TBinlogType = 72 + TBinlogType_UNKNOWN_57 TBinlogType = 73 + TBinlogType_UNKNOWN_58 TBinlogType = 74 + TBinlogType_UNKNOWN_59 TBinlogType = 75 + TBinlogType_UNKNOWN_60 TBinlogType = 76 + TBinlogType_UNKNOWN_61 TBinlogType = 77 + TBinlogType_UNKNOWN_62 TBinlogType = 78 + TBinlogType_UNKNOWN_63 TBinlogType = 79 + TBinlogType_UNKNOWN_64 TBinlogType = 80 + TBinlogType_UNKNOWN_65 TBinlogType = 81 + TBinlogType_UNKNOWN_66 TBinlogType = 82 + TBinlogType_UNKNOWN_67 TBinlogType = 83 + TBinlogType_UNKNOWN_68 TBinlogType = 84 + TBinlogType_UNKNOWN_69 TBinlogType = 85 + TBinlogType_UNKNOWN_70 TBinlogType = 86 + TBinlogType_UNKNOWN_71 TBinlogType = 87 + TBinlogType_UNKNOWN_72 TBinlogType = 88 + TBinlogType_UNKNOWN_73 TBinlogType = 89 + TBinlogType_UNKNOWN_74 TBinlogType = 90 + TBinlogType_UNKNOWN_75 TBinlogType = 91 + TBinlogType_UNKNOWN_76 TBinlogType = 92 + TBinlogType_UNKNOWN_77 TBinlogType = 93 + TBinlogType_UNKNOWN_78 TBinlogType = 94 + TBinlogType_UNKNOWN_79 TBinlogType = 95 + TBinlogType_UNKNOWN_80 TBinlogType = 96 + TBinlogType_UNKNOWN_81 TBinlogType = 97 + TBinlogType_UNKNOWN_82 TBinlogType = 98 + TBinlogType_UNKNOWN_83 TBinlogType = 99 + TBinlogType_UNKNOWN_84 TBinlogType = 100 + TBinlogType_UNKNOWN_85 TBinlogType = 101 + TBinlogType_UNKNOWN_86 TBinlogType = 102 + TBinlogType_UNKNOWN_87 TBinlogType = 103 + TBinlogType_UNKNOWN_88 TBinlogType = 104 + TBinlogType_UNKNOWN_89 TBinlogType = 105 + TBinlogType_UNKNOWN_90 TBinlogType = 106 + TBinlogType_UNKNOWN_91 TBinlogType = 107 + TBinlogType_UNKNOWN_92 TBinlogType = 108 + TBinlogType_UNKNOWN_93 TBinlogType = 109 + TBinlogType_UNKNOWN_94 TBinlogType = 110 + TBinlogType_UNKNOWN_95 TBinlogType = 111 + TBinlogType_UNKNOWN_96 TBinlogType = 112 + TBinlogType_UNKNOWN_97 TBinlogType = 113 + TBinlogType_UNKNOWN_98 TBinlogType = 114 + TBinlogType_UNKNOWN_99 TBinlogType = 115 + TBinlogType_UNKNOWN_100 TBinlogType = 116 ) func (p TBinlogType) String() string { @@ -571,6 +762,212 @@ func (p TBinlogType) String() string { return "REPLACE_PARTITIONS" case TBinlogType_TRUNCATE_TABLE: return "TRUNCATE_TABLE" + case 
TBinlogType_RENAME_TABLE: + return "RENAME_TABLE" + case TBinlogType_RENAME_COLUMN: + return "RENAME_COLUMN" + case TBinlogType_MODIFY_COMMENT: + return "MODIFY_COMMENT" + case TBinlogType_MODIFY_VIEW_DEF: + return "MODIFY_VIEW_DEF" + case TBinlogType_REPLACE_TABLE: + return "REPLACE_TABLE" + case TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES: + return "MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES" + case TBinlogType_INDEX_CHANGE_JOB: + return "INDEX_CHANGE_JOB" + case TBinlogType_RENAME_ROLLUP: + return "RENAME_ROLLUP" + case TBinlogType_RENAME_PARTITION: + return "RENAME_PARTITION" + case TBinlogType_DROP_ROLLUP: + return "DROP_ROLLUP" + case TBinlogType_RECOVER_INFO: + return "RECOVER_INFO" + case TBinlogType_RESTORE_INFO: + return "RESTORE_INFO" + case TBinlogType_MIN_UNKNOWN: + return "MIN_UNKNOWN" + case TBinlogType_UNKNOWN_11: + return "UNKNOWN_11" + case TBinlogType_UNKNOWN_12: + return "UNKNOWN_12" + case TBinlogType_UNKNOWN_13: + return "UNKNOWN_13" + case TBinlogType_UNKNOWN_14: + return "UNKNOWN_14" + case TBinlogType_UNKNOWN_15: + return "UNKNOWN_15" + case TBinlogType_UNKNOWN_16: + return "UNKNOWN_16" + case TBinlogType_UNKNOWN_17: + return "UNKNOWN_17" + case TBinlogType_UNKNOWN_18: + return "UNKNOWN_18" + case TBinlogType_UNKNOWN_19: + return "UNKNOWN_19" + case TBinlogType_UNKNOWN_20: + return "UNKNOWN_20" + case TBinlogType_UNKNOWN_21: + return "UNKNOWN_21" + case TBinlogType_UNKNOWN_22: + return "UNKNOWN_22" + case TBinlogType_UNKNOWN_23: + return "UNKNOWN_23" + case TBinlogType_UNKNOWN_24: + return "UNKNOWN_24" + case TBinlogType_UNKNOWN_25: + return "UNKNOWN_25" + case TBinlogType_UNKNOWN_26: + return "UNKNOWN_26" + case TBinlogType_UNKNOWN_27: + return "UNKNOWN_27" + case TBinlogType_UNKNOWN_28: + return "UNKNOWN_28" + case TBinlogType_UNKNOWN_29: + return "UNKNOWN_29" + case TBinlogType_UNKNOWN_30: + return "UNKNOWN_30" + case TBinlogType_UNKNOWN_31: + return "UNKNOWN_31" + case TBinlogType_UNKNOWN_32: + return "UNKNOWN_32" + case TBinlogType_UNKNOWN_33: + return "UNKNOWN_33" + case TBinlogType_UNKNOWN_34: + return "UNKNOWN_34" + case TBinlogType_UNKNOWN_35: + return "UNKNOWN_35" + case TBinlogType_UNKNOWN_36: + return "UNKNOWN_36" + case TBinlogType_UNKNOWN_37: + return "UNKNOWN_37" + case TBinlogType_UNKNOWN_38: + return "UNKNOWN_38" + case TBinlogType_UNKNOWN_39: + return "UNKNOWN_39" + case TBinlogType_UNKNOWN_40: + return "UNKNOWN_40" + case TBinlogType_UNKNOWN_41: + return "UNKNOWN_41" + case TBinlogType_UNKNOWN_42: + return "UNKNOWN_42" + case TBinlogType_UNKNOWN_43: + return "UNKNOWN_43" + case TBinlogType_UNKNOWN_44: + return "UNKNOWN_44" + case TBinlogType_UNKNOWN_45: + return "UNKNOWN_45" + case TBinlogType_UNKNOWN_46: + return "UNKNOWN_46" + case TBinlogType_UNKNOWN_47: + return "UNKNOWN_47" + case TBinlogType_UNKNOWN_48: + return "UNKNOWN_48" + case TBinlogType_UNKNOWN_49: + return "UNKNOWN_49" + case TBinlogType_UNKNOWN_50: + return "UNKNOWN_50" + case TBinlogType_UNKNOWN_51: + return "UNKNOWN_51" + case TBinlogType_UNKNOWN_52: + return "UNKNOWN_52" + case TBinlogType_UNKNOWN_53: + return "UNKNOWN_53" + case TBinlogType_UNKNOWN_54: + return "UNKNOWN_54" + case TBinlogType_UNKNOWN_55: + return "UNKNOWN_55" + case TBinlogType_UNKNOWN_56: + return "UNKNOWN_56" + case TBinlogType_UNKNOWN_57: + return "UNKNOWN_57" + case TBinlogType_UNKNOWN_58: + return "UNKNOWN_58" + case TBinlogType_UNKNOWN_59: + return "UNKNOWN_59" + case TBinlogType_UNKNOWN_60: + return "UNKNOWN_60" + case TBinlogType_UNKNOWN_61: + return "UNKNOWN_61" + case TBinlogType_UNKNOWN_62: 
+ return "UNKNOWN_62" + case TBinlogType_UNKNOWN_63: + return "UNKNOWN_63" + case TBinlogType_UNKNOWN_64: + return "UNKNOWN_64" + case TBinlogType_UNKNOWN_65: + return "UNKNOWN_65" + case TBinlogType_UNKNOWN_66: + return "UNKNOWN_66" + case TBinlogType_UNKNOWN_67: + return "UNKNOWN_67" + case TBinlogType_UNKNOWN_68: + return "UNKNOWN_68" + case TBinlogType_UNKNOWN_69: + return "UNKNOWN_69" + case TBinlogType_UNKNOWN_70: + return "UNKNOWN_70" + case TBinlogType_UNKNOWN_71: + return "UNKNOWN_71" + case TBinlogType_UNKNOWN_72: + return "UNKNOWN_72" + case TBinlogType_UNKNOWN_73: + return "UNKNOWN_73" + case TBinlogType_UNKNOWN_74: + return "UNKNOWN_74" + case TBinlogType_UNKNOWN_75: + return "UNKNOWN_75" + case TBinlogType_UNKNOWN_76: + return "UNKNOWN_76" + case TBinlogType_UNKNOWN_77: + return "UNKNOWN_77" + case TBinlogType_UNKNOWN_78: + return "UNKNOWN_78" + case TBinlogType_UNKNOWN_79: + return "UNKNOWN_79" + case TBinlogType_UNKNOWN_80: + return "UNKNOWN_80" + case TBinlogType_UNKNOWN_81: + return "UNKNOWN_81" + case TBinlogType_UNKNOWN_82: + return "UNKNOWN_82" + case TBinlogType_UNKNOWN_83: + return "UNKNOWN_83" + case TBinlogType_UNKNOWN_84: + return "UNKNOWN_84" + case TBinlogType_UNKNOWN_85: + return "UNKNOWN_85" + case TBinlogType_UNKNOWN_86: + return "UNKNOWN_86" + case TBinlogType_UNKNOWN_87: + return "UNKNOWN_87" + case TBinlogType_UNKNOWN_88: + return "UNKNOWN_88" + case TBinlogType_UNKNOWN_89: + return "UNKNOWN_89" + case TBinlogType_UNKNOWN_90: + return "UNKNOWN_90" + case TBinlogType_UNKNOWN_91: + return "UNKNOWN_91" + case TBinlogType_UNKNOWN_92: + return "UNKNOWN_92" + case TBinlogType_UNKNOWN_93: + return "UNKNOWN_93" + case TBinlogType_UNKNOWN_94: + return "UNKNOWN_94" + case TBinlogType_UNKNOWN_95: + return "UNKNOWN_95" + case TBinlogType_UNKNOWN_96: + return "UNKNOWN_96" + case TBinlogType_UNKNOWN_97: + return "UNKNOWN_97" + case TBinlogType_UNKNOWN_98: + return "UNKNOWN_98" + case TBinlogType_UNKNOWN_99: + return "UNKNOWN_99" + case TBinlogType_UNKNOWN_100: + return "UNKNOWN_100" } return "" } @@ -605,6 +1002,212 @@ func TBinlogTypeFromString(s string) (TBinlogType, error) { return TBinlogType_REPLACE_PARTITIONS, nil case "TRUNCATE_TABLE": return TBinlogType_TRUNCATE_TABLE, nil + case "RENAME_TABLE": + return TBinlogType_RENAME_TABLE, nil + case "RENAME_COLUMN": + return TBinlogType_RENAME_COLUMN, nil + case "MODIFY_COMMENT": + return TBinlogType_MODIFY_COMMENT, nil + case "MODIFY_VIEW_DEF": + return TBinlogType_MODIFY_VIEW_DEF, nil + case "REPLACE_TABLE": + return TBinlogType_REPLACE_TABLE, nil + case "MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES": + return TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES, nil + case "INDEX_CHANGE_JOB": + return TBinlogType_INDEX_CHANGE_JOB, nil + case "RENAME_ROLLUP": + return TBinlogType_RENAME_ROLLUP, nil + case "RENAME_PARTITION": + return TBinlogType_RENAME_PARTITION, nil + case "DROP_ROLLUP": + return TBinlogType_DROP_ROLLUP, nil + case "RECOVER_INFO": + return TBinlogType_RECOVER_INFO, nil + case "RESTORE_INFO": + return TBinlogType_RESTORE_INFO, nil + case "MIN_UNKNOWN": + return TBinlogType_MIN_UNKNOWN, nil + case "UNKNOWN_11": + return TBinlogType_UNKNOWN_11, nil + case "UNKNOWN_12": + return TBinlogType_UNKNOWN_12, nil + case "UNKNOWN_13": + return TBinlogType_UNKNOWN_13, nil + case "UNKNOWN_14": + return TBinlogType_UNKNOWN_14, nil + case "UNKNOWN_15": + return TBinlogType_UNKNOWN_15, nil + case "UNKNOWN_16": + return TBinlogType_UNKNOWN_16, nil + case "UNKNOWN_17": + return TBinlogType_UNKNOWN_17, nil + case 
"UNKNOWN_18": + return TBinlogType_UNKNOWN_18, nil + case "UNKNOWN_19": + return TBinlogType_UNKNOWN_19, nil + case "UNKNOWN_20": + return TBinlogType_UNKNOWN_20, nil + case "UNKNOWN_21": + return TBinlogType_UNKNOWN_21, nil + case "UNKNOWN_22": + return TBinlogType_UNKNOWN_22, nil + case "UNKNOWN_23": + return TBinlogType_UNKNOWN_23, nil + case "UNKNOWN_24": + return TBinlogType_UNKNOWN_24, nil + case "UNKNOWN_25": + return TBinlogType_UNKNOWN_25, nil + case "UNKNOWN_26": + return TBinlogType_UNKNOWN_26, nil + case "UNKNOWN_27": + return TBinlogType_UNKNOWN_27, nil + case "UNKNOWN_28": + return TBinlogType_UNKNOWN_28, nil + case "UNKNOWN_29": + return TBinlogType_UNKNOWN_29, nil + case "UNKNOWN_30": + return TBinlogType_UNKNOWN_30, nil + case "UNKNOWN_31": + return TBinlogType_UNKNOWN_31, nil + case "UNKNOWN_32": + return TBinlogType_UNKNOWN_32, nil + case "UNKNOWN_33": + return TBinlogType_UNKNOWN_33, nil + case "UNKNOWN_34": + return TBinlogType_UNKNOWN_34, nil + case "UNKNOWN_35": + return TBinlogType_UNKNOWN_35, nil + case "UNKNOWN_36": + return TBinlogType_UNKNOWN_36, nil + case "UNKNOWN_37": + return TBinlogType_UNKNOWN_37, nil + case "UNKNOWN_38": + return TBinlogType_UNKNOWN_38, nil + case "UNKNOWN_39": + return TBinlogType_UNKNOWN_39, nil + case "UNKNOWN_40": + return TBinlogType_UNKNOWN_40, nil + case "UNKNOWN_41": + return TBinlogType_UNKNOWN_41, nil + case "UNKNOWN_42": + return TBinlogType_UNKNOWN_42, nil + case "UNKNOWN_43": + return TBinlogType_UNKNOWN_43, nil + case "UNKNOWN_44": + return TBinlogType_UNKNOWN_44, nil + case "UNKNOWN_45": + return TBinlogType_UNKNOWN_45, nil + case "UNKNOWN_46": + return TBinlogType_UNKNOWN_46, nil + case "UNKNOWN_47": + return TBinlogType_UNKNOWN_47, nil + case "UNKNOWN_48": + return TBinlogType_UNKNOWN_48, nil + case "UNKNOWN_49": + return TBinlogType_UNKNOWN_49, nil + case "UNKNOWN_50": + return TBinlogType_UNKNOWN_50, nil + case "UNKNOWN_51": + return TBinlogType_UNKNOWN_51, nil + case "UNKNOWN_52": + return TBinlogType_UNKNOWN_52, nil + case "UNKNOWN_53": + return TBinlogType_UNKNOWN_53, nil + case "UNKNOWN_54": + return TBinlogType_UNKNOWN_54, nil + case "UNKNOWN_55": + return TBinlogType_UNKNOWN_55, nil + case "UNKNOWN_56": + return TBinlogType_UNKNOWN_56, nil + case "UNKNOWN_57": + return TBinlogType_UNKNOWN_57, nil + case "UNKNOWN_58": + return TBinlogType_UNKNOWN_58, nil + case "UNKNOWN_59": + return TBinlogType_UNKNOWN_59, nil + case "UNKNOWN_60": + return TBinlogType_UNKNOWN_60, nil + case "UNKNOWN_61": + return TBinlogType_UNKNOWN_61, nil + case "UNKNOWN_62": + return TBinlogType_UNKNOWN_62, nil + case "UNKNOWN_63": + return TBinlogType_UNKNOWN_63, nil + case "UNKNOWN_64": + return TBinlogType_UNKNOWN_64, nil + case "UNKNOWN_65": + return TBinlogType_UNKNOWN_65, nil + case "UNKNOWN_66": + return TBinlogType_UNKNOWN_66, nil + case "UNKNOWN_67": + return TBinlogType_UNKNOWN_67, nil + case "UNKNOWN_68": + return TBinlogType_UNKNOWN_68, nil + case "UNKNOWN_69": + return TBinlogType_UNKNOWN_69, nil + case "UNKNOWN_70": + return TBinlogType_UNKNOWN_70, nil + case "UNKNOWN_71": + return TBinlogType_UNKNOWN_71, nil + case "UNKNOWN_72": + return TBinlogType_UNKNOWN_72, nil + case "UNKNOWN_73": + return TBinlogType_UNKNOWN_73, nil + case "UNKNOWN_74": + return TBinlogType_UNKNOWN_74, nil + case "UNKNOWN_75": + return TBinlogType_UNKNOWN_75, nil + case "UNKNOWN_76": + return TBinlogType_UNKNOWN_76, nil + case "UNKNOWN_77": + return TBinlogType_UNKNOWN_77, nil + case "UNKNOWN_78": + return TBinlogType_UNKNOWN_78, nil + case "UNKNOWN_79": + 
return TBinlogType_UNKNOWN_79, nil + case "UNKNOWN_80": + return TBinlogType_UNKNOWN_80, nil + case "UNKNOWN_81": + return TBinlogType_UNKNOWN_81, nil + case "UNKNOWN_82": + return TBinlogType_UNKNOWN_82, nil + case "UNKNOWN_83": + return TBinlogType_UNKNOWN_83, nil + case "UNKNOWN_84": + return TBinlogType_UNKNOWN_84, nil + case "UNKNOWN_85": + return TBinlogType_UNKNOWN_85, nil + case "UNKNOWN_86": + return TBinlogType_UNKNOWN_86, nil + case "UNKNOWN_87": + return TBinlogType_UNKNOWN_87, nil + case "UNKNOWN_88": + return TBinlogType_UNKNOWN_88, nil + case "UNKNOWN_89": + return TBinlogType_UNKNOWN_89, nil + case "UNKNOWN_90": + return TBinlogType_UNKNOWN_90, nil + case "UNKNOWN_91": + return TBinlogType_UNKNOWN_91, nil + case "UNKNOWN_92": + return TBinlogType_UNKNOWN_92, nil + case "UNKNOWN_93": + return TBinlogType_UNKNOWN_93, nil + case "UNKNOWN_94": + return TBinlogType_UNKNOWN_94, nil + case "UNKNOWN_95": + return TBinlogType_UNKNOWN_95, nil + case "UNKNOWN_96": + return TBinlogType_UNKNOWN_96, nil + case "UNKNOWN_97": + return TBinlogType_UNKNOWN_97, nil + case "UNKNOWN_98": + return TBinlogType_UNKNOWN_98, nil + case "UNKNOWN_99": + return TBinlogType_UNKNOWN_99, nil + case "UNKNOWN_100": + return TBinlogType_UNKNOWN_100, nil } return TBinlogType(0), fmt.Errorf("not a valid TBinlogType string") } @@ -669,7 +1272,7 @@ func (p *TSnapshotType) Value() (driver.Value, error) { type TGetBinlogLagRequest = TGetBinlogRequest func NewTGetBinlogLagRequest() *TGetBinlogLagRequest { - return NewTGetBinlogRequest() + return (*TGetBinlogLagRequest)(NewTGetBinlogRequest()) } type TSetSessionParams struct { @@ -681,7 +1284,6 @@ func NewTSetSessionParams() *TSetSessionParams { } func (p *TSetSessionParams) InitDefault() { - *p = TSetSessionParams{} } func (p *TSetSessionParams) GetUser() (v string) { @@ -721,17 +1323,14 @@ func (p *TSetSessionParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -763,11 +1362,14 @@ RequiredFieldNotSetError: } func (p *TSetSessionParams) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } @@ -781,7 +1383,6 @@ func (p *TSetSessionParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -822,6 +1423,7 @@ func (p *TSetSessionParams) String() string { return "" } return fmt.Sprintf("TSetSessionParams(%+v)", *p) + } func (p *TSetSessionParams) DeepEqual(ano *TSetSessionParams) bool { @@ -854,7 +1456,6 @@ func NewTAuthenticateParams() *TAuthenticateParams { } func (p *TAuthenticateParams) InitDefault() { - *p = TAuthenticateParams{} } func (p *TAuthenticateParams) GetUser() (v string) { @@ -903,10 +1504,8 @@ func (p *TAuthenticateParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -914,17 +1513,14 @@ func (p *TAuthenticateParams) 
Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -961,20 +1557,25 @@ RequiredFieldNotSetError: } func (p *TAuthenticateParams) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TAuthenticateParams) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Passwd = _field return nil } @@ -992,7 +1593,6 @@ func (p *TAuthenticateParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1050,6 +1650,7 @@ func (p *TAuthenticateParams) String() string { return "" } return fmt.Sprintf("TAuthenticateParams(%+v)", *p) + } func (p *TAuthenticateParams) DeepEqual(ano *TAuthenticateParams) bool { @@ -1098,7 +1699,6 @@ func NewTColumnDesc() *TColumnDesc { } func (p *TColumnDesc) InitDefault() { - *p = TColumnDesc{} } func (p *TColumnDesc) GetColumnName() (v string) { @@ -1249,10 +1849,8 @@ func (p *TColumnDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -1260,77 +1858,62 @@ func (p *TColumnDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != 
nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1367,85 +1950,103 @@ RequiredFieldNotSetError: } func (p *TColumnDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnName = v + _field = v } + p.ColumnName = _field return nil } - func (p *TColumnDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnType = types.TPrimitiveType(v) + _field = types.TPrimitiveType(v) } + p.ColumnType = _field return nil } - func (p *TColumnDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnLength = &v + _field = &v } + p.ColumnLength = _field return nil } - func (p *TColumnDesc) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnPrecision = &v + _field = &v } + p.ColumnPrecision = _field return nil } - func (p *TColumnDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnScale = &v + _field = &v } + p.ColumnScale = _field return nil } - func (p *TColumnDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsAllowNull = &v + _field = &v } + p.IsAllowNull = _field return nil } - func (p *TColumnDesc) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnKey = &v + _field = &v } + p.ColumnKey = _field return nil } - func (p *TColumnDesc) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Children = make([]*TColumnDesc, 0, size) + _field := make([]*TColumnDesc, 0, size) + values := make([]TColumnDesc, size) for i := 0; i < size; i++ { - _elem := NewTColumnDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Children = append(p.Children, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Children = _field return nil } @@ -1487,7 +2088,6 @@ func (p *TColumnDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1667,6 +2267,7 @@ func (p *TColumnDesc) String() string { return "" } return fmt.Sprintf("TColumnDesc(%+v)", *p) + } func (p *TColumnDesc) DeepEqual(ano *TColumnDesc) bool { @@ -1800,7 +2401,6 @@ func NewTColumnDef() *TColumnDef { } func (p *TColumnDef) InitDefault() { - *p = TColumnDef{} } var TColumnDef_ColumnDesc_DEFAULT *TColumnDesc @@ -1866,27 +2466,22 @@ func (p *TColumnDef) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnDesc = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil 
{ goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1918,19 +2513,22 @@ RequiredFieldNotSetError: } func (p *TColumnDef) ReadField1(iprot thrift.TProtocol) error { - p.ColumnDesc = NewTColumnDesc() - if err := p.ColumnDesc.Read(iprot); err != nil { + _field := NewTColumnDesc() + if err := _field.Read(iprot); err != nil { return err } + p.ColumnDesc = _field return nil } - func (p *TColumnDef) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = &v + _field = &v } + p.Comment = _field return nil } @@ -1948,7 +2546,6 @@ func (p *TColumnDef) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2008,6 +2605,7 @@ func (p *TColumnDef) String() string { return "" } return fmt.Sprintf("TColumnDef(%+v)", *p) + } func (p *TColumnDef) DeepEqual(ano *TColumnDef) bool { @@ -2063,10 +2661,7 @@ func NewTDescribeTableParams() *TDescribeTableParams { } func (p *TDescribeTableParams) InitDefault() { - *p = TDescribeTableParams{ - - ShowHiddenColumns: false, - } + p.ShowHiddenColumns = false } var TDescribeTableParams_Db_DEFAULT string @@ -2207,10 +2802,8 @@ func (p *TDescribeTableParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -2218,67 +2811,54 @@ func (p *TDescribeTableParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2310,64 +2890,77 @@ RequiredFieldNotSetError: } func (p *TDescribeTableParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TDescribeTableParams) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *TDescribeTableParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TDescribeTableParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TDescribeTableParams) ReadField5(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } - func (p *TDescribeTableParams) ReadField6(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ShowHiddenColumns = v + _field = v } + p.ShowHiddenColumns = _field return nil } - func (p *TDescribeTableParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.Catalog = _field return nil } @@ -2405,7 +2998,6 @@ func (p *TDescribeTableParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2560,6 +3152,7 @@ func (p *TDescribeTableParams) String() string { return "" } return fmt.Sprintf("TDescribeTableParams(%+v)", *p) + } func (p *TDescribeTableParams) DeepEqual(ano *TDescribeTableParams) bool { @@ -2671,7 +3264,6 @@ func NewTDescribeTableResult_() *TDescribeTableResult_ { } func (p *TDescribeTableResult_) InitDefault() { - *p = TDescribeTableResult_{} } func (p *TDescribeTableResult_) GetColumns() (v []*TColumnDef) { @@ -2711,17 +3303,14 @@ func (p *TDescribeTableResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2757,18 +3346,22 @@ func (p *TDescribeTableResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Columns = make([]*TColumnDef, 0, size) + _field := make([]*TColumnDef, 0, size) + values := make([]TColumnDef, size) for i := 0; i < size; i++ { - _elem := NewTColumnDef() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { 
return err } + p.Columns = _field return nil } @@ -2782,7 +3375,6 @@ func (p *TDescribeTableResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2831,6 +3423,7 @@ func (p *TDescribeTableResult_) String() string { return "" } return fmt.Sprintf("TDescribeTableResult_(%+v)", *p) + } func (p *TDescribeTableResult_) DeepEqual(ano *TDescribeTableResult_) bool { @@ -2877,10 +3470,7 @@ func NewTDescribeTablesParams() *TDescribeTablesParams { } func (p *TDescribeTablesParams) InitDefault() { - *p = TDescribeTablesParams{ - - ShowHiddenColumns: false, - } + p.ShowHiddenColumns = false } var TDescribeTablesParams_Db_DEFAULT string @@ -3021,10 +3611,8 @@ func (p *TDescribeTablesParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -3032,67 +3620,54 @@ func (p *TDescribeTablesParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTablesName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3124,21 +3699,24 @@ RequiredFieldNotSetError: } func (p *TDescribeTablesParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TDescribeTablesParams) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TablesName = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -3146,55 +3724,64 
@@ func (p *TDescribeTablesParams) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.TablesName = append(p.TablesName, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TablesName = _field return nil } - func (p *TDescribeTablesParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TDescribeTablesParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TDescribeTablesParams) ReadField5(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } - func (p *TDescribeTablesParams) ReadField6(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ShowHiddenColumns = v + _field = v } + p.ShowHiddenColumns = _field return nil } - func (p *TDescribeTablesParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.Catalog = _field return nil } @@ -3232,7 +3819,6 @@ func (p *TDescribeTablesParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3395,6 +3981,7 @@ func (p *TDescribeTablesParams) String() string { return "" } return fmt.Sprintf("TDescribeTablesParams(%+v)", *p) + } func (p *TDescribeTablesParams) DeepEqual(ano *TDescribeTablesParams) bool { @@ -3513,7 +4100,6 @@ func NewTDescribeTablesResult_() *TDescribeTablesResult_ { } func (p *TDescribeTablesResult_) InitDefault() { - *p = TDescribeTablesResult_{} } func (p *TDescribeTablesResult_) GetTablesOffset() (v []int32) { @@ -3562,10 +4148,8 @@ func (p *TDescribeTablesResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTablesOffset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -3573,17 +4157,14 @@ func (p *TDescribeTablesResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3624,8 +4205,9 @@ func (p *TDescribeTablesResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TablesOffset = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -3633,31 +4215,35 @@ func (p *TDescribeTablesResult_) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.TablesOffset = append(p.TablesOffset, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + 
p.TablesOffset = _field return nil } - func (p *TDescribeTablesResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]*TColumnDef, 0, size) + _field := make([]*TColumnDef, 0, size) + values := make([]TColumnDef, size) for i := 0; i < size; i++ { - _elem := NewTColumnDef() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } @@ -3675,7 +4261,6 @@ func (p *TDescribeTablesResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3749,6 +4334,7 @@ func (p *TDescribeTablesResult_) String() string { return "" } return fmt.Sprintf("TDescribeTablesResult_(%+v)", *p) + } func (p *TDescribeTablesResult_) DeepEqual(ano *TDescribeTablesResult_) bool { @@ -3803,7 +4389,6 @@ func NewTShowVariableRequest() *TShowVariableRequest { } func (p *TShowVariableRequest) InitDefault() { - *p = TShowVariableRequest{} } func (p *TShowVariableRequest) GetThreadId() (v int64) { @@ -3852,10 +4437,8 @@ func (p *TShowVariableRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetThreadId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -3863,17 +4446,14 @@ func (p *TShowVariableRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVarType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3910,20 +4490,25 @@ RequiredFieldNotSetError: } func (p *TShowVariableRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThreadId = v + _field = v } + p.ThreadId = _field return nil } - func (p *TShowVariableRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TVarType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VarType = types.TVarType(v) + _field = types.TVarType(v) } + p.VarType = _field return nil } @@ -3941,7 +4526,6 @@ func (p *TShowVariableRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3999,6 +4583,7 @@ func (p *TShowVariableRequest) String() string { return "" } return fmt.Sprintf("TShowVariableRequest(%+v)", *p) + } func (p *TShowVariableRequest) DeepEqual(ano *TShowVariableRequest) bool { @@ -4032,7 +4617,7 @@ func (p *TShowVariableRequest) Field2DeepEqual(src types.TVarType) bool { } type TShowVariableResult_ struct { - Variables map[string]string `thrift:"variables,1,required" frugal:"1,required,map" json:"variables"` + Variables [][]string `thrift:"variables,1,required" frugal:"1,required,list>" json:"variables"` } func NewTShowVariableResult_() *TShowVariableResult_ { @@ -4040,13 +4625,12 @@ func NewTShowVariableResult_() *TShowVariableResult_ { } func (p *TShowVariableResult_) InitDefault() { 
- *p = TShowVariableResult_{} } -func (p *TShowVariableResult_) GetVariables() (v map[string]string) { +func (p *TShowVariableResult_) GetVariables() (v [][]string) { return p.Variables } -func (p *TShowVariableResult_) SetVariables(val map[string]string) { +func (p *TShowVariableResult_) SetVariables(val [][]string) { p.Variables = val } @@ -4075,22 +4659,19 @@ func (p *TShowVariableResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } issetVariables = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4122,31 +4703,38 @@ RequiredFieldNotSetError: } func (p *TShowVariableResult_) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() + _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Variables = make(map[string]string, size) + _field := make([][]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - _key = v } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { - var _val string - if v, err := iprot.ReadString(); err != nil { + var _elem1 string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem1 = v + } + + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - _val = v } - p.Variables[_key] = _val + _field = append(_field, _elem) } - if err := iprot.ReadMapEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } + p.Variables = _field return nil } @@ -4160,7 +4748,6 @@ func (p *TShowVariableResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4180,23 +4767,26 @@ WriteStructEndError: } func (p *TShowVariableResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("variables", thrift.MAP, 1); err != nil { + if err = oprot.WriteFieldBegin("variables", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Variables)); err != nil { + if err := oprot.WriteListBegin(thrift.LIST, len(p.Variables)); err != nil { return err } - for k, v := range p.Variables { - - if err := oprot.WriteString(k); err != nil { + for _, v := range p.Variables { + if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { return err } - - if err := oprot.WriteString(v); err != nil { + for _, v := range v { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -4214,6 +4804,7 @@ func (p *TShowVariableResult_) String() string { return "" } return fmt.Sprintf("TShowVariableResult_(%+v)", *p) + } func (p *TShowVariableResult_) DeepEqual(ano *TShowVariableResult_) bool { @@ -4228,16 +4819,22 @@ func (p *TShowVariableResult_) DeepEqual(ano 
*TShowVariableResult_) bool { return true } -func (p *TShowVariableResult_) Field1DeepEqual(src map[string]string) bool { +func (p *TShowVariableResult_) Field1DeepEqual(src [][]string) bool { if len(p.Variables) != len(src) { return false } - for k, v := range p.Variables { - _src := src[k] - if strings.Compare(v, _src) != 0 { + for i, v := range p.Variables { + _src := src[i] + if len(v) != len(_src) { return false } + for i, v := range v { + _src1 := _src[i] + if strings.Compare(v, _src1) != 0 { + return false + } + } } return true } @@ -4253,7 +4850,6 @@ func NewTTableRowFormat() *TTableRowFormat { } func (p *TTableRowFormat) InitDefault() { - *p = TTableRowFormat{} } var TTableRowFormat_FieldTerminator_DEFAULT string @@ -4334,37 +4930,30 @@ func (p *TTableRowFormat) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4390,29 +4979,36 @@ ReadStructEndError: } func (p *TTableRowFormat) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FieldTerminator = &v + _field = &v } + p.FieldTerminator = _field return nil } - func (p *TTableRowFormat) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineTerminator = &v + _field = &v } + p.LineTerminator = _field return nil } - func (p *TTableRowFormat) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.EscapedBy = &v + _field = &v } + p.EscapedBy = _field return nil } @@ -4434,7 +5030,6 @@ func (p *TTableRowFormat) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4515,6 +5110,7 @@ func (p *TTableRowFormat) String() string { return "" } return fmt.Sprintf("TTableRowFormat(%+v)", *p) + } func (p *TTableRowFormat) DeepEqual(ano *TTableRowFormat) bool { @@ -4582,7 +5178,6 @@ func NewTPartitionKeyValue() *TPartitionKeyValue { } func (p *TPartitionKeyValue) InitDefault() { - *p = TPartitionKeyValue{} } func (p *TPartitionKeyValue) GetName() (v string) { @@ -4631,10 +5226,8 @@ func (p *TPartitionKeyValue) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -4642,17 +5235,14 @@ func (p *TPartitionKeyValue) Read(iprot thrift.TProtocol) (err error) { goto 
ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4689,20 +5279,25 @@ RequiredFieldNotSetError: } func (p *TPartitionKeyValue) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TPartitionKeyValue) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -4720,7 +5315,6 @@ func (p *TPartitionKeyValue) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4778,6 +5372,7 @@ func (p *TPartitionKeyValue) String() string { return "" } return fmt.Sprintf("TPartitionKeyValue(%+v)", *p) + } func (p *TPartitionKeyValue) DeepEqual(ano *TPartitionKeyValue) bool { @@ -4821,7 +5416,6 @@ func NewTSessionState() *TSessionState { } func (p *TSessionState) InitDefault() { - *p = TSessionState{} } func (p *TSessionState) GetDatabase() (v string) { @@ -4879,10 +5473,8 @@ func (p *TSessionState) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDatabase = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -4890,10 +5482,8 @@ func (p *TSessionState) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -4901,17 +5491,14 @@ func (p *TSessionState) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetConnectionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4953,29 +5540,36 @@ RequiredFieldNotSetError: } func (p *TSessionState) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Database = v + _field = v } + p.Database = _field return nil } - func (p *TSessionState) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TSessionState) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ConnectionId = v + _field = v } + p.ConnectionId = _field return nil } @@ -4997,7 +5591,6 @@ func (p *TSessionState) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5072,6 +5665,7 @@ func (p *TSessionState) String() string { 
return "" } return fmt.Sprintf("TSessionState(%+v)", *p) + } func (p *TSessionState) DeepEqual(ano *TSessionState) bool { @@ -5125,7 +5719,6 @@ func NewTClientRequest() *TClientRequest { } func (p *TClientRequest) InitDefault() { - *p = TClientRequest{} } func (p *TClientRequest) GetStmt() (v string) { @@ -5201,10 +5794,8 @@ func (p *TClientRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStmt = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -5212,10 +5803,8 @@ func (p *TClientRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetQueryOptions = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -5223,17 +5812,14 @@ func (p *TClientRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSessionState = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5275,27 +5861,30 @@ RequiredFieldNotSetError: } func (p *TClientRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Stmt = v + _field = v } + p.Stmt = _field return nil } - func (p *TClientRequest) ReadField2(iprot thrift.TProtocol) error { - p.QueryOptions = palointernalservice.NewTQueryOptions() - if err := p.QueryOptions.Read(iprot); err != nil { + _field := palointernalservice.NewTQueryOptions() + if err := _field.Read(iprot); err != nil { return err } + p.QueryOptions = _field return nil } - func (p *TClientRequest) ReadField3(iprot thrift.TProtocol) error { - p.SessionState = NewTSessionState() - if err := p.SessionState.Read(iprot); err != nil { + _field := NewTSessionState() + if err := _field.Read(iprot); err != nil { return err } + p.SessionState = _field return nil } @@ -5317,7 +5906,6 @@ func (p *TClientRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5392,6 +5980,7 @@ func (p *TClientRequest) String() string { return "" } return fmt.Sprintf("TClientRequest(%+v)", *p) + } func (p *TClientRequest) DeepEqual(ano *TClientRequest) bool { @@ -5443,7 +6032,6 @@ func NewTExplainParams() *TExplainParams { } func (p *TExplainParams) InitDefault() { - *p = TExplainParams{} } func (p *TExplainParams) GetExplain() (v string) { @@ -5483,17 +6071,14 @@ func (p *TExplainParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetExplain = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5525,11 +6110,14 @@ RequiredFieldNotSetError: } func (p *TExplainParams) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err 
} else { - p.Explain = v + _field = v } + p.Explain = _field return nil } @@ -5543,7 +6131,6 @@ func (p *TExplainParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5584,6 +6171,7 @@ func (p *TExplainParams) String() string { return "" } return fmt.Sprintf("TExplainParams(%+v)", *p) + } func (p *TExplainParams) DeepEqual(ano *TExplainParams) bool { @@ -5617,7 +6205,6 @@ func NewTSetVar() *TSetVar { } func (p *TSetVar) InitDefault() { - *p = TSetVar{} } func (p *TSetVar) GetType() (v TSetType) { @@ -5684,10 +6271,8 @@ func (p *TSetVar) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -5695,10 +6280,8 @@ func (p *TSetVar) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVariable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -5706,17 +6289,14 @@ func (p *TSetVar) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5758,28 +6338,33 @@ RequiredFieldNotSetError: } func (p *TSetVar) ReadField1(iprot thrift.TProtocol) error { + + var _field TSetType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TSetType(v) + _field = TSetType(v) } + p.Type = _field return nil } - func (p *TSetVar) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Variable = v + _field = v } + p.Variable = _field return nil } - func (p *TSetVar) ReadField3(iprot thrift.TProtocol) error { - p.Value = exprs.NewTExpr() - if err := p.Value.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.Value = _field return nil } @@ -5801,7 +6386,6 @@ func (p *TSetVar) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5876,6 +6460,7 @@ func (p *TSetVar) String() string { return "" } return fmt.Sprintf("TSetVar(%+v)", *p) + } func (p *TSetVar) DeepEqual(ano *TSetVar) bool { @@ -5927,7 +6512,6 @@ func NewTSetParams() *TSetParams { } func (p *TSetParams) InitDefault() { - *p = TSetParams{} } func (p *TSetParams) GetSetVars() (v []*TSetVar) { @@ -5967,17 +6551,14 @@ func (p *TSetParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSetVars = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6013,18 +6594,22 @@ func (p *TSetParams) ReadField1(iprot thrift.TProtocol) error { if err != nil { 
return err } - p.SetVars = make([]*TSetVar, 0, size) + _field := make([]*TSetVar, 0, size) + values := make([]TSetVar, size) for i := 0; i < size; i++ { - _elem := NewTSetVar() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SetVars = append(p.SetVars, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SetVars = _field return nil } @@ -6038,7 +6623,6 @@ func (p *TSetParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6087,6 +6671,7 @@ func (p *TSetParams) String() string { return "" } return fmt.Sprintf("TSetParams(%+v)", *p) + } func (p *TSetParams) DeepEqual(ano *TSetParams) bool { @@ -6125,7 +6710,6 @@ func NewTKillParams() *TKillParams { } func (p *TKillParams) InitDefault() { - *p = TKillParams{} } func (p *TKillParams) GetIsKillConnection() (v bool) { @@ -6174,10 +6758,8 @@ func (p *TKillParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsKillConnection = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -6185,17 +6767,14 @@ func (p *TKillParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetConnectionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6232,20 +6811,25 @@ RequiredFieldNotSetError: } func (p *TKillParams) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsKillConnection = v + _field = v } + p.IsKillConnection = _field return nil } - func (p *TKillParams) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ConnectionId = v + _field = v } + p.ConnectionId = _field return nil } @@ -6263,7 +6847,6 @@ func (p *TKillParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6321,6 +6904,7 @@ func (p *TKillParams) String() string { return "" } return fmt.Sprintf("TKillParams(%+v)", *p) + } func (p *TKillParams) DeepEqual(ano *TKillParams) bool { @@ -6361,7 +6945,6 @@ func NewTCommonDdlParams() *TCommonDdlParams { } func (p *TCommonDdlParams) InitDefault() { - *p = TCommonDdlParams{} } var fieldIDToName_TCommonDdlParams = map[int16]string{} @@ -6386,7 +6969,6 @@ func (p *TCommonDdlParams) Read(iprot thrift.TProtocol) (err error) { if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6414,7 +6996,6 @@ func (p *TCommonDdlParams) Write(oprot thrift.TProtocol) (err error) { goto WriteStructBeginError } if p != nil { - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6436,6 +7017,7 @@ func (p *TCommonDdlParams) String() string { return "" } return fmt.Sprintf("TCommonDdlParams(%+v)", *p) + } func (p *TCommonDdlParams) DeepEqual(ano *TCommonDdlParams) bool { @@ -6456,7 +7038,6 @@ func 
NewTUseDbParams() *TUseDbParams { } func (p *TUseDbParams) InitDefault() { - *p = TUseDbParams{} } func (p *TUseDbParams) GetDb() (v string) { @@ -6496,17 +7077,14 @@ func (p *TUseDbParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6538,11 +7116,14 @@ RequiredFieldNotSetError: } func (p *TUseDbParams) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Db = _field return nil } @@ -6556,7 +7137,6 @@ func (p *TUseDbParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6597,6 +7177,7 @@ func (p *TUseDbParams) String() string { return "" } return fmt.Sprintf("TUseDbParams(%+v)", *p) + } func (p *TUseDbParams) DeepEqual(ano *TUseDbParams) bool { @@ -6628,7 +7209,6 @@ func NewTResultSetMetadata() *TResultSetMetadata { } func (p *TResultSetMetadata) InitDefault() { - *p = TResultSetMetadata{} } func (p *TResultSetMetadata) GetColumnDescs() (v []*TColumnDesc) { @@ -6668,17 +7248,14 @@ func (p *TResultSetMetadata) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnDescs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6714,18 +7291,22 @@ func (p *TResultSetMetadata) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.ColumnDescs = make([]*TColumnDesc, 0, size) + _field := make([]*TColumnDesc, 0, size) + values := make([]TColumnDesc, size) for i := 0; i < size; i++ { - _elem := NewTColumnDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnDescs = append(p.ColumnDescs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnDescs = _field return nil } @@ -6739,7 +7320,6 @@ func (p *TResultSetMetadata) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6788,6 +7368,7 @@ func (p *TResultSetMetadata) String() string { return "" } return fmt.Sprintf("TResultSetMetadata(%+v)", *p) + } func (p *TResultSetMetadata) DeepEqual(ano *TResultSetMetadata) bool { @@ -6832,7 +7413,6 @@ func NewTQueryExecRequest() *TQueryExecRequest { } func (p *TQueryExecRequest) InitDefault() { - *p = TQueryExecRequest{} } var TQueryExecRequest_DescTbl_DEFAULT *descriptors.TDescriptorTable @@ -6983,10 +7563,8 @@ func (p *TQueryExecRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -6994,40 +7572,32 @@ func (p *TQueryExecRequest) Read(iprot 
thrift.TProtocol) (err error) { goto ReadFieldError } issetFragments = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { @@ -7035,10 +7605,8 @@ func (p *TQueryExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetQueryGlobals = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { @@ -7046,27 +7614,22 @@ func (p *TQueryExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStmtType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7108,40 +7671,44 @@ RequiredFieldNotSetError: } func (p *TQueryExecRequest) ReadField1(iprot thrift.TProtocol) error { - p.DescTbl = descriptors.NewTDescriptorTable() - if err := p.DescTbl.Read(iprot); err != nil { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { return err } + p.DescTbl = _field return nil } - func (p *TQueryExecRequest) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Fragments = make([]*planner.TPlanFragment, 0, size) + _field := make([]*planner.TPlanFragment, 0, size) + values := make([]planner.TPlanFragment, size) for i := 0; i < size; i++ { - _elem := planner.NewTPlanFragment() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Fragments = append(p.Fragments, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Fragments = _field return nil } - func (p *TQueryExecRequest) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DestFragmentIdx = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -7149,20 +7716,20 @@ func (p *TQueryExecRequest) ReadField3(iprot 
thrift.TProtocol) error { _elem = v } - p.DestFragmentIdx = append(p.DestFragmentIdx, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DestFragmentIdx = _field return nil } - func (p *TQueryExecRequest) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.PerNodeScanRanges = make(map[types.TPlanNodeId][]*planner.TScanRangeLocations, size) + _field := make(map[types.TPlanNodeId][]*planner.TScanRangeLocations, size) for i := 0; i < size; i++ { var _key types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { @@ -7170,14 +7737,16 @@ func (p *TQueryExecRequest) ReadField4(iprot thrift.TProtocol) error { } else { _key = v } - _, size, err := iprot.ReadListBegin() if err != nil { return err } _val := make([]*planner.TScanRangeLocations, 0, size) + values := make([]planner.TScanRangeLocations, size) for i := 0; i < size; i++ { - _elem := planner.NewTScanRangeLocations() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } @@ -7188,45 +7757,50 @@ func (p *TQueryExecRequest) ReadField4(iprot thrift.TProtocol) error { return err } - p.PerNodeScanRanges[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.PerNodeScanRanges = _field return nil } - func (p *TQueryExecRequest) ReadField5(iprot thrift.TProtocol) error { - p.ResultSetMetadata = NewTResultSetMetadata() - if err := p.ResultSetMetadata.Read(iprot); err != nil { + _field := NewTResultSetMetadata() + if err := _field.Read(iprot); err != nil { return err } + p.ResultSetMetadata = _field return nil } - func (p *TQueryExecRequest) ReadField7(iprot thrift.TProtocol) error { - p.QueryGlobals = palointernalservice.NewTQueryGlobals() - if err := p.QueryGlobals.Read(iprot); err != nil { + _field := palointernalservice.NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { return err } + p.QueryGlobals = _field return nil } - func (p *TQueryExecRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field types.TStmtType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StmtType = types.TStmtType(v) + _field = types.TStmtType(v) } + p.StmtType = _field return nil } - func (p *TQueryExecRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsBlockQuery = &v + _field = &v } + p.IsBlockQuery = _field return nil } @@ -7268,7 +7842,6 @@ func (p *TQueryExecRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7367,11 +7940,9 @@ func (p *TQueryExecRequest) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.PerNodeScanRanges { - if err := oprot.WriteI32(k); err != nil { return err } - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { return err } @@ -7475,6 +8046,7 @@ func (p *TQueryExecRequest) String() string { return "" } return fmt.Sprintf("TQueryExecRequest(%+v)", *p) + } func (p *TQueryExecRequest) DeepEqual(ano *TQueryExecRequest) bool { @@ -7610,7 +8182,6 @@ func NewTDdlExecRequest() *TDdlExecRequest { } func (p *TDdlExecRequest) InitDefault() { - *p = TDdlExecRequest{} } func (p *TDdlExecRequest) GetDdlType() (v TDdlType) { @@ -7735,67 +8306,54 @@ func (p *TDdlExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDdlType = true - } else 
{ - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7827,51 +8385,54 @@ RequiredFieldNotSetError: } func (p *TDdlExecRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field TDdlType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DdlType = TDdlType(v) + _field = TDdlType(v) } + p.DdlType = _field return nil } - func (p *TDdlExecRequest) ReadField2(iprot thrift.TProtocol) error { - p.UseDbParams = NewTUseDbParams() - if err := p.UseDbParams.Read(iprot); err != nil { + _field := NewTUseDbParams() + if err := _field.Read(iprot); err != nil { return err } + p.UseDbParams = _field return nil } - func (p *TDdlExecRequest) ReadField3(iprot thrift.TProtocol) error { - p.DescribeTableParams = NewTDescribeTableParams() - if err := p.DescribeTableParams.Read(iprot); err != nil { + _field := NewTDescribeTableParams() + if err := _field.Read(iprot); err != nil { return err } + p.DescribeTableParams = _field return nil } - func (p *TDdlExecRequest) ReadField10(iprot thrift.TProtocol) error { - p.ExplainParams = NewTExplainParams() - if err := p.ExplainParams.Read(iprot); err != nil { + _field := NewTExplainParams() + if err := _field.Read(iprot); err != nil { return err } + p.ExplainParams = _field return nil } - func (p *TDdlExecRequest) ReadField11(iprot thrift.TProtocol) error { - p.SetParams = NewTSetParams() - if err := p.SetParams.Read(iprot); err != nil { + _field := NewTSetParams() + if err := _field.Read(iprot); err != nil { return err } + p.SetParams = _field return nil } - func (p *TDdlExecRequest) ReadField12(iprot thrift.TProtocol) error { - p.KillParams = NewTKillParams() - if err := p.KillParams.Read(iprot); err != nil { + _field := NewTKillParams() + if err := _field.Read(iprot); err != nil { return err } + p.KillParams = _field return nil } @@ -7905,7 +8466,6 @@ func (p *TDdlExecRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto 
WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8041,6 +8601,7 @@ func (p *TDdlExecRequest) String() string { return "" } return fmt.Sprintf("TDdlExecRequest(%+v)", *p) + } func (p *TDdlExecRequest) DeepEqual(ano *TDdlExecRequest) bool { @@ -8122,7 +8683,6 @@ func NewTExplainResult_() *TExplainResult_ { } func (p *TExplainResult_) InitDefault() { - *p = TExplainResult_{} } func (p *TExplainResult_) GetResults() (v []*data.TResultRow) { @@ -8162,17 +8722,14 @@ func (p *TExplainResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResults = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8208,18 +8765,22 @@ func (p *TExplainResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Results = make([]*data.TResultRow, 0, size) + _field := make([]*data.TResultRow, 0, size) + values := make([]data.TResultRow, size) for i := 0; i < size; i++ { - _elem := data.NewTResultRow() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Results = append(p.Results, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Results = _field return nil } @@ -8233,7 +8794,6 @@ func (p *TExplainResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8282,6 +8842,7 @@ func (p *TExplainResult_) String() string { return "" } return fmt.Sprintf("TExplainResult_(%+v)", *p) + } func (p *TExplainResult_) DeepEqual(ano *TExplainResult_) bool { @@ -8326,7 +8887,6 @@ func NewTExecRequest() *TExecRequest { } func (p *TExecRequest) InitDefault() { - *p = TExecRequest{} } func (p *TExecRequest) GetStmtType() (v types.TStmtType) { @@ -8487,20 +9047,16 @@ func (p *TExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStmtType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { @@ -8508,10 +9064,8 @@ func (p *TExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRequestId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { @@ -8519,57 +9073,46 @@ func (p *TExecRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetQueryOptions = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - 
} + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8611,68 +9154,73 @@ RequiredFieldNotSetError: } func (p *TExecRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TStmtType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StmtType = types.TStmtType(v) + _field = types.TStmtType(v) } + p.StmtType = _field return nil } - func (p *TExecRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SqlStmt = &v + _field = &v } + p.SqlStmt = _field return nil } - func (p *TExecRequest) ReadField3(iprot thrift.TProtocol) error { - p.RequestId = types.NewTUniqueId() - if err := p.RequestId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.RequestId = _field return nil } - func (p *TExecRequest) ReadField4(iprot thrift.TProtocol) error { - p.QueryOptions = palointernalservice.NewTQueryOptions() - if err := p.QueryOptions.Read(iprot); err != nil { + _field := palointernalservice.NewTQueryOptions() + if err := _field.Read(iprot); err != nil { return err } + p.QueryOptions = _field return nil } - func (p *TExecRequest) ReadField5(iprot thrift.TProtocol) error { - p.QueryExecRequest = NewTQueryExecRequest() - if err := p.QueryExecRequest.Read(iprot); err != nil { + _field := NewTQueryExecRequest() + if err := _field.Read(iprot); err != nil { return err } + p.QueryExecRequest = _field return nil } - func (p *TExecRequest) ReadField6(iprot thrift.TProtocol) error { - p.DdlExecRequest = NewTDdlExecRequest() - if err := p.DdlExecRequest.Read(iprot); err != nil { + _field := NewTDdlExecRequest() + if err := _field.Read(iprot); err != nil { return err } + p.DdlExecRequest = _field return nil } - func (p *TExecRequest) ReadField7(iprot thrift.TProtocol) error { - p.ResultSetMetadata = NewTResultSetMetadata() - if err := p.ResultSetMetadata.Read(iprot); err != nil { + _field := NewTResultSetMetadata() + if err := _field.Read(iprot); err != nil { return err } + p.ResultSetMetadata = _field return nil } - func (p *TExecRequest) ReadField8(iprot thrift.TProtocol) error { - p.ExplainResult_ = NewTExplainResult_() - if err := p.ExplainResult_.Read(iprot); err != nil { + _field := NewTExplainResult_() + if err := _field.Read(iprot); err != nil { return err } + p.ExplainResult_ = _field return nil } @@ -8714,7 +9262,6 @@ func (p *TExecRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - } if err = 
oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8884,6 +9431,7 @@ func (p *TExecRequest) String() string { return "" } return fmt.Sprintf("TExecRequest(%+v)", *p) + } func (p *TExecRequest) DeepEqual(ano *TExecRequest) bool { @@ -8995,7 +9543,6 @@ func NewTGetDbsParams() *TGetDbsParams { } func (p *TGetDbsParams) InitDefault() { - *p = TGetDbsParams{} } var TGetDbsParams_Pattern_DEFAULT string @@ -9127,67 +9674,54 @@ func (p *TGetDbsParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9213,55 +9747,66 @@ ReadStructEndError: } func (p *TGetDbsParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Pattern = &v + _field = &v } + p.Pattern = _field return nil } - func (p *TGetDbsParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TGetDbsParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TGetDbsParams) ReadField4(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } - func (p *TGetDbsParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.Catalog = _field return nil } - func (p *TGetDbsParams) ReadField6(iprot 
thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.GetNullCatalog = &v + _field = &v } + p.GetNullCatalog = _field return nil } @@ -9295,7 +9840,6 @@ func (p *TGetDbsParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9433,6 +9977,7 @@ func (p *TGetDbsParams) String() string { return "" } return fmt.Sprintf("TGetDbsParams(%+v)", *p) + } func (p *TGetDbsParams) DeepEqual(ano *TGetDbsParams) bool { @@ -9542,7 +10087,6 @@ func NewTGetDbsResult_() *TGetDbsResult_ { } func (p *TGetDbsResult_) InitDefault() { - *p = TGetDbsResult_{} } var TGetDbsResult__Dbs_DEFAULT []string @@ -9640,47 +10184,38 @@ func (p *TGetDbsResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9710,8 +10245,9 @@ func (p *TGetDbsResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Dbs = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -9719,21 +10255,22 @@ func (p *TGetDbsResult_) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.Dbs = append(p.Dbs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Dbs = _field return nil } - func (p *TGetDbsResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Catalogs = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -9741,21 +10278,22 @@ func (p *TGetDbsResult_) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.Catalogs = append(p.Catalogs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Catalogs = _field return nil } - func (p *TGetDbsResult_) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DbIds = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -9763,21 +10301,22 @@ func (p *TGetDbsResult_) 
ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.DbIds = append(p.DbIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DbIds = _field return nil } - func (p *TGetDbsResult_) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.CatalogIds = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -9785,11 +10324,12 @@ func (p *TGetDbsResult_) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.CatalogIds = append(p.CatalogIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.CatalogIds = _field return nil } @@ -9815,7 +10355,6 @@ func (p *TGetDbsResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9947,6 +10486,7 @@ func (p *TGetDbsResult_) String() string { return "" } return fmt.Sprintf("TGetDbsResult_(%+v)", *p) + } func (p *TGetDbsResult_) DeepEqual(ano *TGetDbsResult_) bool { @@ -10038,7 +10578,6 @@ func NewTGetTablesParams() *TGetTablesParams { } func (p *TGetTablesParams) InitDefault() { - *p = TGetTablesParams{} } var TGetTablesParams_Db_DEFAULT string @@ -10187,77 +10726,62 @@ func (p *TGetTablesParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10283,64 +10807,77 @@ ReadStructEndError: } func (p 
*TGetTablesParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TGetTablesParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Pattern = &v + _field = &v } + p.Pattern = _field return nil } - func (p *TGetTablesParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TGetTablesParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TGetTablesParams) ReadField5(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } - func (p *TGetTablesParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Type = &v + _field = &v } + p.Type = _field return nil } - func (p *TGetTablesParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.Catalog = _field return nil } @@ -10378,7 +10915,6 @@ func (p *TGetTablesParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10535,6 +11071,7 @@ func (p *TGetTablesParams) String() string { return "" } return fmt.Sprintf("TGetTablesParams(%+v)", *p) + } func (p *TGetTablesParams) DeepEqual(ano *TGetTablesParams) bool { @@ -10668,7 +11205,6 @@ func NewTTableStatus() *TTableStatus { } func (p *TTableStatus) InitDefault() { - *p = TTableStatus{} } func (p *TTableStatus) GetName() (v string) { @@ -10896,10 +11432,8 @@ func (p *TTableStatus) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -10907,10 +11441,8 @@ func (p *TTableStatus) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -10918,117 +11450,94 @@ func (p *TTableStatus) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetComment = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != 
nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11070,119 +11579,146 @@ RequiredFieldNotSetError: } func (p *TTableStatus) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TTableStatus) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Type = v + _field = v } + p.Type = _field return nil } - func (p *TTableStatus) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = v + _field = v } + p.Comment = _field return nil } - func (p *TTableStatus) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Engine = &v + _field = &v } + p.Engine = _field return nil } - func (p *TTableStatus) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := 
iprot.ReadI64(); err != nil { return err } else { - p.LastCheckTime = &v + _field = &v } + p.LastCheckTime = _field return nil } - func (p *TTableStatus) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CreateTime = &v + _field = &v } + p.CreateTime = _field return nil } - func (p *TTableStatus) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DdlSql = &v + _field = &v } + p.DdlSql = _field return nil } - func (p *TTableStatus) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.UpdateTime = &v + _field = &v } + p.UpdateTime = _field return nil } - func (p *TTableStatus) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CheckTime = &v + _field = &v } + p.CheckTime = _field return nil } - func (p *TTableStatus) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Collation = &v + _field = &v } + p.Collation = _field return nil } - func (p *TTableStatus) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Rows = &v + _field = &v } + p.Rows = _field return nil } - func (p *TTableStatus) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AvgRowLength = &v + _field = &v } + p.AvgRowLength = _field return nil } - func (p *TTableStatus) ReadField13(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DataLength = &v + _field = &v } + p.DataLength = _field return nil } @@ -11244,7 +11780,6 @@ func (p *TTableStatus) Write(oprot thrift.TProtocol) (err error) { fieldId = 13 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11509,6 +12044,7 @@ func (p *TTableStatus) String() string { return "" } return fmt.Sprintf("TTableStatus(%+v)", *p) + } func (p *TTableStatus) DeepEqual(ano *TTableStatus) bool { @@ -11710,7 +12246,6 @@ func NewTListTableStatusResult_() *TListTableStatusResult_ { } func (p *TListTableStatusResult_) InitDefault() { - *p = TListTableStatusResult_{} } func (p *TListTableStatusResult_) GetTables() (v []*TTableStatus) { @@ -11750,17 +12285,14 @@ func (p *TListTableStatusResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTables = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11796,18 +12328,22 @@ func (p *TListTableStatusResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Tables = make([]*TTableStatus, 0, size) + _field := make([]*TTableStatus, 0, size) + values := make([]TTableStatus, size) for i := 0; i < size; i++ { - _elem := NewTTableStatus() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Tables = append(p.Tables, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return 
err } + p.Tables = _field return nil } @@ -11821,7 +12357,6 @@ func (p *TListTableStatusResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11870,6 +12405,7 @@ func (p *TListTableStatusResult_) String() string { return "" } return fmt.Sprintf("TListTableStatusResult_(%+v)", *p) + } func (p *TListTableStatusResult_) DeepEqual(ano *TListTableStatusResult_) bool { @@ -11908,7 +12444,6 @@ func NewTTableMetadataNameIds() *TTableMetadataNameIds { } func (p *TTableMetadataNameIds) InitDefault() { - *p = TTableMetadataNameIds{} } var TTableMetadataNameIds_Name_DEFAULT string @@ -11972,27 +12507,22 @@ func (p *TTableMetadataNameIds) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12018,20 +12548,25 @@ ReadStructEndError: } func (p *TTableMetadataNameIds) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = &v + _field = &v } + p.Name = _field return nil } - func (p *TTableMetadataNameIds) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = &v + _field = &v } + p.Id = _field return nil } @@ -12049,7 +12584,6 @@ func (p *TTableMetadataNameIds) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12111,6 +12645,7 @@ func (p *TTableMetadataNameIds) String() string { return "" } return fmt.Sprintf("TTableMetadataNameIds(%+v)", *p) + } func (p *TTableMetadataNameIds) DeepEqual(ano *TTableMetadataNameIds) bool { @@ -12162,7 +12697,6 @@ func NewTListTableMetadataNameIdsResult_() *TListTableMetadataNameIdsResult_ { } func (p *TListTableMetadataNameIdsResult_) InitDefault() { - *p = TListTableMetadataNameIdsResult_{} } var TListTableMetadataNameIdsResult__Tables_DEFAULT []*TTableMetadataNameIds @@ -12209,17 +12743,14 @@ func (p *TListTableMetadataNameIdsResult_) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12249,18 +12780,22 @@ func (p *TListTableMetadataNameIdsResult_) ReadField1(iprot thrift.TProtocol) er if err != nil { return err } - p.Tables = make([]*TTableMetadataNameIds, 0, size) + _field := make([]*TTableMetadataNameIds, 0, size) + values := make([]TTableMetadataNameIds, size) for i := 0; i < size; i++ { - _elem := NewTTableMetadataNameIds() + _elem := &values[i] + _elem.InitDefault() + if err := 
_elem.Read(iprot); err != nil { return err } - p.Tables = append(p.Tables, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Tables = _field return nil } @@ -12274,7 +12809,6 @@ func (p *TListTableMetadataNameIdsResult_) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12325,6 +12859,7 @@ func (p *TListTableMetadataNameIdsResult_) String() string { return "" } return fmt.Sprintf("TListTableMetadataNameIdsResult_(%+v)", *p) + } func (p *TListTableMetadataNameIdsResult_) DeepEqual(ano *TListTableMetadataNameIdsResult_) bool { @@ -12362,7 +12897,6 @@ func NewTGetTablesResult_() *TGetTablesResult_ { } func (p *TGetTablesResult_) InitDefault() { - *p = TGetTablesResult_{} } func (p *TGetTablesResult_) GetTables() (v []string) { @@ -12400,17 +12934,14 @@ func (p *TGetTablesResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12440,8 +12971,9 @@ func (p *TGetTablesResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Tables = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -12449,11 +12981,12 @@ func (p *TGetTablesResult_) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.Tables = append(p.Tables, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Tables = _field return nil } @@ -12467,7 +13000,6 @@ func (p *TGetTablesResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12516,6 +13048,7 @@ func (p *TGetTablesResult_) String() string { return "" } return fmt.Sprintf("TGetTablesResult_(%+v)", *p) + } func (p *TGetTablesResult_) DeepEqual(ano *TGetTablesResult_) bool { @@ -12557,7 +13090,6 @@ func NewTPrivilegeStatus() *TPrivilegeStatus { } func (p *TPrivilegeStatus) InitDefault() { - *p = TPrivilegeStatus{} } var TPrivilegeStatus_TableName_DEFAULT string @@ -12672,57 +13204,46 @@ func (p *TPrivilegeStatus) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12748,47 +13269,58 @@ ReadStructEndError: } func (p *TPrivilegeStatus) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *TPrivilegeStatus) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.PrivilegeType = &v + _field = &v } + p.PrivilegeType = _field return nil } - func (p *TPrivilegeStatus) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Grantee = &v + _field = &v } + p.Grantee = _field return nil } - func (p *TPrivilegeStatus) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Schema = &v + _field = &v } + p.Schema = _field return nil } - func (p *TPrivilegeStatus) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.IsGrantable = &v + _field = &v } + p.IsGrantable = _field return nil } @@ -12818,7 +13350,6 @@ func (p *TPrivilegeStatus) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12937,6 +13468,7 @@ func (p *TPrivilegeStatus) String() string { return "" } return fmt.Sprintf("TPrivilegeStatus(%+v)", *p) + } func (p *TPrivilegeStatus) DeepEqual(ano *TPrivilegeStatus) bool { @@ -13033,7 +13565,6 @@ func NewTListPrivilegesResult_() *TListPrivilegesResult_ { } func (p *TListPrivilegesResult_) InitDefault() { - *p = TListPrivilegesResult_{} } func (p *TListPrivilegesResult_) GetPrivileges() (v []*TPrivilegeStatus) { @@ -13073,17 +13604,14 @@ func (p *TListPrivilegesResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPrivileges = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13119,18 +13647,22 @@ func (p *TListPrivilegesResult_) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Privileges = make([]*TPrivilegeStatus, 0, size) + _field := make([]*TPrivilegeStatus, 0, size) + values := make([]TPrivilegeStatus, size) for i := 0; i < size; i++ { - _elem := NewTPrivilegeStatus() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Privileges = append(p.Privileges, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Privileges = _field return nil } @@ -13144,7 +13676,6 @@ func (p *TListPrivilegesResult_) 
Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13193,6 +13724,7 @@ func (p *TListPrivilegesResult_) String() string { return "" } return fmt.Sprintf("TListPrivilegesResult_(%+v)", *p) + } func (p *TListPrivilegesResult_) DeepEqual(ano *TListPrivilegesResult_) bool { @@ -13230,7 +13762,6 @@ func NewTReportExecStatusResult_() *TReportExecStatusResult_ { } func (p *TReportExecStatusResult_) InitDefault() { - *p = TReportExecStatusResult_{} } var TReportExecStatusResult__Status_DEFAULT *status.TStatus @@ -13277,17 +13808,14 @@ func (p *TReportExecStatusResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13313,10 +13841,11 @@ ReadStructEndError: } func (p *TReportExecStatusResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } @@ -13330,7 +13859,6 @@ func (p *TReportExecStatusResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13373,6 +13901,7 @@ func (p *TReportExecStatusResult_) String() string { return "" } return fmt.Sprintf("TReportExecStatusResult_(%+v)", *p) + } func (p *TReportExecStatusResult_) DeepEqual(ano *TReportExecStatusResult_) bool { @@ -13399,6 +13928,7 @@ type TDetailedReportParams struct { FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,1,optional" frugal:"1,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` Profile *runtimeprofile.TRuntimeProfileTree `thrift:"profile,2,optional" frugal:"2,optional,runtimeprofile.TRuntimeProfileTree" json:"profile,omitempty"` LoadChannelProfile *runtimeprofile.TRuntimeProfileTree `thrift:"loadChannelProfile,3,optional" frugal:"3,optional,runtimeprofile.TRuntimeProfileTree" json:"loadChannelProfile,omitempty"` + IsFragmentLevel *bool `thrift:"is_fragment_level,4,optional" frugal:"4,optional,bool" json:"is_fragment_level,omitempty"` } func NewTDetailedReportParams() *TDetailedReportParams { @@ -13406,7 +13936,6 @@ func NewTDetailedReportParams() *TDetailedReportParams { } func (p *TDetailedReportParams) InitDefault() { - *p = TDetailedReportParams{} } var TDetailedReportParams_FragmentInstanceId_DEFAULT *types.TUniqueId @@ -13435,6 +13964,15 @@ func (p *TDetailedReportParams) GetLoadChannelProfile() (v *runtimeprofile.TRunt } return p.LoadChannelProfile } + +var TDetailedReportParams_IsFragmentLevel_DEFAULT bool + +func (p *TDetailedReportParams) GetIsFragmentLevel() (v bool) { + if !p.IsSetIsFragmentLevel() { + return TDetailedReportParams_IsFragmentLevel_DEFAULT + } + return *p.IsFragmentLevel +} func (p *TDetailedReportParams) SetFragmentInstanceId(val *types.TUniqueId) { p.FragmentInstanceId = val } @@ -13444,11 +13982,15 @@ func (p *TDetailedReportParams) SetProfile(val *runtimeprofile.TRuntimeProfileTr func (p *TDetailedReportParams) SetLoadChannelProfile(val *runtimeprofile.TRuntimeProfileTree) { 
p.LoadChannelProfile = val } +func (p *TDetailedReportParams) SetIsFragmentLevel(val *bool) { + p.IsFragmentLevel = val +} var fieldIDToName_TDetailedReportParams = map[int16]string{ 1: "fragment_instance_id", 2: "profile", 3: "loadChannelProfile", + 4: "is_fragment_level", } func (p *TDetailedReportParams) IsSetFragmentInstanceId() bool { @@ -13463,6 +14005,10 @@ func (p *TDetailedReportParams) IsSetLoadChannelProfile() bool { return p.LoadChannelProfile != nil } +func (p *TDetailedReportParams) IsSetIsFragmentLevel() bool { + return p.IsFragmentLevel != nil +} + func (p *TDetailedReportParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -13487,37 +14033,38 @@ func (p *TDetailedReportParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13543,26 +14090,38 @@ ReadStructEndError: } func (p *TDetailedReportParams) ReadField1(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.FragmentInstanceId = _field return nil } - func (p *TDetailedReportParams) ReadField2(iprot thrift.TProtocol) error { - p.Profile = runtimeprofile.NewTRuntimeProfileTree() - if err := p.Profile.Read(iprot); err != nil { + _field := runtimeprofile.NewTRuntimeProfileTree() + if err := _field.Read(iprot); err != nil { return err } + p.Profile = _field return nil } - func (p *TDetailedReportParams) ReadField3(iprot thrift.TProtocol) error { - p.LoadChannelProfile = runtimeprofile.NewTRuntimeProfileTree() - if err := p.LoadChannelProfile.Read(iprot); err != nil { + _field := runtimeprofile.NewTRuntimeProfileTree() + if err := _field.Read(iprot); err != nil { return err } + p.LoadChannelProfile = _field + return nil +} +func (p *TDetailedReportParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsFragmentLevel = _field return nil } @@ -13584,7 +14143,10 @@ func (p *TDetailedReportParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13660,11 +14222,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T 
write field 3 end error: ", p), err) } +func (p *TDetailedReportParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetIsFragmentLevel() { + if err = oprot.WriteFieldBegin("is_fragment_level", thrift.BOOL, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsFragmentLevel); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + func (p *TDetailedReportParams) String() string { if p == nil { return "" } return fmt.Sprintf("TDetailedReportParams(%+v)", *p) + } func (p *TDetailedReportParams) DeepEqual(ano *TDetailedReportParams) bool { @@ -13682,6 +14264,9 @@ func (p *TDetailedReportParams) DeepEqual(ano *TDetailedReportParams) bool { if !p.Field3DeepEqual(ano.LoadChannelProfile) { return false } + if !p.Field4DeepEqual(ano.IsFragmentLevel) { + return false + } return true } @@ -13706,413 +14291,234 @@ func (p *TDetailedReportParams) Field3DeepEqual(src *runtimeprofile.TRuntimeProf } return true } +func (p *TDetailedReportParams) Field4DeepEqual(src *bool) bool { -type TReportExecStatusParams struct { - ProtocolVersion FrontendServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocol_version"` - QueryId *types.TUniqueId `thrift:"query_id,2,optional" frugal:"2,optional,types.TUniqueId" json:"query_id,omitempty"` - BackendNum *int32 `thrift:"backend_num,3,optional" frugal:"3,optional,i32" json:"backend_num,omitempty"` - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,4,optional" frugal:"4,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` - Status *status.TStatus `thrift:"status,5,optional" frugal:"5,optional,status.TStatus" json:"status,omitempty"` - Done *bool `thrift:"done,6,optional" frugal:"6,optional,bool" json:"done,omitempty"` - Profile *runtimeprofile.TRuntimeProfileTree `thrift:"profile,7,optional" frugal:"7,optional,runtimeprofile.TRuntimeProfileTree" json:"profile,omitempty"` - ErrorLog []string `thrift:"error_log,9,optional" frugal:"9,optional,list" json:"error_log,omitempty"` - DeltaUrls []string `thrift:"delta_urls,10,optional" frugal:"10,optional,list" json:"delta_urls,omitempty"` - LoadCounters map[string]string `thrift:"load_counters,11,optional" frugal:"11,optional,map" json:"load_counters,omitempty"` - TrackingUrl *string `thrift:"tracking_url,12,optional" frugal:"12,optional,string" json:"tracking_url,omitempty"` - ExportFiles []string `thrift:"export_files,13,optional" frugal:"13,optional,list" json:"export_files,omitempty"` - CommitInfos []*types.TTabletCommitInfo `thrift:"commitInfos,14,optional" frugal:"14,optional,list" json:"commitInfos,omitempty"` - LoadedRows *int64 `thrift:"loaded_rows,15,optional" frugal:"15,optional,i64" json:"loaded_rows,omitempty"` - BackendId *int64 `thrift:"backend_id,16,optional" frugal:"16,optional,i64" json:"backend_id,omitempty"` - LoadedBytes *int64 `thrift:"loaded_bytes,17,optional" frugal:"17,optional,i64" json:"loaded_bytes,omitempty"` - ErrorTabletInfos []*types.TErrorTabletInfo `thrift:"errorTabletInfos,18,optional" frugal:"18,optional,list" json:"errorTabletInfos,omitempty"` - FragmentId *int32 `thrift:"fragment_id,19,optional" frugal:"19,optional,i32" json:"fragment_id,omitempty"` - QueryType *palointernalservice.TQueryType 
`thrift:"query_type,20,optional" frugal:"20,optional,TQueryType" json:"query_type,omitempty"` - LoadChannelProfile *runtimeprofile.TRuntimeProfileTree `thrift:"loadChannelProfile,21,optional" frugal:"21,optional,runtimeprofile.TRuntimeProfileTree" json:"loadChannelProfile,omitempty"` - FinishedScanRanges *int32 `thrift:"finished_scan_ranges,22,optional" frugal:"22,optional,i32" json:"finished_scan_ranges,omitempty"` - DetailedReport []*TDetailedReportParams `thrift:"detailed_report,23,optional" frugal:"23,optional,list" json:"detailed_report,omitempty"` -} - -func NewTReportExecStatusParams() *TReportExecStatusParams { - return &TReportExecStatusParams{} -} - -func (p *TReportExecStatusParams) InitDefault() { - *p = TReportExecStatusParams{} -} - -func (p *TReportExecStatusParams) GetProtocolVersion() (v FrontendServiceVersion) { - return p.ProtocolVersion -} - -var TReportExecStatusParams_QueryId_DEFAULT *types.TUniqueId - -func (p *TReportExecStatusParams) GetQueryId() (v *types.TUniqueId) { - if !p.IsSetQueryId() { - return TReportExecStatusParams_QueryId_DEFAULT - } - return p.QueryId -} - -var TReportExecStatusParams_BackendNum_DEFAULT int32 - -func (p *TReportExecStatusParams) GetBackendNum() (v int32) { - if !p.IsSetBackendNum() { - return TReportExecStatusParams_BackendNum_DEFAULT - } - return *p.BackendNum -} - -var TReportExecStatusParams_FragmentInstanceId_DEFAULT *types.TUniqueId - -func (p *TReportExecStatusParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TReportExecStatusParams_FragmentInstanceId_DEFAULT - } - return p.FragmentInstanceId -} - -var TReportExecStatusParams_Status_DEFAULT *status.TStatus - -func (p *TReportExecStatusParams) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TReportExecStatusParams_Status_DEFAULT - } - return p.Status -} - -var TReportExecStatusParams_Done_DEFAULT bool - -func (p *TReportExecStatusParams) GetDone() (v bool) { - if !p.IsSetDone() { - return TReportExecStatusParams_Done_DEFAULT - } - return *p.Done -} - -var TReportExecStatusParams_Profile_DEFAULT *runtimeprofile.TRuntimeProfileTree - -func (p *TReportExecStatusParams) GetProfile() (v *runtimeprofile.TRuntimeProfileTree) { - if !p.IsSetProfile() { - return TReportExecStatusParams_Profile_DEFAULT + if p.IsFragmentLevel == src { + return true + } else if p.IsFragmentLevel == nil || src == nil { + return false } - return p.Profile -} - -var TReportExecStatusParams_ErrorLog_DEFAULT []string - -func (p *TReportExecStatusParams) GetErrorLog() (v []string) { - if !p.IsSetErrorLog() { - return TReportExecStatusParams_ErrorLog_DEFAULT + if *p.IsFragmentLevel != *src { + return false } - return p.ErrorLog + return true } -var TReportExecStatusParams_DeltaUrls_DEFAULT []string - -func (p *TReportExecStatusParams) GetDeltaUrls() (v []string) { - if !p.IsSetDeltaUrls() { - return TReportExecStatusParams_DeltaUrls_DEFAULT - } - return p.DeltaUrls +type TQueryStatistics struct { + ScanRows *int64 `thrift:"scan_rows,1,optional" frugal:"1,optional,i64" json:"scan_rows,omitempty"` + ScanBytes *int64 `thrift:"scan_bytes,2,optional" frugal:"2,optional,i64" json:"scan_bytes,omitempty"` + ReturnedRows *int64 `thrift:"returned_rows,3,optional" frugal:"3,optional,i64" json:"returned_rows,omitempty"` + CpuMs *int64 `thrift:"cpu_ms,4,optional" frugal:"4,optional,i64" json:"cpu_ms,omitempty"` + MaxPeakMemoryBytes *int64 `thrift:"max_peak_memory_bytes,5,optional" frugal:"5,optional,i64" json:"max_peak_memory_bytes,omitempty"` + 
CurrentUsedMemoryBytes *int64 `thrift:"current_used_memory_bytes,6,optional" frugal:"6,optional,i64" json:"current_used_memory_bytes,omitempty"` + WorkloadGroupId *int64 `thrift:"workload_group_id,7,optional" frugal:"7,optional,i64" json:"workload_group_id,omitempty"` + ShuffleSendBytes *int64 `thrift:"shuffle_send_bytes,8,optional" frugal:"8,optional,i64" json:"shuffle_send_bytes,omitempty"` + ShuffleSendRows *int64 `thrift:"shuffle_send_rows,9,optional" frugal:"9,optional,i64" json:"shuffle_send_rows,omitempty"` + ScanBytesFromLocalStorage *int64 `thrift:"scan_bytes_from_local_storage,10,optional" frugal:"10,optional,i64" json:"scan_bytes_from_local_storage,omitempty"` + ScanBytesFromRemoteStorage *int64 `thrift:"scan_bytes_from_remote_storage,11,optional" frugal:"11,optional,i64" json:"scan_bytes_from_remote_storage,omitempty"` } -var TReportExecStatusParams_LoadCounters_DEFAULT map[string]string - -func (p *TReportExecStatusParams) GetLoadCounters() (v map[string]string) { - if !p.IsSetLoadCounters() { - return TReportExecStatusParams_LoadCounters_DEFAULT - } - return p.LoadCounters +func NewTQueryStatistics() *TQueryStatistics { + return &TQueryStatistics{} } -var TReportExecStatusParams_TrackingUrl_DEFAULT string - -func (p *TReportExecStatusParams) GetTrackingUrl() (v string) { - if !p.IsSetTrackingUrl() { - return TReportExecStatusParams_TrackingUrl_DEFAULT - } - return *p.TrackingUrl +func (p *TQueryStatistics) InitDefault() { } -var TReportExecStatusParams_ExportFiles_DEFAULT []string +var TQueryStatistics_ScanRows_DEFAULT int64 -func (p *TReportExecStatusParams) GetExportFiles() (v []string) { - if !p.IsSetExportFiles() { - return TReportExecStatusParams_ExportFiles_DEFAULT +func (p *TQueryStatistics) GetScanRows() (v int64) { + if !p.IsSetScanRows() { + return TQueryStatistics_ScanRows_DEFAULT } - return p.ExportFiles + return *p.ScanRows } -var TReportExecStatusParams_CommitInfos_DEFAULT []*types.TTabletCommitInfo +var TQueryStatistics_ScanBytes_DEFAULT int64 -func (p *TReportExecStatusParams) GetCommitInfos() (v []*types.TTabletCommitInfo) { - if !p.IsSetCommitInfos() { - return TReportExecStatusParams_CommitInfos_DEFAULT +func (p *TQueryStatistics) GetScanBytes() (v int64) { + if !p.IsSetScanBytes() { + return TQueryStatistics_ScanBytes_DEFAULT } - return p.CommitInfos + return *p.ScanBytes } -var TReportExecStatusParams_LoadedRows_DEFAULT int64 +var TQueryStatistics_ReturnedRows_DEFAULT int64 -func (p *TReportExecStatusParams) GetLoadedRows() (v int64) { - if !p.IsSetLoadedRows() { - return TReportExecStatusParams_LoadedRows_DEFAULT +func (p *TQueryStatistics) GetReturnedRows() (v int64) { + if !p.IsSetReturnedRows() { + return TQueryStatistics_ReturnedRows_DEFAULT } - return *p.LoadedRows + return *p.ReturnedRows } -var TReportExecStatusParams_BackendId_DEFAULT int64 +var TQueryStatistics_CpuMs_DEFAULT int64 -func (p *TReportExecStatusParams) GetBackendId() (v int64) { - if !p.IsSetBackendId() { - return TReportExecStatusParams_BackendId_DEFAULT +func (p *TQueryStatistics) GetCpuMs() (v int64) { + if !p.IsSetCpuMs() { + return TQueryStatistics_CpuMs_DEFAULT } - return *p.BackendId + return *p.CpuMs } -var TReportExecStatusParams_LoadedBytes_DEFAULT int64 +var TQueryStatistics_MaxPeakMemoryBytes_DEFAULT int64 -func (p *TReportExecStatusParams) GetLoadedBytes() (v int64) { - if !p.IsSetLoadedBytes() { - return TReportExecStatusParams_LoadedBytes_DEFAULT +func (p *TQueryStatistics) GetMaxPeakMemoryBytes() (v int64) { + if !p.IsSetMaxPeakMemoryBytes() { + return 
TQueryStatistics_MaxPeakMemoryBytes_DEFAULT } - return *p.LoadedBytes + return *p.MaxPeakMemoryBytes } -var TReportExecStatusParams_ErrorTabletInfos_DEFAULT []*types.TErrorTabletInfo +var TQueryStatistics_CurrentUsedMemoryBytes_DEFAULT int64 -func (p *TReportExecStatusParams) GetErrorTabletInfos() (v []*types.TErrorTabletInfo) { - if !p.IsSetErrorTabletInfos() { - return TReportExecStatusParams_ErrorTabletInfos_DEFAULT +func (p *TQueryStatistics) GetCurrentUsedMemoryBytes() (v int64) { + if !p.IsSetCurrentUsedMemoryBytes() { + return TQueryStatistics_CurrentUsedMemoryBytes_DEFAULT } - return p.ErrorTabletInfos + return *p.CurrentUsedMemoryBytes } -var TReportExecStatusParams_FragmentId_DEFAULT int32 +var TQueryStatistics_WorkloadGroupId_DEFAULT int64 -func (p *TReportExecStatusParams) GetFragmentId() (v int32) { - if !p.IsSetFragmentId() { - return TReportExecStatusParams_FragmentId_DEFAULT +func (p *TQueryStatistics) GetWorkloadGroupId() (v int64) { + if !p.IsSetWorkloadGroupId() { + return TQueryStatistics_WorkloadGroupId_DEFAULT } - return *p.FragmentId + return *p.WorkloadGroupId } -var TReportExecStatusParams_QueryType_DEFAULT palointernalservice.TQueryType +var TQueryStatistics_ShuffleSendBytes_DEFAULT int64 -func (p *TReportExecStatusParams) GetQueryType() (v palointernalservice.TQueryType) { - if !p.IsSetQueryType() { - return TReportExecStatusParams_QueryType_DEFAULT +func (p *TQueryStatistics) GetShuffleSendBytes() (v int64) { + if !p.IsSetShuffleSendBytes() { + return TQueryStatistics_ShuffleSendBytes_DEFAULT } - return *p.QueryType + return *p.ShuffleSendBytes } -var TReportExecStatusParams_LoadChannelProfile_DEFAULT *runtimeprofile.TRuntimeProfileTree +var TQueryStatistics_ShuffleSendRows_DEFAULT int64 -func (p *TReportExecStatusParams) GetLoadChannelProfile() (v *runtimeprofile.TRuntimeProfileTree) { - if !p.IsSetLoadChannelProfile() { - return TReportExecStatusParams_LoadChannelProfile_DEFAULT +func (p *TQueryStatistics) GetShuffleSendRows() (v int64) { + if !p.IsSetShuffleSendRows() { + return TQueryStatistics_ShuffleSendRows_DEFAULT } - return p.LoadChannelProfile + return *p.ShuffleSendRows } -var TReportExecStatusParams_FinishedScanRanges_DEFAULT int32 +var TQueryStatistics_ScanBytesFromLocalStorage_DEFAULT int64 -func (p *TReportExecStatusParams) GetFinishedScanRanges() (v int32) { - if !p.IsSetFinishedScanRanges() { - return TReportExecStatusParams_FinishedScanRanges_DEFAULT +func (p *TQueryStatistics) GetScanBytesFromLocalStorage() (v int64) { + if !p.IsSetScanBytesFromLocalStorage() { + return TQueryStatistics_ScanBytesFromLocalStorage_DEFAULT } - return *p.FinishedScanRanges + return *p.ScanBytesFromLocalStorage } -var TReportExecStatusParams_DetailedReport_DEFAULT []*TDetailedReportParams +var TQueryStatistics_ScanBytesFromRemoteStorage_DEFAULT int64 -func (p *TReportExecStatusParams) GetDetailedReport() (v []*TDetailedReportParams) { - if !p.IsSetDetailedReport() { - return TReportExecStatusParams_DetailedReport_DEFAULT +func (p *TQueryStatistics) GetScanBytesFromRemoteStorage() (v int64) { + if !p.IsSetScanBytesFromRemoteStorage() { + return TQueryStatistics_ScanBytesFromRemoteStorage_DEFAULT } - return p.DetailedReport -} -func (p *TReportExecStatusParams) SetProtocolVersion(val FrontendServiceVersion) { - p.ProtocolVersion = val -} -func (p *TReportExecStatusParams) SetQueryId(val *types.TUniqueId) { - p.QueryId = val -} -func (p *TReportExecStatusParams) SetBackendNum(val *int32) { - p.BackendNum = val + return *p.ScanBytesFromRemoteStorage } -func (p 
*TReportExecStatusParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val -} -func (p *TReportExecStatusParams) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TReportExecStatusParams) SetDone(val *bool) { - p.Done = val -} -func (p *TReportExecStatusParams) SetProfile(val *runtimeprofile.TRuntimeProfileTree) { - p.Profile = val -} -func (p *TReportExecStatusParams) SetErrorLog(val []string) { - p.ErrorLog = val -} -func (p *TReportExecStatusParams) SetDeltaUrls(val []string) { - p.DeltaUrls = val -} -func (p *TReportExecStatusParams) SetLoadCounters(val map[string]string) { - p.LoadCounters = val -} -func (p *TReportExecStatusParams) SetTrackingUrl(val *string) { - p.TrackingUrl = val -} -func (p *TReportExecStatusParams) SetExportFiles(val []string) { - p.ExportFiles = val +func (p *TQueryStatistics) SetScanRows(val *int64) { + p.ScanRows = val } -func (p *TReportExecStatusParams) SetCommitInfos(val []*types.TTabletCommitInfo) { - p.CommitInfos = val +func (p *TQueryStatistics) SetScanBytes(val *int64) { + p.ScanBytes = val } -func (p *TReportExecStatusParams) SetLoadedRows(val *int64) { - p.LoadedRows = val +func (p *TQueryStatistics) SetReturnedRows(val *int64) { + p.ReturnedRows = val } -func (p *TReportExecStatusParams) SetBackendId(val *int64) { - p.BackendId = val +func (p *TQueryStatistics) SetCpuMs(val *int64) { + p.CpuMs = val } -func (p *TReportExecStatusParams) SetLoadedBytes(val *int64) { - p.LoadedBytes = val +func (p *TQueryStatistics) SetMaxPeakMemoryBytes(val *int64) { + p.MaxPeakMemoryBytes = val } -func (p *TReportExecStatusParams) SetErrorTabletInfos(val []*types.TErrorTabletInfo) { - p.ErrorTabletInfos = val +func (p *TQueryStatistics) SetCurrentUsedMemoryBytes(val *int64) { + p.CurrentUsedMemoryBytes = val } -func (p *TReportExecStatusParams) SetFragmentId(val *int32) { - p.FragmentId = val +func (p *TQueryStatistics) SetWorkloadGroupId(val *int64) { + p.WorkloadGroupId = val } -func (p *TReportExecStatusParams) SetQueryType(val *palointernalservice.TQueryType) { - p.QueryType = val +func (p *TQueryStatistics) SetShuffleSendBytes(val *int64) { + p.ShuffleSendBytes = val } -func (p *TReportExecStatusParams) SetLoadChannelProfile(val *runtimeprofile.TRuntimeProfileTree) { - p.LoadChannelProfile = val +func (p *TQueryStatistics) SetShuffleSendRows(val *int64) { + p.ShuffleSendRows = val } -func (p *TReportExecStatusParams) SetFinishedScanRanges(val *int32) { - p.FinishedScanRanges = val +func (p *TQueryStatistics) SetScanBytesFromLocalStorage(val *int64) { + p.ScanBytesFromLocalStorage = val } -func (p *TReportExecStatusParams) SetDetailedReport(val []*TDetailedReportParams) { - p.DetailedReport = val +func (p *TQueryStatistics) SetScanBytesFromRemoteStorage(val *int64) { + p.ScanBytesFromRemoteStorage = val } -var fieldIDToName_TReportExecStatusParams = map[int16]string{ - 1: "protocol_version", - 2: "query_id", - 3: "backend_num", - 4: "fragment_instance_id", - 5: "status", - 6: "done", - 7: "profile", - 9: "error_log", - 10: "delta_urls", - 11: "load_counters", - 12: "tracking_url", - 13: "export_files", - 14: "commitInfos", - 15: "loaded_rows", - 16: "backend_id", - 17: "loaded_bytes", - 18: "errorTabletInfos", - 19: "fragment_id", - 20: "query_type", - 21: "loadChannelProfile", - 22: "finished_scan_ranges", - 23: "detailed_report", +var fieldIDToName_TQueryStatistics = map[int16]string{ + 1: "scan_rows", + 2: "scan_bytes", + 3: "returned_rows", + 4: "cpu_ms", + 5: "max_peak_memory_bytes", + 6: "current_used_memory_bytes", + 
7: "workload_group_id", + 8: "shuffle_send_bytes", + 9: "shuffle_send_rows", + 10: "scan_bytes_from_local_storage", + 11: "scan_bytes_from_remote_storage", } -func (p *TReportExecStatusParams) IsSetQueryId() bool { - return p.QueryId != nil +func (p *TQueryStatistics) IsSetScanRows() bool { + return p.ScanRows != nil } -func (p *TReportExecStatusParams) IsSetBackendNum() bool { - return p.BackendNum != nil +func (p *TQueryStatistics) IsSetScanBytes() bool { + return p.ScanBytes != nil } -func (p *TReportExecStatusParams) IsSetFragmentInstanceId() bool { - return p.FragmentInstanceId != nil +func (p *TQueryStatistics) IsSetReturnedRows() bool { + return p.ReturnedRows != nil } -func (p *TReportExecStatusParams) IsSetStatus() bool { - return p.Status != nil +func (p *TQueryStatistics) IsSetCpuMs() bool { + return p.CpuMs != nil } -func (p *TReportExecStatusParams) IsSetDone() bool { - return p.Done != nil +func (p *TQueryStatistics) IsSetMaxPeakMemoryBytes() bool { + return p.MaxPeakMemoryBytes != nil } -func (p *TReportExecStatusParams) IsSetProfile() bool { - return p.Profile != nil +func (p *TQueryStatistics) IsSetCurrentUsedMemoryBytes() bool { + return p.CurrentUsedMemoryBytes != nil } -func (p *TReportExecStatusParams) IsSetErrorLog() bool { - return p.ErrorLog != nil +func (p *TQueryStatistics) IsSetWorkloadGroupId() bool { + return p.WorkloadGroupId != nil } -func (p *TReportExecStatusParams) IsSetDeltaUrls() bool { - return p.DeltaUrls != nil +func (p *TQueryStatistics) IsSetShuffleSendBytes() bool { + return p.ShuffleSendBytes != nil } -func (p *TReportExecStatusParams) IsSetLoadCounters() bool { - return p.LoadCounters != nil +func (p *TQueryStatistics) IsSetShuffleSendRows() bool { + return p.ShuffleSendRows != nil } -func (p *TReportExecStatusParams) IsSetTrackingUrl() bool { - return p.TrackingUrl != nil +func (p *TQueryStatistics) IsSetScanBytesFromLocalStorage() bool { + return p.ScanBytesFromLocalStorage != nil } -func (p *TReportExecStatusParams) IsSetExportFiles() bool { - return p.ExportFiles != nil +func (p *TQueryStatistics) IsSetScanBytesFromRemoteStorage() bool { + return p.ScanBytesFromRemoteStorage != nil } -func (p *TReportExecStatusParams) IsSetCommitInfos() bool { - return p.CommitInfos != nil -} - -func (p *TReportExecStatusParams) IsSetLoadedRows() bool { - return p.LoadedRows != nil -} - -func (p *TReportExecStatusParams) IsSetBackendId() bool { - return p.BackendId != nil -} - -func (p *TReportExecStatusParams) IsSetLoadedBytes() bool { - return p.LoadedBytes != nil -} - -func (p *TReportExecStatusParams) IsSetErrorTabletInfos() bool { - return p.ErrorTabletInfos != nil -} - -func (p *TReportExecStatusParams) IsSetFragmentId() bool { - return p.FragmentId != nil -} - -func (p *TReportExecStatusParams) IsSetQueryType() bool { - return p.QueryType != nil -} - -func (p *TReportExecStatusParams) IsSetLoadChannelProfile() bool { - return p.LoadChannelProfile != nil -} - -func (p *TReportExecStatusParams) IsSetFinishedScanRanges() bool { - return p.FinishedScanRanges != nil -} - -func (p *TReportExecStatusParams) IsSetDetailedReport() bool { - return p.DetailedReport != nil -} - -func (p *TReportExecStatusParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TQueryStatistics) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -14129,232 +14535,98 @@ func (p *TReportExecStatusParams) Read(iprot 
thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: - if fieldTypeId == thrift.MAP { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.LIST { - if err = p.ReadField13(iprot); err != nil { - goto 
ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.LIST { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.I64 { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: if fieldTypeId == thrift.I64 { - if err = p.ReadField16(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.I64 { - if err = p.ReadField17(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.LIST { - if err = p.ReadField18(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.I32 { - if err = p.ReadField19(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.I32 { - if err = p.ReadField20(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField21(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.I32 { - if err = p.ReadField22(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 23: - if fieldTypeId == thrift.LIST { - if err = p.ReadField23(iprot); err != nil { + if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14363,17 +14635,13 @@ func (p *TReportExecStatusParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportExecStatusParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatistics[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14381,299 +14649,133 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReportExecStatusParams[fieldId])) -} - -func (p *TReportExecStatusParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ProtocolVersion = FrontendServiceVersion(v) - } - return nil } -func (p *TReportExecStatusParams) ReadField2(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { - return err - } - return nil -} +func (p *TQueryStatistics) ReadField1(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BackendNum = &v - } - return nil -} - -func (p *TReportExecStatusParams) ReadField4(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TReportExecStatusParams) ReadField5(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err + _field = &v } + p.ScanRows = _field return nil } +func (p *TQueryStatistics) ReadField2(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Done = &v - } - return nil -} - -func (p *TReportExecStatusParams) ReadField7(iprot thrift.TProtocol) error { - p.Profile = runtimeprofile.NewTRuntimeProfileTree() - if err := p.Profile.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TReportExecStatusParams) ReadField9(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ErrorLog = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } - - p.ErrorLog = append(p.ErrorLog, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TReportExecStatusParams) ReadField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.DeltaUrls = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } - - p.DeltaUrls = append(p.DeltaUrls, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TReportExecStatusParams) ReadField11(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.LoadCounters = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - - var _val string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _val = v - } - - p.LoadCounters[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err + _field = &v } + p.ScanBytes = _field return nil } +func (p *TQueryStatistics) ReadField3(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err 
!= nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TrackingUrl = &v - } - return nil -} - -func (p *TReportExecStatusParams) ReadField13(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ExportFiles = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } - - p.ExportFiles = append(p.ExportFiles, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TReportExecStatusParams) ReadField14(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.CommitInfos = append(p.CommitInfos, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + _field = &v } + p.ReturnedRows = _field return nil } +func (p *TQueryStatistics) ReadField4(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField15(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadedRows = &v + _field = &v } + p.CpuMs = _field return nil } +func (p *TQueryStatistics) ReadField5(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField16(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BackendId = &v + _field = &v } + p.MaxPeakMemoryBytes = _field return nil } +func (p *TQueryStatistics) ReadField6(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField17(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadedBytes = &v - } - return nil -} - -func (p *TReportExecStatusParams) ReadField18(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ErrorTabletInfos = make([]*types.TErrorTabletInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTErrorTabletInfo() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.ErrorTabletInfos = append(p.ErrorTabletInfos, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + _field = &v } + p.CurrentUsedMemoryBytes = _field return nil } +func (p *TQueryStatistics) ReadField7(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField19(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FragmentId = &v + _field = &v } + p.WorkloadGroupId = _field return nil } +func (p *TQueryStatistics) ReadField8(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField20(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - tmp := palointernalservice.TQueryType(v) - p.QueryType = &tmp + _field = &v } + p.ShuffleSendBytes = _field return nil } +func (p *TQueryStatistics) ReadField9(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField21(iprot thrift.TProtocol) error { - p.LoadChannelProfile = runtimeprofile.NewTRuntimeProfileTree() - if err := 
p.LoadChannelProfile.Read(iprot); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.ShuffleSendRows = _field return nil } +func (p *TQueryStatistics) ReadField10(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField22(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FinishedScanRanges = &v + _field = &v } + p.ScanBytesFromLocalStorage = _field return nil } +func (p *TQueryStatistics) ReadField11(iprot thrift.TProtocol) error { -func (p *TReportExecStatusParams) ReadField23(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.DetailedReport = make([]*TDetailedReportParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTDetailedReportParams() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.DetailedReport = append(p.DetailedReport, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.ScanBytesFromRemoteStorage = _field return nil } -func (p *TReportExecStatusParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TQueryStatistics) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TReportExecStatusParams"); err != nil { + if err = oprot.WriteStructBegin("TQueryStatistics"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14705,6 +14807,10 @@ func (p *TReportExecStatusParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } if err = p.writeField9(oprot); err != nil { fieldId = 9 goto WriteFieldError @@ -14717,55 +14823,6 @@ func (p *TReportExecStatusParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - if err = p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError - } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError - } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError - } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError - } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError - } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError - } - if err = p.writeField23(oprot); err != nil { - fieldId = 23 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14784,15 +14841,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TReportExecStatusParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := 
oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TQueryStatistics) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetScanRows() { + if err = oprot.WriteFieldBegin("scan_rows", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ScanRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -14801,12 +14860,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryId() { - if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 2); err != nil { +func (p *TQueryStatistics) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetScanBytes() { + if err = oprot.WriteFieldBegin("scan_bytes", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := p.QueryId.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.ScanBytes); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14820,12 +14879,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendNum() { - if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 3); err != nil { +func (p *TQueryStatistics) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetReturnedRows() { + if err = oprot.WriteFieldBegin("returned_rows", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.BackendNum); err != nil { + if err := oprot.WriteI64(*p.ReturnedRows); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14839,12 +14898,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentInstanceId() { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 4); err != nil { +func (p *TQueryStatistics) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetCpuMs() { + if err = oprot.WriteFieldBegin("cpu_ms", thrift.I64, 4); err != nil { goto WriteFieldBeginError } - if err := p.FragmentInstanceId.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.CpuMs); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14858,12 +14917,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 5); err != nil { +func (p *TQueryStatistics) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxPeakMemoryBytes() { + if err = oprot.WriteFieldBegin("max_peak_memory_bytes", thrift.I64, 5); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.MaxPeakMemoryBytes); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14877,12 +14936,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField6(oprot thrift.TProtocol) (err error) { - 
if p.IsSetDone() { - if err = oprot.WriteFieldBegin("done", thrift.BOOL, 6); err != nil { +func (p *TQueryStatistics) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUsedMemoryBytes() { + if err = oprot.WriteFieldBegin("current_used_memory_bytes", thrift.I64, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.Done); err != nil { + if err := oprot.WriteI64(*p.CurrentUsedMemoryBytes); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14896,12 +14955,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetProfile() { - if err = oprot.WriteFieldBegin("profile", thrift.STRUCT, 7); err != nil { +func (p *TQueryStatistics) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroupId() { + if err = oprot.WriteFieldBegin("workload_group_id", thrift.I64, 7); err != nil { goto WriteFieldBeginError } - if err := p.Profile.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.WorkloadGroupId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14915,20 +14974,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetErrorLog() { - if err = oprot.WriteFieldBegin("error_log", thrift.LIST, 9); err != nil { +func (p *TQueryStatistics) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetShuffleSendBytes() { + if err = oprot.WriteFieldBegin("shuffle_send_bytes", thrift.I64, 8); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ErrorLog)); err != nil { - return err - } - for _, v := range p.ErrorLog { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteI64(*p.ShuffleSendBytes); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14937,25 +14988,17 @@ func (p *TReportExecStatusParams) writeField9(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetDeltaUrls() { - if err = oprot.WriteFieldBegin("delta_urls", thrift.LIST, 10); err != nil { +func (p *TQueryStatistics) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetShuffleSendRows() { + if err = oprot.WriteFieldBegin("shuffle_send_rows", thrift.I64, 9); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.DeltaUrls)); err != nil { - return err - } - for _, v := range p.DeltaUrls { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteI64(*p.ShuffleSendRows); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14964,263 +15007,17 @@ func (p *TReportExecStatusParams) writeField10(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadCounters() { - if err = oprot.WriteFieldBegin("load_counters", thrift.MAP, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.LoadCounters)); err != nil { - return err - } - for k, v := range p.LoadCounters { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetTrackingUrl() { - if err = oprot.WriteFieldBegin("tracking_url", thrift.STRING, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.TrackingUrl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetExportFiles() { - if err = oprot.WriteFieldBegin("export_files", thrift.LIST, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ExportFiles)); err != nil { - return err - } - for _, v := range p.ExportFiles { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetCommitInfos() { - if err = oprot.WriteFieldBegin("commitInfos", thrift.LIST, 14); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { - return err - } - for _, v := range p.CommitInfos { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadedRows() { - if err = oprot.WriteFieldBegin("loaded_rows", thrift.I64, 15); err != nil { - goto WriteFieldBeginError - } - if err := 
oprot.WriteI64(*p.LoadedRows); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendId() { - if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 16); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BackendId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadedBytes() { - if err = oprot.WriteFieldBegin("loaded_bytes", thrift.I64, 17); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LoadedBytes); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetErrorTabletInfos() { - if err = oprot.WriteFieldBegin("errorTabletInfos", thrift.LIST, 18); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ErrorTabletInfos)); err != nil { - return err - } - for _, v := range p.ErrorTabletInfos { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentId() { - if err = oprot.WriteFieldBegin("fragment_id", thrift.I32, 19); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.FragmentId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryType() { - if err = oprot.WriteFieldBegin("query_type", thrift.I32, 20); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.QueryType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) -} - -func (p *TReportExecStatusParams) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadChannelProfile() { - if err = oprot.WriteFieldBegin("loadChannelProfile", thrift.STRUCT, 21); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadChannelProfile.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetFinishedScanRanges() { - if err = oprot.WriteFieldBegin("finished_scan_ranges", thrift.I32, 22); err != nil { +func (p *TQueryStatistics) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetScanBytesFromLocalStorage() { + if err = oprot.WriteFieldBegin("scan_bytes_from_local_storage", thrift.I64, 10); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.FinishedScanRanges); err != nil { + if err := oprot.WriteI64(*p.ScanBytesFromLocalStorage); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15229,25 +15026,17 @@ func (p *TReportExecStatusParams) writeField22(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TReportExecStatusParams) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetDetailedReport() { - if err = oprot.WriteFieldBegin("detailed_report", thrift.LIST, 23); err != nil { +func (p *TQueryStatistics) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetScanBytesFromRemoteStorage() { + if err = oprot.WriteFieldBegin("scan_bytes_from_remote_storage", thrift.I64, 11); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DetailedReport)); err != nil { - return err - } - for _, v := range p.DetailedReport { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteI64(*p.ScanBytesFromRemoteStorage); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15256,382 +15045,247 @@ func (p *TReportExecStatusParams) writeField23(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TReportExecStatusParams) String() string { +func (p *TQueryStatistics) String() string { if p == nil { return "" } - return fmt.Sprintf("TReportExecStatusParams(%+v)", *p) + return 
fmt.Sprintf("TQueryStatistics(%+v)", *p) + } -func (p *TReportExecStatusParams) DeepEqual(ano *TReportExecStatusParams) bool { +func (p *TQueryStatistics) DeepEqual(ano *TQueryStatistics) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { - return false - } - if !p.Field2DeepEqual(ano.QueryId) { - return false - } - if !p.Field3DeepEqual(ano.BackendNum) { - return false - } - if !p.Field4DeepEqual(ano.FragmentInstanceId) { - return false - } - if !p.Field5DeepEqual(ano.Status) { - return false - } - if !p.Field6DeepEqual(ano.Done) { - return false - } - if !p.Field7DeepEqual(ano.Profile) { - return false - } - if !p.Field9DeepEqual(ano.ErrorLog) { - return false - } - if !p.Field10DeepEqual(ano.DeltaUrls) { - return false - } - if !p.Field11DeepEqual(ano.LoadCounters) { - return false - } - if !p.Field12DeepEqual(ano.TrackingUrl) { - return false - } - if !p.Field13DeepEqual(ano.ExportFiles) { - return false - } - if !p.Field14DeepEqual(ano.CommitInfos) { + if !p.Field1DeepEqual(ano.ScanRows) { return false } - if !p.Field15DeepEqual(ano.LoadedRows) { + if !p.Field2DeepEqual(ano.ScanBytes) { return false } - if !p.Field16DeepEqual(ano.BackendId) { + if !p.Field3DeepEqual(ano.ReturnedRows) { return false } - if !p.Field17DeepEqual(ano.LoadedBytes) { + if !p.Field4DeepEqual(ano.CpuMs) { return false } - if !p.Field18DeepEqual(ano.ErrorTabletInfos) { + if !p.Field5DeepEqual(ano.MaxPeakMemoryBytes) { return false } - if !p.Field19DeepEqual(ano.FragmentId) { + if !p.Field6DeepEqual(ano.CurrentUsedMemoryBytes) { return false } - if !p.Field20DeepEqual(ano.QueryType) { + if !p.Field7DeepEqual(ano.WorkloadGroupId) { return false } - if !p.Field21DeepEqual(ano.LoadChannelProfile) { + if !p.Field8DeepEqual(ano.ShuffleSendBytes) { return false } - if !p.Field22DeepEqual(ano.FinishedScanRanges) { + if !p.Field9DeepEqual(ano.ShuffleSendRows) { return false } - if !p.Field23DeepEqual(ano.DetailedReport) { + if !p.Field10DeepEqual(ano.ScanBytesFromLocalStorage) { return false } - return true -} - -func (p *TReportExecStatusParams) Field1DeepEqual(src FrontendServiceVersion) bool { - - if p.ProtocolVersion != src { + if !p.Field11DeepEqual(ano.ScanBytesFromRemoteStorage) { return false } return true } -func (p *TReportExecStatusParams) Field2DeepEqual(src *types.TUniqueId) bool { - if !p.QueryId.DeepEqual(src) { - return false - } - return true -} -func (p *TReportExecStatusParams) Field3DeepEqual(src *int32) bool { +func (p *TQueryStatistics) Field1DeepEqual(src *int64) bool { - if p.BackendNum == src { + if p.ScanRows == src { return true - } else if p.BackendNum == nil || src == nil { - return false - } - if *p.BackendNum != *src { - return false - } - return true -} -func (p *TReportExecStatusParams) Field4DeepEqual(src *types.TUniqueId) bool { - - if !p.FragmentInstanceId.DeepEqual(src) { + } else if p.ScanRows == nil || src == nil { return false } - return true -} -func (p *TReportExecStatusParams) Field5DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { + if *p.ScanRows != *src { return false } return true } -func (p *TReportExecStatusParams) Field6DeepEqual(src *bool) bool { +func (p *TQueryStatistics) Field2DeepEqual(src *int64) bool { - if p.Done == src { + if p.ScanBytes == src { return true - } else if p.Done == nil || src == nil { - return false - } - if *p.Done != *src { - return false - } - return true -} -func (p *TReportExecStatusParams) Field7DeepEqual(src 
*runtimeprofile.TRuntimeProfileTree) bool { - - if !p.Profile.DeepEqual(src) { + } else if p.ScanBytes == nil || src == nil { return false } - return true -} -func (p *TReportExecStatusParams) Field9DeepEqual(src []string) bool { - - if len(p.ErrorLog) != len(src) { + if *p.ScanBytes != *src { return false } - for i, v := range p.ErrorLog { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } return true } -func (p *TReportExecStatusParams) Field10DeepEqual(src []string) bool { +func (p *TQueryStatistics) Field3DeepEqual(src *int64) bool { - if len(p.DeltaUrls) != len(src) { + if p.ReturnedRows == src { + return true + } else if p.ReturnedRows == nil || src == nil { return false } - for i, v := range p.DeltaUrls { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true -} -func (p *TReportExecStatusParams) Field11DeepEqual(src map[string]string) bool { - - if len(p.LoadCounters) != len(src) { + if *p.ReturnedRows != *src { return false } - for k, v := range p.LoadCounters { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false - } - } return true } -func (p *TReportExecStatusParams) Field12DeepEqual(src *string) bool { +func (p *TQueryStatistics) Field4DeepEqual(src *int64) bool { - if p.TrackingUrl == src { + if p.CpuMs == src { return true - } else if p.TrackingUrl == nil || src == nil { + } else if p.CpuMs == nil || src == nil { return false } - if strings.Compare(*p.TrackingUrl, *src) != 0 { + if *p.CpuMs != *src { return false } return true } -func (p *TReportExecStatusParams) Field13DeepEqual(src []string) bool { +func (p *TQueryStatistics) Field5DeepEqual(src *int64) bool { - if len(p.ExportFiles) != len(src) { + if p.MaxPeakMemoryBytes == src { + return true + } else if p.MaxPeakMemoryBytes == nil || src == nil { return false } - for i, v := range p.ExportFiles { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true -} -func (p *TReportExecStatusParams) Field14DeepEqual(src []*types.TTabletCommitInfo) bool { - - if len(p.CommitInfos) != len(src) { + if *p.MaxPeakMemoryBytes != *src { return false } - for i, v := range p.CommitInfos { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TReportExecStatusParams) Field15DeepEqual(src *int64) bool { +func (p *TQueryStatistics) Field6DeepEqual(src *int64) bool { - if p.LoadedRows == src { + if p.CurrentUsedMemoryBytes == src { return true - } else if p.LoadedRows == nil || src == nil { + } else if p.CurrentUsedMemoryBytes == nil || src == nil { return false } - if *p.LoadedRows != *src { + if *p.CurrentUsedMemoryBytes != *src { return false } return true } -func (p *TReportExecStatusParams) Field16DeepEqual(src *int64) bool { +func (p *TQueryStatistics) Field7DeepEqual(src *int64) bool { - if p.BackendId == src { + if p.WorkloadGroupId == src { return true - } else if p.BackendId == nil || src == nil { + } else if p.WorkloadGroupId == nil || src == nil { return false } - if *p.BackendId != *src { + if *p.WorkloadGroupId != *src { return false } return true } -func (p *TReportExecStatusParams) Field17DeepEqual(src *int64) bool { +func (p *TQueryStatistics) Field8DeepEqual(src *int64) bool { - if p.LoadedBytes == src { + if p.ShuffleSendBytes == src { return true - } else if p.LoadedBytes == nil || src == nil { + } else if p.ShuffleSendBytes == nil || src == nil { return false } - if *p.LoadedBytes != *src { - return false - } - return true -} -func (p *TReportExecStatusParams) 
Field18DeepEqual(src []*types.TErrorTabletInfo) bool { - - if len(p.ErrorTabletInfos) != len(src) { + if *p.ShuffleSendBytes != *src { return false } - for i, v := range p.ErrorTabletInfos { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TReportExecStatusParams) Field19DeepEqual(src *int32) bool { +func (p *TQueryStatistics) Field9DeepEqual(src *int64) bool { - if p.FragmentId == src { + if p.ShuffleSendRows == src { return true - } else if p.FragmentId == nil || src == nil { + } else if p.ShuffleSendRows == nil || src == nil { return false } - if *p.FragmentId != *src { + if *p.ShuffleSendRows != *src { return false } return true } -func (p *TReportExecStatusParams) Field20DeepEqual(src *palointernalservice.TQueryType) bool { +func (p *TQueryStatistics) Field10DeepEqual(src *int64) bool { - if p.QueryType == src { + if p.ScanBytesFromLocalStorage == src { return true - } else if p.QueryType == nil || src == nil { - return false - } - if *p.QueryType != *src { + } else if p.ScanBytesFromLocalStorage == nil || src == nil { return false } - return true -} -func (p *TReportExecStatusParams) Field21DeepEqual(src *runtimeprofile.TRuntimeProfileTree) bool { - - if !p.LoadChannelProfile.DeepEqual(src) { + if *p.ScanBytesFromLocalStorage != *src { return false } return true } -func (p *TReportExecStatusParams) Field22DeepEqual(src *int32) bool { +func (p *TQueryStatistics) Field11DeepEqual(src *int64) bool { - if p.FinishedScanRanges == src { + if p.ScanBytesFromRemoteStorage == src { return true - } else if p.FinishedScanRanges == nil || src == nil { + } else if p.ScanBytesFromRemoteStorage == nil || src == nil { return false } - if *p.FinishedScanRanges != *src { + if *p.ScanBytesFromRemoteStorage != *src { return false } return true } -func (p *TReportExecStatusParams) Field23DeepEqual(src []*TDetailedReportParams) bool { - if len(p.DetailedReport) != len(src) { - return false - } - for i, v := range p.DetailedReport { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +type TReportWorkloadRuntimeStatusParams struct { + BackendId *int64 `thrift:"backend_id,1,optional" frugal:"1,optional,i64" json:"backend_id,omitempty"` + QueryStatisticsMap map[string]*TQueryStatistics `thrift:"query_statistics_map,2,optional" frugal:"2,optional,map" json:"query_statistics_map,omitempty"` } -type TFeResult_ struct { - ProtocolVersion FrontendServiceVersion `thrift:"protocolVersion,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocolVersion"` - Status *status.TStatus `thrift:"status,2,required" frugal:"2,required,status.TStatus" json:"status"` +func NewTReportWorkloadRuntimeStatusParams() *TReportWorkloadRuntimeStatusParams { + return &TReportWorkloadRuntimeStatusParams{} } -func NewTFeResult_() *TFeResult_ { - return &TFeResult_{} +func (p *TReportWorkloadRuntimeStatusParams) InitDefault() { } -func (p *TFeResult_) InitDefault() { - *p = TFeResult_{} -} +var TReportWorkloadRuntimeStatusParams_BackendId_DEFAULT int64 -func (p *TFeResult_) GetProtocolVersion() (v FrontendServiceVersion) { - return p.ProtocolVersion +func (p *TReportWorkloadRuntimeStatusParams) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TReportWorkloadRuntimeStatusParams_BackendId_DEFAULT + } + return *p.BackendId } -var TFeResult__Status_DEFAULT *status.TStatus +var TReportWorkloadRuntimeStatusParams_QueryStatisticsMap_DEFAULT map[string]*TQueryStatistics -func (p *TFeResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - 
return TFeResult__Status_DEFAULT +func (p *TReportWorkloadRuntimeStatusParams) GetQueryStatisticsMap() (v map[string]*TQueryStatistics) { + if !p.IsSetQueryStatisticsMap() { + return TReportWorkloadRuntimeStatusParams_QueryStatisticsMap_DEFAULT } - return p.Status + return p.QueryStatisticsMap } -func (p *TFeResult_) SetProtocolVersion(val FrontendServiceVersion) { - p.ProtocolVersion = val +func (p *TReportWorkloadRuntimeStatusParams) SetBackendId(val *int64) { + p.BackendId = val } -func (p *TFeResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TReportWorkloadRuntimeStatusParams) SetQueryStatisticsMap(val map[string]*TQueryStatistics) { + p.QueryStatisticsMap = val } -var fieldIDToName_TFeResult_ = map[int16]string{ - 1: "protocolVersion", - 2: "status", +var fieldIDToName_TReportWorkloadRuntimeStatusParams = map[int16]string{ + 1: "backend_id", + 2: "query_statistics_map", } -func (p *TFeResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TReportWorkloadRuntimeStatusParams) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TFeResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TReportWorkloadRuntimeStatusParams) IsSetQueryStatisticsMap() bool { + return p.QueryStatisticsMap != nil +} + +func (p *TReportWorkloadRuntimeStatusParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -15648,33 +15302,26 @@ func (p *TFeResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.MAP { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15683,22 +15330,13 @@ func (p *TFeResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetStatus { - fieldId = 2 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFeResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportWorkloadRuntimeStatusParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15706,30 +15344,52 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFeResult_[fieldId])) } -func (p *TFeResult_) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TReportWorkloadRuntimeStatusParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ProtocolVersion = FrontendServiceVersion(v) + _field = &v } + p.BackendId = _field return nil } +func (p *TReportWorkloadRuntimeStatusParams) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]*TQueryStatistics, size) + values := make([]TQueryStatistics, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } -func (p *TFeResult_) ReadField2(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err } + p.QueryStatisticsMap = _field return nil } -func (p *TFeResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TReportWorkloadRuntimeStatusParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFeResult"); err != nil { + if err = oprot.WriteStructBegin("TReportWorkloadRuntimeStatusParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15741,7 +15401,6 @@ func (p *TFeResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15760,15 +15419,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFeResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocolVersion", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TReportWorkloadRuntimeStatusParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -15777,15 +15438,28 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFeResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TReportWorkloadRuntimeStatusParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryStatisticsMap() { + if err = oprot.WriteFieldBegin("query_statistics_map", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if 
err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(p.QueryStatisticsMap)); err != nil { + return err + } + for k, v := range p.QueryStatisticsMap { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -15794,505 +15468,162 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFeResult_) String() string { +func (p *TReportWorkloadRuntimeStatusParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TFeResult_(%+v)", *p) + return fmt.Sprintf("TReportWorkloadRuntimeStatusParams(%+v)", *p) + } -func (p *TFeResult_) DeepEqual(ano *TFeResult_) bool { +func (p *TReportWorkloadRuntimeStatusParams) DeepEqual(ano *TReportWorkloadRuntimeStatusParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { + if !p.Field1DeepEqual(ano.BackendId) { return false } - if !p.Field2DeepEqual(ano.Status) { + if !p.Field2DeepEqual(ano.QueryStatisticsMap) { return false } return true } -func (p *TFeResult_) Field1DeepEqual(src FrontendServiceVersion) bool { +func (p *TReportWorkloadRuntimeStatusParams) Field1DeepEqual(src *int64) bool { - if p.ProtocolVersion != src { + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { return false } - return true -} -func (p *TFeResult_) Field2DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { + if *p.BackendId != *src { return false } return true } +func (p *TReportWorkloadRuntimeStatusParams) Field2DeepEqual(src map[string]*TQueryStatistics) bool { -type TMasterOpRequest struct { - User string `thrift:"user,1,required" frugal:"1,required,string" json:"user"` - Db string `thrift:"db,2,required" frugal:"2,required,string" json:"db"` - Sql string `thrift:"sql,3,required" frugal:"3,required,string" json:"sql"` - ResourceInfo *types.TResourceInfo `thrift:"resourceInfo,4,optional" frugal:"4,optional,types.TResourceInfo" json:"resourceInfo,omitempty"` - Cluster *string `thrift:"cluster,5,optional" frugal:"5,optional,string" json:"cluster,omitempty"` - ExecMemLimit *int64 `thrift:"execMemLimit,6,optional" frugal:"6,optional,i64" json:"execMemLimit,omitempty"` - QueryTimeout *int32 `thrift:"queryTimeout,7,optional" frugal:"7,optional,i32" json:"queryTimeout,omitempty"` - UserIp *string `thrift:"user_ip,8,optional" frugal:"8,optional,string" json:"user_ip,omitempty"` - TimeZone *string `thrift:"time_zone,9,optional" frugal:"9,optional,string" json:"time_zone,omitempty"` - StmtId *int64 `thrift:"stmt_id,10,optional" frugal:"10,optional,i64" json:"stmt_id,omitempty"` - SqlMode *int64 `thrift:"sqlMode,11,optional" frugal:"11,optional,i64" json:"sqlMode,omitempty"` - LoadMemLimit *int64 `thrift:"loadMemLimit,12,optional" frugal:"12,optional,i64" json:"loadMemLimit,omitempty"` - EnableStrictMode *bool `thrift:"enableStrictMode,13,optional" frugal:"13,optional,bool" json:"enableStrictMode,omitempty"` - CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,14,optional" frugal:"14,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` - StmtIdx *int32 `thrift:"stmtIdx,15,optional" frugal:"15,optional,i32" json:"stmtIdx,omitempty"` - QueryOptions *palointernalservice.TQueryOptions 
`thrift:"query_options,16,optional" frugal:"16,optional,palointernalservice.TQueryOptions" json:"query_options,omitempty"` - QueryId *types.TUniqueId `thrift:"query_id,17,optional" frugal:"17,optional,types.TUniqueId" json:"query_id,omitempty"` - InsertVisibleTimeoutMs *int64 `thrift:"insert_visible_timeout_ms,18,optional" frugal:"18,optional,i64" json:"insert_visible_timeout_ms,omitempty"` - SessionVariables map[string]string `thrift:"session_variables,19,optional" frugal:"19,optional,map" json:"session_variables,omitempty"` - FoldConstantByBe *bool `thrift:"foldConstantByBe,20,optional" frugal:"20,optional,bool" json:"foldConstantByBe,omitempty"` - TraceCarrier map[string]string `thrift:"trace_carrier,21,optional" frugal:"21,optional,map" json:"trace_carrier,omitempty"` - ClientNodeHost *string `thrift:"clientNodeHost,22,optional" frugal:"22,optional,string" json:"clientNodeHost,omitempty"` - ClientNodePort *int32 `thrift:"clientNodePort,23,optional" frugal:"23,optional,i32" json:"clientNodePort,omitempty"` - SyncJournalOnly *bool `thrift:"syncJournalOnly,24,optional" frugal:"24,optional,bool" json:"syncJournalOnly,omitempty"` - DefaultCatalog *string `thrift:"defaultCatalog,25,optional" frugal:"25,optional,string" json:"defaultCatalog,omitempty"` - DefaultDatabase *string `thrift:"defaultDatabase,26,optional" frugal:"26,optional,string" json:"defaultDatabase,omitempty"` -} - -func NewTMasterOpRequest() *TMasterOpRequest { - return &TMasterOpRequest{} -} - -func (p *TMasterOpRequest) InitDefault() { - *p = TMasterOpRequest{} -} - -func (p *TMasterOpRequest) GetUser() (v string) { - return p.User -} - -func (p *TMasterOpRequest) GetDb() (v string) { - return p.Db -} - -func (p *TMasterOpRequest) GetSql() (v string) { - return p.Sql -} - -var TMasterOpRequest_ResourceInfo_DEFAULT *types.TResourceInfo - -func (p *TMasterOpRequest) GetResourceInfo() (v *types.TResourceInfo) { - if !p.IsSetResourceInfo() { - return TMasterOpRequest_ResourceInfo_DEFAULT - } - return p.ResourceInfo -} - -var TMasterOpRequest_Cluster_DEFAULT string - -func (p *TMasterOpRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TMasterOpRequest_Cluster_DEFAULT - } - return *p.Cluster -} - -var TMasterOpRequest_ExecMemLimit_DEFAULT int64 - -func (p *TMasterOpRequest) GetExecMemLimit() (v int64) { - if !p.IsSetExecMemLimit() { - return TMasterOpRequest_ExecMemLimit_DEFAULT - } - return *p.ExecMemLimit -} - -var TMasterOpRequest_QueryTimeout_DEFAULT int32 - -func (p *TMasterOpRequest) GetQueryTimeout() (v int32) { - if !p.IsSetQueryTimeout() { - return TMasterOpRequest_QueryTimeout_DEFAULT - } - return *p.QueryTimeout -} - -var TMasterOpRequest_UserIp_DEFAULT string - -func (p *TMasterOpRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TMasterOpRequest_UserIp_DEFAULT - } - return *p.UserIp -} - -var TMasterOpRequest_TimeZone_DEFAULT string - -func (p *TMasterOpRequest) GetTimeZone() (v string) { - if !p.IsSetTimeZone() { - return TMasterOpRequest_TimeZone_DEFAULT - } - return *p.TimeZone -} - -var TMasterOpRequest_StmtId_DEFAULT int64 - -func (p *TMasterOpRequest) GetStmtId() (v int64) { - if !p.IsSetStmtId() { - return TMasterOpRequest_StmtId_DEFAULT - } - return *p.StmtId -} - -var TMasterOpRequest_SqlMode_DEFAULT int64 - -func (p *TMasterOpRequest) GetSqlMode() (v int64) { - if !p.IsSetSqlMode() { - return TMasterOpRequest_SqlMode_DEFAULT - } - return *p.SqlMode -} - -var TMasterOpRequest_LoadMemLimit_DEFAULT int64 - -func (p *TMasterOpRequest) GetLoadMemLimit() (v int64) { - if 
!p.IsSetLoadMemLimit() { - return TMasterOpRequest_LoadMemLimit_DEFAULT + if len(p.QueryStatisticsMap) != len(src) { + return false } - return *p.LoadMemLimit -} - -var TMasterOpRequest_EnableStrictMode_DEFAULT bool - -func (p *TMasterOpRequest) GetEnableStrictMode() (v bool) { - if !p.IsSetEnableStrictMode() { - return TMasterOpRequest_EnableStrictMode_DEFAULT + for k, v := range p.QueryStatisticsMap { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } } - return *p.EnableStrictMode + return true } -var TMasterOpRequest_CurrentUserIdent_DEFAULT *types.TUserIdentity - -func (p *TMasterOpRequest) GetCurrentUserIdent() (v *types.TUserIdentity) { - if !p.IsSetCurrentUserIdent() { - return TMasterOpRequest_CurrentUserIdent_DEFAULT - } - return p.CurrentUserIdent +type TQueryProfile struct { + QueryId *types.TUniqueId `thrift:"query_id,1,optional" frugal:"1,optional,types.TUniqueId" json:"query_id,omitempty"` + FragmentIdToProfile map[int32][]*TDetailedReportParams `thrift:"fragment_id_to_profile,2,optional" frugal:"2,optional,map>" json:"fragment_id_to_profile,omitempty"` + FragmentInstanceIds []*types.TUniqueId `thrift:"fragment_instance_ids,3,optional" frugal:"3,optional,list" json:"fragment_instance_ids,omitempty"` + InstanceProfiles []*runtimeprofile.TRuntimeProfileTree `thrift:"instance_profiles,4,optional" frugal:"4,optional,list" json:"instance_profiles,omitempty"` + LoadChannelProfiles []*runtimeprofile.TRuntimeProfileTree `thrift:"load_channel_profiles,5,optional" frugal:"5,optional,list" json:"load_channel_profiles,omitempty"` } -var TMasterOpRequest_StmtIdx_DEFAULT int32 - -func (p *TMasterOpRequest) GetStmtIdx() (v int32) { - if !p.IsSetStmtIdx() { - return TMasterOpRequest_StmtIdx_DEFAULT - } - return *p.StmtIdx +func NewTQueryProfile() *TQueryProfile { + return &TQueryProfile{} } -var TMasterOpRequest_QueryOptions_DEFAULT *palointernalservice.TQueryOptions - -func (p *TMasterOpRequest) GetQueryOptions() (v *palointernalservice.TQueryOptions) { - if !p.IsSetQueryOptions() { - return TMasterOpRequest_QueryOptions_DEFAULT - } - return p.QueryOptions +func (p *TQueryProfile) InitDefault() { } -var TMasterOpRequest_QueryId_DEFAULT *types.TUniqueId +var TQueryProfile_QueryId_DEFAULT *types.TUniqueId -func (p *TMasterOpRequest) GetQueryId() (v *types.TUniqueId) { +func (p *TQueryProfile) GetQueryId() (v *types.TUniqueId) { if !p.IsSetQueryId() { - return TMasterOpRequest_QueryId_DEFAULT + return TQueryProfile_QueryId_DEFAULT } return p.QueryId } -var TMasterOpRequest_InsertVisibleTimeoutMs_DEFAULT int64 - -func (p *TMasterOpRequest) GetInsertVisibleTimeoutMs() (v int64) { - if !p.IsSetInsertVisibleTimeoutMs() { - return TMasterOpRequest_InsertVisibleTimeoutMs_DEFAULT - } - return *p.InsertVisibleTimeoutMs -} - -var TMasterOpRequest_SessionVariables_DEFAULT map[string]string - -func (p *TMasterOpRequest) GetSessionVariables() (v map[string]string) { - if !p.IsSetSessionVariables() { - return TMasterOpRequest_SessionVariables_DEFAULT - } - return p.SessionVariables -} - -var TMasterOpRequest_FoldConstantByBe_DEFAULT bool - -func (p *TMasterOpRequest) GetFoldConstantByBe() (v bool) { - if !p.IsSetFoldConstantByBe() { - return TMasterOpRequest_FoldConstantByBe_DEFAULT - } - return *p.FoldConstantByBe -} - -var TMasterOpRequest_TraceCarrier_DEFAULT map[string]string - -func (p *TMasterOpRequest) GetTraceCarrier() (v map[string]string) { - if !p.IsSetTraceCarrier() { - return TMasterOpRequest_TraceCarrier_DEFAULT - } - return p.TraceCarrier -} - -var 
TMasterOpRequest_ClientNodeHost_DEFAULT string - -func (p *TMasterOpRequest) GetClientNodeHost() (v string) { - if !p.IsSetClientNodeHost() { - return TMasterOpRequest_ClientNodeHost_DEFAULT - } - return *p.ClientNodeHost -} - -var TMasterOpRequest_ClientNodePort_DEFAULT int32 +var TQueryProfile_FragmentIdToProfile_DEFAULT map[int32][]*TDetailedReportParams -func (p *TMasterOpRequest) GetClientNodePort() (v int32) { - if !p.IsSetClientNodePort() { - return TMasterOpRequest_ClientNodePort_DEFAULT +func (p *TQueryProfile) GetFragmentIdToProfile() (v map[int32][]*TDetailedReportParams) { + if !p.IsSetFragmentIdToProfile() { + return TQueryProfile_FragmentIdToProfile_DEFAULT } - return *p.ClientNodePort + return p.FragmentIdToProfile } -var TMasterOpRequest_SyncJournalOnly_DEFAULT bool +var TQueryProfile_FragmentInstanceIds_DEFAULT []*types.TUniqueId -func (p *TMasterOpRequest) GetSyncJournalOnly() (v bool) { - if !p.IsSetSyncJournalOnly() { - return TMasterOpRequest_SyncJournalOnly_DEFAULT +func (p *TQueryProfile) GetFragmentInstanceIds() (v []*types.TUniqueId) { + if !p.IsSetFragmentInstanceIds() { + return TQueryProfile_FragmentInstanceIds_DEFAULT } - return *p.SyncJournalOnly + return p.FragmentInstanceIds } -var TMasterOpRequest_DefaultCatalog_DEFAULT string +var TQueryProfile_InstanceProfiles_DEFAULT []*runtimeprofile.TRuntimeProfileTree -func (p *TMasterOpRequest) GetDefaultCatalog() (v string) { - if !p.IsSetDefaultCatalog() { - return TMasterOpRequest_DefaultCatalog_DEFAULT +func (p *TQueryProfile) GetInstanceProfiles() (v []*runtimeprofile.TRuntimeProfileTree) { + if !p.IsSetInstanceProfiles() { + return TQueryProfile_InstanceProfiles_DEFAULT } - return *p.DefaultCatalog + return p.InstanceProfiles } -var TMasterOpRequest_DefaultDatabase_DEFAULT string +var TQueryProfile_LoadChannelProfiles_DEFAULT []*runtimeprofile.TRuntimeProfileTree -func (p *TMasterOpRequest) GetDefaultDatabase() (v string) { - if !p.IsSetDefaultDatabase() { - return TMasterOpRequest_DefaultDatabase_DEFAULT +func (p *TQueryProfile) GetLoadChannelProfiles() (v []*runtimeprofile.TRuntimeProfileTree) { + if !p.IsSetLoadChannelProfiles() { + return TQueryProfile_LoadChannelProfiles_DEFAULT } - return *p.DefaultDatabase -} -func (p *TMasterOpRequest) SetUser(val string) { - p.User = val -} -func (p *TMasterOpRequest) SetDb(val string) { - p.Db = val -} -func (p *TMasterOpRequest) SetSql(val string) { - p.Sql = val -} -func (p *TMasterOpRequest) SetResourceInfo(val *types.TResourceInfo) { - p.ResourceInfo = val -} -func (p *TMasterOpRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TMasterOpRequest) SetExecMemLimit(val *int64) { - p.ExecMemLimit = val -} -func (p *TMasterOpRequest) SetQueryTimeout(val *int32) { - p.QueryTimeout = val -} -func (p *TMasterOpRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TMasterOpRequest) SetTimeZone(val *string) { - p.TimeZone = val -} -func (p *TMasterOpRequest) SetStmtId(val *int64) { - p.StmtId = val -} -func (p *TMasterOpRequest) SetSqlMode(val *int64) { - p.SqlMode = val -} -func (p *TMasterOpRequest) SetLoadMemLimit(val *int64) { - p.LoadMemLimit = val -} -func (p *TMasterOpRequest) SetEnableStrictMode(val *bool) { - p.EnableStrictMode = val -} -func (p *TMasterOpRequest) SetCurrentUserIdent(val *types.TUserIdentity) { - p.CurrentUserIdent = val -} -func (p *TMasterOpRequest) SetStmtIdx(val *int32) { - p.StmtIdx = val -} -func (p *TMasterOpRequest) SetQueryOptions(val *palointernalservice.TQueryOptions) { - p.QueryOptions = val + return 
p.LoadChannelProfiles } -func (p *TMasterOpRequest) SetQueryId(val *types.TUniqueId) { +func (p *TQueryProfile) SetQueryId(val *types.TUniqueId) { p.QueryId = val } -func (p *TMasterOpRequest) SetInsertVisibleTimeoutMs(val *int64) { - p.InsertVisibleTimeoutMs = val -} -func (p *TMasterOpRequest) SetSessionVariables(val map[string]string) { - p.SessionVariables = val -} -func (p *TMasterOpRequest) SetFoldConstantByBe(val *bool) { - p.FoldConstantByBe = val -} -func (p *TMasterOpRequest) SetTraceCarrier(val map[string]string) { - p.TraceCarrier = val -} -func (p *TMasterOpRequest) SetClientNodeHost(val *string) { - p.ClientNodeHost = val -} -func (p *TMasterOpRequest) SetClientNodePort(val *int32) { - p.ClientNodePort = val -} -func (p *TMasterOpRequest) SetSyncJournalOnly(val *bool) { - p.SyncJournalOnly = val -} -func (p *TMasterOpRequest) SetDefaultCatalog(val *string) { - p.DefaultCatalog = val -} -func (p *TMasterOpRequest) SetDefaultDatabase(val *string) { - p.DefaultDatabase = val -} - -var fieldIDToName_TMasterOpRequest = map[int16]string{ - 1: "user", - 2: "db", - 3: "sql", - 4: "resourceInfo", - 5: "cluster", - 6: "execMemLimit", - 7: "queryTimeout", - 8: "user_ip", - 9: "time_zone", - 10: "stmt_id", - 11: "sqlMode", - 12: "loadMemLimit", - 13: "enableStrictMode", - 14: "current_user_ident", - 15: "stmtIdx", - 16: "query_options", - 17: "query_id", - 18: "insert_visible_timeout_ms", - 19: "session_variables", - 20: "foldConstantByBe", - 21: "trace_carrier", - 22: "clientNodeHost", - 23: "clientNodePort", - 24: "syncJournalOnly", - 25: "defaultCatalog", - 26: "defaultDatabase", -} - -func (p *TMasterOpRequest) IsSetResourceInfo() bool { - return p.ResourceInfo != nil -} - -func (p *TMasterOpRequest) IsSetCluster() bool { - return p.Cluster != nil -} - -func (p *TMasterOpRequest) IsSetExecMemLimit() bool { - return p.ExecMemLimit != nil -} - -func (p *TMasterOpRequest) IsSetQueryTimeout() bool { - return p.QueryTimeout != nil -} - -func (p *TMasterOpRequest) IsSetUserIp() bool { - return p.UserIp != nil -} - -func (p *TMasterOpRequest) IsSetTimeZone() bool { - return p.TimeZone != nil -} - -func (p *TMasterOpRequest) IsSetStmtId() bool { - return p.StmtId != nil -} - -func (p *TMasterOpRequest) IsSetSqlMode() bool { - return p.SqlMode != nil -} - -func (p *TMasterOpRequest) IsSetLoadMemLimit() bool { - return p.LoadMemLimit != nil +func (p *TQueryProfile) SetFragmentIdToProfile(val map[int32][]*TDetailedReportParams) { + p.FragmentIdToProfile = val } - -func (p *TMasterOpRequest) IsSetEnableStrictMode() bool { - return p.EnableStrictMode != nil +func (p *TQueryProfile) SetFragmentInstanceIds(val []*types.TUniqueId) { + p.FragmentInstanceIds = val } - -func (p *TMasterOpRequest) IsSetCurrentUserIdent() bool { - return p.CurrentUserIdent != nil +func (p *TQueryProfile) SetInstanceProfiles(val []*runtimeprofile.TRuntimeProfileTree) { + p.InstanceProfiles = val } - -func (p *TMasterOpRequest) IsSetStmtIdx() bool { - return p.StmtIdx != nil +func (p *TQueryProfile) SetLoadChannelProfiles(val []*runtimeprofile.TRuntimeProfileTree) { + p.LoadChannelProfiles = val } -func (p *TMasterOpRequest) IsSetQueryOptions() bool { - return p.QueryOptions != nil +var fieldIDToName_TQueryProfile = map[int16]string{ + 1: "query_id", + 2: "fragment_id_to_profile", + 3: "fragment_instance_ids", + 4: "instance_profiles", + 5: "load_channel_profiles", } -func (p *TMasterOpRequest) IsSetQueryId() bool { +func (p *TQueryProfile) IsSetQueryId() bool { return p.QueryId != nil } -func (p *TMasterOpRequest) 
IsSetInsertVisibleTimeoutMs() bool { - return p.InsertVisibleTimeoutMs != nil -} - -func (p *TMasterOpRequest) IsSetSessionVariables() bool { - return p.SessionVariables != nil -} - -func (p *TMasterOpRequest) IsSetFoldConstantByBe() bool { - return p.FoldConstantByBe != nil -} - -func (p *TMasterOpRequest) IsSetTraceCarrier() bool { - return p.TraceCarrier != nil -} - -func (p *TMasterOpRequest) IsSetClientNodeHost() bool { - return p.ClientNodeHost != nil -} - -func (p *TMasterOpRequest) IsSetClientNodePort() bool { - return p.ClientNodePort != nil +func (p *TQueryProfile) IsSetFragmentIdToProfile() bool { + return p.FragmentIdToProfile != nil } -func (p *TMasterOpRequest) IsSetSyncJournalOnly() bool { - return p.SyncJournalOnly != nil +func (p *TQueryProfile) IsSetFragmentInstanceIds() bool { + return p.FragmentInstanceIds != nil } -func (p *TMasterOpRequest) IsSetDefaultCatalog() bool { - return p.DefaultCatalog != nil +func (p *TQueryProfile) IsSetInstanceProfiles() bool { + return p.InstanceProfiles != nil } -func (p *TMasterOpRequest) IsSetDefaultDatabase() bool { - return p.DefaultDatabase != nil +func (p *TQueryProfile) IsSetLoadChannelProfiles() bool { + return p.LoadChannelProfiles != nil } -func (p *TMasterOpRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TQueryProfile) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetDb bool = false - var issetSql bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -16309,274 +15640,50 @@ func (p *TMasterOpRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.MAP { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetSql = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRING { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.I64 { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.I32 { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField16(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField17(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.I64 { - if err = p.ReadField18(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.MAP { - if err = p.ReadField19(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField20(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.MAP { - if err = p.ReadField21(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.STRING { - if err = p.ReadField22(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 23: - if fieldTypeId == thrift.I32 { - if err = p.ReadField23(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 24: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField24(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil 
{ - goto SkipFieldError - } - } - case 25: - if fieldTypeId == thrift.STRING { - if err = p.ReadField25(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 26: - if fieldTypeId == thrift.STRING { - if err = p.ReadField26(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16585,27 +15692,13 @@ func (p *TMasterOpRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetSql { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryProfile[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16613,283 +15706,130 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpRequest[fieldId])) -} - -func (p *TMasterOpRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} - -func (p *TMasterOpRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = v - } - return nil -} - -func (p *TMasterOpRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Sql = v - } - return nil -} - -func (p *TMasterOpRequest) ReadField4(iprot thrift.TProtocol) error { - p.ResourceInfo = types.NewTResourceInfo() - if err := p.ResourceInfo.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TMasterOpRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ExecMemLimit = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.QueryTimeout = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField9(iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.TimeZone = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.StmtId = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.SqlMode = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadMemLimit = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableStrictMode = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField14(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TMasterOpRequest) ReadField15(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.StmtIdx = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField16(iprot thrift.TProtocol) error { - p.QueryOptions = palointernalservice.NewTQueryOptions() - if err := p.QueryOptions.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TMasterOpRequest) ReadField17(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { - return err - } - return nil } -func (p *TMasterOpRequest) ReadField18(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TQueryProfile) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err - } else { - p.InsertVisibleTimeoutMs = &v } + p.QueryId = _field return nil } - -func (p *TMasterOpRequest) ReadField19(iprot thrift.TProtocol) error { +func (p *TQueryProfile) ReadField2(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.SessionVariables = make(map[string]string, size) + _field := make(map[int32][]*TDetailedReportParams, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { _key = v } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TDetailedReportParams, 0, size) + values := make([]TDetailedReportParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() - var _val string - if v, err := iprot.ReadString(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - _val = v } - p.SessionVariables[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.FragmentIdToProfile = _field return nil } - -func (p *TMasterOpRequest) ReadField20(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.FoldConstantByBe = &v - } - return nil -} - -func (p *TMasterOpRequest) ReadField21(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() +func (p *TQueryProfile) ReadField3(iprot thrift.TProtocol) error 
{ + _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TraceCarrier = make(map[string]string, size) + _field := make([]*types.TUniqueId, 0, size) + values := make([]types.TUniqueId, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } + _elem := &values[i] + _elem.InitDefault() - var _val string - if v, err := iprot.ReadString(); err != nil { + if err := _elem.Read(iprot); err != nil { return err - } else { - _val = v } - p.TraceCarrier[_key] = _val + _field = append(_field, _elem) } - if err := iprot.ReadMapEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } + p.FragmentInstanceIds = _field return nil } - -func (p *TMasterOpRequest) ReadField22(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TQueryProfile) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.ClientNodeHost = &v } - return nil -} + _field := make([]*runtimeprofile.TRuntimeProfileTree, 0, size) + values := make([]runtimeprofile.TRuntimeProfileTree, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TMasterOpRequest) ReadField23(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ClientNodePort = &v - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TMasterOpRequest) ReadField24(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.SyncJournalOnly = &v } + p.InstanceProfiles = _field return nil } - -func (p *TMasterOpRequest) ReadField25(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TQueryProfile) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.DefaultCatalog = &v } - return nil -} + _field := make([]*runtimeprofile.TRuntimeProfileTree, 0, size) + values := make([]runtimeprofile.TRuntimeProfileTree, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TMasterOpRequest) ReadField26(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.DefaultDatabase = &v } + p.LoadChannelProfiles = _field return nil } -func (p *TMasterOpRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TQueryProfile) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TMasterOpRequest"); err != nil { + if err = oprot.WriteStructBegin("TQueryProfile"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16913,91 +15853,6 @@ func (p *TMasterOpRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto 
WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - if err = p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError - } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError - } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError - } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError - } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError - } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError - } - if err = p.writeField23(oprot); err != nil { - fieldId = 23 - goto WriteFieldError - } - if err = p.writeField24(oprot); err != nil { - fieldId = 24 - goto WriteFieldError - } - if err = p.writeField25(oprot); err != nil { - fieldId = 25 - goto WriteFieldError - } - if err = p.writeField26(oprot); err != nil { - fieldId = 26 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17016,63 +15871,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TMasterOpRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("sql", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Sql); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetResourceInfo() { - if err = oprot.WriteFieldBegin("resourceInfo", thrift.STRUCT, 4); err != nil { +func (p *TQueryProfile) writeField1(oprot 
thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.ResourceInfo.Write(oprot); err != nil { + if err := p.QueryId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17081,17 +15885,36 @@ func (p *TMasterOpRequest) writeField4(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TMasterOpRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 5); err != nil { +func (p *TQueryProfile) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentIdToProfile() { + if err = oprot.WriteFieldBegin("fragment_id_to_profile", thrift.MAP, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.FragmentIdToProfile)); err != nil { + return err + } + for k, v := range p.FragmentIdToProfile { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17100,17 +15923,25 @@ func (p *TMasterOpRequest) writeField5(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMasterOpRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetExecMemLimit() { - if err = oprot.WriteFieldBegin("execMemLimit", thrift.I64, 6); err != nil { +func (p *TQueryProfile) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceIds() { + if err = oprot.WriteFieldBegin("fragment_instance_ids", thrift.LIST, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ExecMemLimit); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FragmentInstanceIds)); err != nil { + return err + } + for _, v := range p.FragmentInstanceIds { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17119,17 +15950,25 @@ func (p *TMasterOpRequest) writeField6(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return 
thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMasterOpRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryTimeout() { - if err = oprot.WriteFieldBegin("queryTimeout", thrift.I32, 7); err != nil { +func (p *TQueryProfile) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetInstanceProfiles() { + if err = oprot.WriteFieldBegin("instance_profiles", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.QueryTimeout); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.InstanceProfiles)); err != nil { + return err + } + for _, v := range p.InstanceProfiles { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17138,17 +15977,25 @@ func (p *TMasterOpRequest) writeField7(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TMasterOpRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 8); err != nil { +func (p *TQueryProfile) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadChannelProfiles() { + if err = oprot.WriteFieldBegin("load_channel_profiles", thrift.LIST, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.UserIp); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.LoadChannelProfiles)); err != nil { + return err + } + for _, v := range p.LoadChannelProfiles { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17157,169 +16004,362 @@ func (p *TMasterOpRequest) writeField8(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TMasterOpRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeZone() { - if err = oprot.WriteFieldBegin("time_zone", thrift.STRING, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.TimeZone); err != nil { - return err +func (p *TQueryProfile) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryProfile(%+v)", *p) + +} + +func (p *TQueryProfile) DeepEqual(ano *TQueryProfile) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.QueryId) { + return false + } + if !p.Field2DeepEqual(ano.FragmentIdToProfile) { + return false + } + if !p.Field3DeepEqual(ano.FragmentInstanceIds) { + return false + } + if !p.Field4DeepEqual(ano.InstanceProfiles) { + return false + } + if !p.Field5DeepEqual(ano.LoadChannelProfiles) { + return false + } + return true +} + +func (p *TQueryProfile) 
Field1DeepEqual(src *types.TUniqueId) bool { + + if !p.QueryId.DeepEqual(src) { + return false + } + return true +} +func (p *TQueryProfile) Field2DeepEqual(src map[int32][]*TDetailedReportParams) bool { + + if len(p.FragmentIdToProfile) != len(src) { + return false + } + for k, v := range p.FragmentIdToProfile { + _src := src[k] + if len(v) != len(_src) { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return true } +func (p *TQueryProfile) Field3DeepEqual(src []*types.TUniqueId) bool { -func (p *TMasterOpRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetStmtId() { - if err = oprot.WriteFieldBegin("stmt_id", thrift.I64, 10); err != nil { - goto WriteFieldBeginError + if len(p.FragmentInstanceIds) != len(src) { + return false + } + for i, v := range p.FragmentInstanceIds { + _src := src[i] + if !v.DeepEqual(_src) { + return false } - if err := oprot.WriteI64(*p.StmtId); err != nil { - return err + } + return true +} +func (p *TQueryProfile) Field4DeepEqual(src []*runtimeprofile.TRuntimeProfileTree) bool { + + if len(p.InstanceProfiles) != len(src) { + return false + } + for i, v := range p.InstanceProfiles { + _src := src[i] + if !v.DeepEqual(_src) { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + } + return true +} +func (p *TQueryProfile) Field5DeepEqual(src []*runtimeprofile.TRuntimeProfileTree) bool { + + if len(p.LoadChannelProfiles) != len(src) { + return false + } + for i, v := range p.LoadChannelProfiles { + _src := src[i] + if !v.DeepEqual(_src) { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return true } -func (p *TMasterOpRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetSqlMode() { - if err = oprot.WriteFieldBegin("sqlMode", thrift.I64, 11); err != nil { - goto WriteFieldBeginError +type TFragmentInstanceReport struct { + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,1,optional" frugal:"1,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` + NumFinishedRange *int32 `thrift:"num_finished_range,2,optional" frugal:"2,optional,i32" json:"num_finished_range,omitempty"` + LoadedRows *int64 `thrift:"loaded_rows,3,optional" frugal:"3,optional,i64" json:"loaded_rows,omitempty"` + LoadedBytes *int64 `thrift:"loaded_bytes,4,optional" frugal:"4,optional,i64" json:"loaded_bytes,omitempty"` +} + +func NewTFragmentInstanceReport() *TFragmentInstanceReport { + return &TFragmentInstanceReport{} +} + +func (p *TFragmentInstanceReport) InitDefault() { +} + +var TFragmentInstanceReport_FragmentInstanceId_DEFAULT *types.TUniqueId + +func (p *TFragmentInstanceReport) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TFragmentInstanceReport_FragmentInstanceId_DEFAULT + } + return p.FragmentInstanceId +} + +var TFragmentInstanceReport_NumFinishedRange_DEFAULT int32 + +func (p *TFragmentInstanceReport) GetNumFinishedRange() (v int32) { + if !p.IsSetNumFinishedRange() { + return 
TFragmentInstanceReport_NumFinishedRange_DEFAULT + } + return *p.NumFinishedRange +} + +var TFragmentInstanceReport_LoadedRows_DEFAULT int64 + +func (p *TFragmentInstanceReport) GetLoadedRows() (v int64) { + if !p.IsSetLoadedRows() { + return TFragmentInstanceReport_LoadedRows_DEFAULT + } + return *p.LoadedRows +} + +var TFragmentInstanceReport_LoadedBytes_DEFAULT int64 + +func (p *TFragmentInstanceReport) GetLoadedBytes() (v int64) { + if !p.IsSetLoadedBytes() { + return TFragmentInstanceReport_LoadedBytes_DEFAULT + } + return *p.LoadedBytes +} +func (p *TFragmentInstanceReport) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val +} +func (p *TFragmentInstanceReport) SetNumFinishedRange(val *int32) { + p.NumFinishedRange = val +} +func (p *TFragmentInstanceReport) SetLoadedRows(val *int64) { + p.LoadedRows = val +} +func (p *TFragmentInstanceReport) SetLoadedBytes(val *int64) { + p.LoadedBytes = val +} + +var fieldIDToName_TFragmentInstanceReport = map[int16]string{ + 1: "fragment_instance_id", + 2: "num_finished_range", + 3: "loaded_rows", + 4: "loaded_bytes", +} + +func (p *TFragmentInstanceReport) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil +} + +func (p *TFragmentInstanceReport) IsSetNumFinishedRange() bool { + return p.NumFinishedRange != nil +} + +func (p *TFragmentInstanceReport) IsSetLoadedRows() bool { + return p.LoadedRows != nil +} + +func (p *TFragmentInstanceReport) IsSetLoadedBytes() bool { + return p.LoadedBytes != nil +} + +func (p *TFragmentInstanceReport) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteI64(*p.SqlMode); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFragmentInstanceReport[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TMasterOpRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadMemLimit() { - if err = oprot.WriteFieldBegin("loadMemLimit", thrift.I64, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LoadMemLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TFragmentInstanceReport) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err } + p.FragmentInstanceId = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TFragmentInstanceReport) ReadField2(iprot thrift.TProtocol) error { -func (p *TMasterOpRequest) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableStrictMode() { - if err = oprot.WriteFieldBegin("enableStrictMode", thrift.BOOL, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableStrictMode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } + p.NumFinishedRange = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } +func (p *TFragmentInstanceReport) ReadField3(iprot thrift.TProtocol) error { -func (p *TMasterOpRequest) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetCurrentUserIdent() { - if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 14); err != nil { - goto WriteFieldBeginError - } - if err := p.CurrentUserIdent.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.LoadedRows = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } +func (p *TFragmentInstanceReport) ReadField4(iprot thrift.TProtocol) error { -func (p *TMasterOpRequest) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetStmtIdx() { - if err = oprot.WriteFieldBegin("stmtIdx", thrift.I32, 15); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.StmtIdx); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.LoadedBytes = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 
begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } -func (p *TMasterOpRequest) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryOptions() { - if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 16); err != nil { - goto WriteFieldBeginError +func (p *TFragmentInstanceReport) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFragmentInstanceReport"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if err := p.QueryOptions.Write(oprot); err != nil { - return err + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TMasterOpRequest) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryId() { - if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 17); err != nil { +func (p *TFragmentInstanceReport) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceId() { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.QueryId.Write(oprot); err != nil { + if err := p.FragmentInstanceId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17328,17 +16368,17 @@ func (p *TMasterOpRequest) writeField17(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TMasterOpRequest) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetInsertVisibleTimeoutMs() { - if err = oprot.WriteFieldBegin("insert_visible_timeout_ms", thrift.I64, 18); err != nil { +func (p *TFragmentInstanceReport) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetNumFinishedRange() { + if err = oprot.WriteFieldBegin("num_finished_range", thrift.I32, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.InsertVisibleTimeoutMs); err != nil { + if err := oprot.WriteI32(*p.NumFinishedRange); err != nil { return err } if err = oprot.WriteFieldEnd(); 
err != nil { @@ -17347,30 +16387,17 @@ func (p *TMasterOpRequest) writeField18(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMasterOpRequest) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetSessionVariables() { - if err = oprot.WriteFieldBegin("session_variables", thrift.MAP, 19); err != nil { +func (p *TFragmentInstanceReport) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedRows() { + if err = oprot.WriteFieldBegin("loaded_rows", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.SessionVariables)); err != nil { - return err - } - for k, v := range p.SessionVariables { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteI64(*p.LoadedRows); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17379,17 +16406,17 @@ func (p *TMasterOpRequest) writeField19(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMasterOpRequest) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetFoldConstantByBe() { - if err = oprot.WriteFieldBegin("foldConstantByBe", thrift.BOOL, 20); err != nil { +func (p *TFragmentInstanceReport) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedBytes() { + if err = oprot.WriteFieldBegin("loaded_bytes", thrift.I64, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.FoldConstantByBe); err != nil { + if err := oprot.WriteI64(*p.LoadedBytes); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17398,595 +16425,633 @@ func (p *TMasterOpRequest) writeField20(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TMasterOpRequest) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetTraceCarrier() { - if err = oprot.WriteFieldBegin("trace_carrier", thrift.MAP, 21); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.TraceCarrier)); err != nil { - return err - } - for k, v := range p.TraceCarrier { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = 
oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetClientNodeHost() { - if err = oprot.WriteFieldBegin("clientNodeHost", thrift.STRING, 22); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.ClientNodeHost); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetClientNodePort() { - if err = oprot.WriteFieldBegin("clientNodePort", thrift.I32, 23); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.ClientNodePort); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField24(oprot thrift.TProtocol) (err error) { - if p.IsSetSyncJournalOnly() { - if err = oprot.WriteFieldBegin("syncJournalOnly", thrift.BOOL, 24); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.SyncJournalOnly); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField25(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultCatalog() { - if err = oprot.WriteFieldBegin("defaultCatalog", thrift.STRING, 25); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DefaultCatalog); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) -} - -func (p *TMasterOpRequest) writeField26(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultDatabase() { - if err = oprot.WriteFieldBegin("defaultDatabase", thrift.STRING, 26); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DefaultDatabase); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) -} - -func (p *TMasterOpRequest) String() string { +func (p *TFragmentInstanceReport) String() string { if p == nil { return "" } - return fmt.Sprintf("TMasterOpRequest(%+v)", 
*p) + return fmt.Sprintf("TFragmentInstanceReport(%+v)", *p) + } -func (p *TMasterOpRequest) DeepEqual(ano *TMasterOpRequest) bool { +func (p *TFragmentInstanceReport) DeepEqual(ano *TFragmentInstanceReport) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.User) { - return false - } - if !p.Field2DeepEqual(ano.Db) { - return false - } - if !p.Field3DeepEqual(ano.Sql) { - return false - } - if !p.Field4DeepEqual(ano.ResourceInfo) { - return false - } - if !p.Field5DeepEqual(ano.Cluster) { - return false - } - if !p.Field6DeepEqual(ano.ExecMemLimit) { - return false - } - if !p.Field7DeepEqual(ano.QueryTimeout) { - return false - } - if !p.Field8DeepEqual(ano.UserIp) { - return false - } - if !p.Field9DeepEqual(ano.TimeZone) { - return false - } - if !p.Field10DeepEqual(ano.StmtId) { - return false - } - if !p.Field11DeepEqual(ano.SqlMode) { - return false - } - if !p.Field12DeepEqual(ano.LoadMemLimit) { - return false - } - if !p.Field13DeepEqual(ano.EnableStrictMode) { - return false - } - if !p.Field14DeepEqual(ano.CurrentUserIdent) { - return false - } - if !p.Field15DeepEqual(ano.StmtIdx) { - return false - } - if !p.Field16DeepEqual(ano.QueryOptions) { - return false - } - if !p.Field17DeepEqual(ano.QueryId) { - return false - } - if !p.Field18DeepEqual(ano.InsertVisibleTimeoutMs) { - return false - } - if !p.Field19DeepEqual(ano.SessionVariables) { - return false - } - if !p.Field20DeepEqual(ano.FoldConstantByBe) { - return false - } - if !p.Field21DeepEqual(ano.TraceCarrier) { - return false - } - if !p.Field22DeepEqual(ano.ClientNodeHost) { - return false - } - if !p.Field23DeepEqual(ano.ClientNodePort) { + if !p.Field1DeepEqual(ano.FragmentInstanceId) { return false } - if !p.Field24DeepEqual(ano.SyncJournalOnly) { + if !p.Field2DeepEqual(ano.NumFinishedRange) { return false } - if !p.Field25DeepEqual(ano.DefaultCatalog) { + if !p.Field3DeepEqual(ano.LoadedRows) { return false } - if !p.Field26DeepEqual(ano.DefaultDatabase) { + if !p.Field4DeepEqual(ano.LoadedBytes) { return false } return true } -func (p *TMasterOpRequest) Field1DeepEqual(src string) bool { - - if strings.Compare(p.User, src) != 0 { - return false - } - return true -} -func (p *TMasterOpRequest) Field2DeepEqual(src string) bool { +func (p *TFragmentInstanceReport) Field1DeepEqual(src *types.TUniqueId) bool { - if strings.Compare(p.Db, src) != 0 { + if !p.FragmentInstanceId.DeepEqual(src) { return false } return true } -func (p *TMasterOpRequest) Field3DeepEqual(src string) bool { +func (p *TFragmentInstanceReport) Field2DeepEqual(src *int32) bool { - if strings.Compare(p.Sql, src) != 0 { + if p.NumFinishedRange == src { + return true + } else if p.NumFinishedRange == nil || src == nil { return false } - return true -} -func (p *TMasterOpRequest) Field4DeepEqual(src *types.TResourceInfo) bool { - - if !p.ResourceInfo.DeepEqual(src) { + if *p.NumFinishedRange != *src { return false } return true } -func (p *TMasterOpRequest) Field5DeepEqual(src *string) bool { +func (p *TFragmentInstanceReport) Field3DeepEqual(src *int64) bool { - if p.Cluster == src { + if p.LoadedRows == src { return true - } else if p.Cluster == nil || src == nil { + } else if p.LoadedRows == nil || src == nil { return false } - if strings.Compare(*p.Cluster, *src) != 0 { + if *p.LoadedRows != *src { return false } return true } -func (p *TMasterOpRequest) Field6DeepEqual(src *int64) bool { +func (p *TFragmentInstanceReport) Field4DeepEqual(src *int64) bool { - if p.ExecMemLimit == src 
{ + if p.LoadedBytes == src { return true - } else if p.ExecMemLimit == nil || src == nil { + } else if p.LoadedBytes == nil || src == nil { return false } - if *p.ExecMemLimit != *src { + if *p.LoadedBytes != *src { return false } return true } -func (p *TMasterOpRequest) Field7DeepEqual(src *int32) bool { - if p.QueryTimeout == src { - return true - } else if p.QueryTimeout == nil || src == nil { - return false - } - if *p.QueryTimeout != *src { - return false - } - return true +type TReportExecStatusParams struct { + ProtocolVersion FrontendServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocol_version"` + QueryId *types.TUniqueId `thrift:"query_id,2,optional" frugal:"2,optional,types.TUniqueId" json:"query_id,omitempty"` + BackendNum *int32 `thrift:"backend_num,3,optional" frugal:"3,optional,i32" json:"backend_num,omitempty"` + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,4,optional" frugal:"4,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` + Status *status.TStatus `thrift:"status,5,optional" frugal:"5,optional,status.TStatus" json:"status,omitempty"` + Done *bool `thrift:"done,6,optional" frugal:"6,optional,bool" json:"done,omitempty"` + Profile *runtimeprofile.TRuntimeProfileTree `thrift:"profile,7,optional" frugal:"7,optional,runtimeprofile.TRuntimeProfileTree" json:"profile,omitempty"` + ErrorLog []string `thrift:"error_log,9,optional" frugal:"9,optional,list" json:"error_log,omitempty"` + DeltaUrls []string `thrift:"delta_urls,10,optional" frugal:"10,optional,list" json:"delta_urls,omitempty"` + LoadCounters map[string]string `thrift:"load_counters,11,optional" frugal:"11,optional,map" json:"load_counters,omitempty"` + TrackingUrl *string `thrift:"tracking_url,12,optional" frugal:"12,optional,string" json:"tracking_url,omitempty"` + ExportFiles []string `thrift:"export_files,13,optional" frugal:"13,optional,list" json:"export_files,omitempty"` + CommitInfos []*types.TTabletCommitInfo `thrift:"commitInfos,14,optional" frugal:"14,optional,list" json:"commitInfos,omitempty"` + LoadedRows *int64 `thrift:"loaded_rows,15,optional" frugal:"15,optional,i64" json:"loaded_rows,omitempty"` + BackendId *int64 `thrift:"backend_id,16,optional" frugal:"16,optional,i64" json:"backend_id,omitempty"` + LoadedBytes *int64 `thrift:"loaded_bytes,17,optional" frugal:"17,optional,i64" json:"loaded_bytes,omitempty"` + ErrorTabletInfos []*types.TErrorTabletInfo `thrift:"errorTabletInfos,18,optional" frugal:"18,optional,list" json:"errorTabletInfos,omitempty"` + FragmentId *int32 `thrift:"fragment_id,19,optional" frugal:"19,optional,i32" json:"fragment_id,omitempty"` + QueryType *palointernalservice.TQueryType `thrift:"query_type,20,optional" frugal:"20,optional,TQueryType" json:"query_type,omitempty"` + LoadChannelProfile *runtimeprofile.TRuntimeProfileTree `thrift:"loadChannelProfile,21,optional" frugal:"21,optional,runtimeprofile.TRuntimeProfileTree" json:"loadChannelProfile,omitempty"` + FinishedScanRanges *int32 `thrift:"finished_scan_ranges,22,optional" frugal:"22,optional,i32" json:"finished_scan_ranges,omitempty"` + DetailedReport []*TDetailedReportParams `thrift:"detailed_report,23,optional" frugal:"23,optional,list" json:"detailed_report,omitempty"` + QueryStatistics *TQueryStatistics `thrift:"query_statistics,24,optional" frugal:"24,optional,TQueryStatistics" json:"query_statistics,omitempty"` + ReportWorkloadRuntimeStatus *TReportWorkloadRuntimeStatusParams 
`thrift:"report_workload_runtime_status,25,optional" frugal:"25,optional,TReportWorkloadRuntimeStatusParams" json:"report_workload_runtime_status,omitempty"` + HivePartitionUpdates []*datasinks.THivePartitionUpdate `thrift:"hive_partition_updates,26,optional" frugal:"26,optional,list" json:"hive_partition_updates,omitempty"` + QueryProfile *TQueryProfile `thrift:"query_profile,27,optional" frugal:"27,optional,TQueryProfile" json:"query_profile,omitempty"` + IcebergCommitDatas []*datasinks.TIcebergCommitData `thrift:"iceberg_commit_datas,28,optional" frugal:"28,optional,list" json:"iceberg_commit_datas,omitempty"` + TxnId *int64 `thrift:"txn_id,29,optional" frugal:"29,optional,i64" json:"txn_id,omitempty"` + Label *string `thrift:"label,30,optional" frugal:"30,optional,string" json:"label,omitempty"` + FragmentInstanceReports []*TFragmentInstanceReport `thrift:"fragment_instance_reports,31,optional" frugal:"31,optional,list" json:"fragment_instance_reports,omitempty"` } -func (p *TMasterOpRequest) Field8DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { - return false - } - return true +func NewTReportExecStatusParams() *TReportExecStatusParams { + return &TReportExecStatusParams{} } -func (p *TMasterOpRequest) Field9DeepEqual(src *string) bool { - if p.TimeZone == src { - return true - } else if p.TimeZone == nil || src == nil { - return false - } - if strings.Compare(*p.TimeZone, *src) != 0 { - return false - } - return true +func (p *TReportExecStatusParams) InitDefault() { } -func (p *TMasterOpRequest) Field10DeepEqual(src *int64) bool { - if p.StmtId == src { - return true - } else if p.StmtId == nil || src == nil { - return false - } - if *p.StmtId != *src { - return false - } - return true +func (p *TReportExecStatusParams) GetProtocolVersion() (v FrontendServiceVersion) { + return p.ProtocolVersion } -func (p *TMasterOpRequest) Field11DeepEqual(src *int64) bool { - if p.SqlMode == src { - return true - } else if p.SqlMode == nil || src == nil { - return false - } - if *p.SqlMode != *src { - return false +var TReportExecStatusParams_QueryId_DEFAULT *types.TUniqueId + +func (p *TReportExecStatusParams) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TReportExecStatusParams_QueryId_DEFAULT } - return true + return p.QueryId } -func (p *TMasterOpRequest) Field12DeepEqual(src *int64) bool { - if p.LoadMemLimit == src { - return true - } else if p.LoadMemLimit == nil || src == nil { - return false - } - if *p.LoadMemLimit != *src { - return false +var TReportExecStatusParams_BackendNum_DEFAULT int32 + +func (p *TReportExecStatusParams) GetBackendNum() (v int32) { + if !p.IsSetBackendNum() { + return TReportExecStatusParams_BackendNum_DEFAULT } - return true + return *p.BackendNum } -func (p *TMasterOpRequest) Field13DeepEqual(src *bool) bool { - if p.EnableStrictMode == src { - return true - } else if p.EnableStrictMode == nil || src == nil { - return false - } - if *p.EnableStrictMode != *src { - return false +var TReportExecStatusParams_FragmentInstanceId_DEFAULT *types.TUniqueId + +func (p *TReportExecStatusParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TReportExecStatusParams_FragmentInstanceId_DEFAULT } - return true + return p.FragmentInstanceId } -func (p *TMasterOpRequest) Field14DeepEqual(src *types.TUserIdentity) bool { - if !p.CurrentUserIdent.DeepEqual(src) { - return false +var 
TReportExecStatusParams_Status_DEFAULT *status.TStatus + +func (p *TReportExecStatusParams) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TReportExecStatusParams_Status_DEFAULT } - return true + return p.Status } -func (p *TMasterOpRequest) Field15DeepEqual(src *int32) bool { - if p.StmtIdx == src { - return true - } else if p.StmtIdx == nil || src == nil { - return false - } - if *p.StmtIdx != *src { - return false +var TReportExecStatusParams_Done_DEFAULT bool + +func (p *TReportExecStatusParams) GetDone() (v bool) { + if !p.IsSetDone() { + return TReportExecStatusParams_Done_DEFAULT } - return true + return *p.Done } -func (p *TMasterOpRequest) Field16DeepEqual(src *palointernalservice.TQueryOptions) bool { - if !p.QueryOptions.DeepEqual(src) { - return false +var TReportExecStatusParams_Profile_DEFAULT *runtimeprofile.TRuntimeProfileTree + +func (p *TReportExecStatusParams) GetProfile() (v *runtimeprofile.TRuntimeProfileTree) { + if !p.IsSetProfile() { + return TReportExecStatusParams_Profile_DEFAULT } - return true + return p.Profile } -func (p *TMasterOpRequest) Field17DeepEqual(src *types.TUniqueId) bool { - if !p.QueryId.DeepEqual(src) { - return false +var TReportExecStatusParams_ErrorLog_DEFAULT []string + +func (p *TReportExecStatusParams) GetErrorLog() (v []string) { + if !p.IsSetErrorLog() { + return TReportExecStatusParams_ErrorLog_DEFAULT } - return true + return p.ErrorLog } -func (p *TMasterOpRequest) Field18DeepEqual(src *int64) bool { - if p.InsertVisibleTimeoutMs == src { - return true - } else if p.InsertVisibleTimeoutMs == nil || src == nil { - return false - } - if *p.InsertVisibleTimeoutMs != *src { - return false +var TReportExecStatusParams_DeltaUrls_DEFAULT []string + +func (p *TReportExecStatusParams) GetDeltaUrls() (v []string) { + if !p.IsSetDeltaUrls() { + return TReportExecStatusParams_DeltaUrls_DEFAULT } - return true + return p.DeltaUrls } -func (p *TMasterOpRequest) Field19DeepEqual(src map[string]string) bool { - if len(p.SessionVariables) != len(src) { - return false - } - for k, v := range p.SessionVariables { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false - } +var TReportExecStatusParams_LoadCounters_DEFAULT map[string]string + +func (p *TReportExecStatusParams) GetLoadCounters() (v map[string]string) { + if !p.IsSetLoadCounters() { + return TReportExecStatusParams_LoadCounters_DEFAULT } - return true + return p.LoadCounters } -func (p *TMasterOpRequest) Field20DeepEqual(src *bool) bool { - if p.FoldConstantByBe == src { - return true - } else if p.FoldConstantByBe == nil || src == nil { - return false - } - if *p.FoldConstantByBe != *src { - return false +var TReportExecStatusParams_TrackingUrl_DEFAULT string + +func (p *TReportExecStatusParams) GetTrackingUrl() (v string) { + if !p.IsSetTrackingUrl() { + return TReportExecStatusParams_TrackingUrl_DEFAULT } - return true + return *p.TrackingUrl } -func (p *TMasterOpRequest) Field21DeepEqual(src map[string]string) bool { - if len(p.TraceCarrier) != len(src) { - return false - } - for k, v := range p.TraceCarrier { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false - } +var TReportExecStatusParams_ExportFiles_DEFAULT []string + +func (p *TReportExecStatusParams) GetExportFiles() (v []string) { + if !p.IsSetExportFiles() { + return TReportExecStatusParams_ExportFiles_DEFAULT } - return true + return p.ExportFiles } -func (p *TMasterOpRequest) Field22DeepEqual(src *string) bool { - if p.ClientNodeHost == src { - return true - } else if 
p.ClientNodeHost == nil || src == nil { - return false - } - if strings.Compare(*p.ClientNodeHost, *src) != 0 { - return false +var TReportExecStatusParams_CommitInfos_DEFAULT []*types.TTabletCommitInfo + +func (p *TReportExecStatusParams) GetCommitInfos() (v []*types.TTabletCommitInfo) { + if !p.IsSetCommitInfos() { + return TReportExecStatusParams_CommitInfos_DEFAULT } - return true + return p.CommitInfos } -func (p *TMasterOpRequest) Field23DeepEqual(src *int32) bool { - if p.ClientNodePort == src { - return true - } else if p.ClientNodePort == nil || src == nil { - return false - } - if *p.ClientNodePort != *src { - return false +var TReportExecStatusParams_LoadedRows_DEFAULT int64 + +func (p *TReportExecStatusParams) GetLoadedRows() (v int64) { + if !p.IsSetLoadedRows() { + return TReportExecStatusParams_LoadedRows_DEFAULT } - return true + return *p.LoadedRows } -func (p *TMasterOpRequest) Field24DeepEqual(src *bool) bool { - if p.SyncJournalOnly == src { - return true - } else if p.SyncJournalOnly == nil || src == nil { - return false - } - if *p.SyncJournalOnly != *src { - return false +var TReportExecStatusParams_BackendId_DEFAULT int64 + +func (p *TReportExecStatusParams) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TReportExecStatusParams_BackendId_DEFAULT } - return true + return *p.BackendId } -func (p *TMasterOpRequest) Field25DeepEqual(src *string) bool { - if p.DefaultCatalog == src { - return true - } else if p.DefaultCatalog == nil || src == nil { - return false - } - if strings.Compare(*p.DefaultCatalog, *src) != 0 { - return false +var TReportExecStatusParams_LoadedBytes_DEFAULT int64 + +func (p *TReportExecStatusParams) GetLoadedBytes() (v int64) { + if !p.IsSetLoadedBytes() { + return TReportExecStatusParams_LoadedBytes_DEFAULT } - return true + return *p.LoadedBytes } -func (p *TMasterOpRequest) Field26DeepEqual(src *string) bool { - if p.DefaultDatabase == src { - return true - } else if p.DefaultDatabase == nil || src == nil { - return false - } - if strings.Compare(*p.DefaultDatabase, *src) != 0 { - return false +var TReportExecStatusParams_ErrorTabletInfos_DEFAULT []*types.TErrorTabletInfo + +func (p *TReportExecStatusParams) GetErrorTabletInfos() (v []*types.TErrorTabletInfo) { + if !p.IsSetErrorTabletInfos() { + return TReportExecStatusParams_ErrorTabletInfos_DEFAULT } - return true + return p.ErrorTabletInfos } -type TColumnDefinition struct { - ColumnName string `thrift:"columnName,1,required" frugal:"1,required,string" json:"columnName"` - ColumnType *types.TColumnType `thrift:"columnType,2,required" frugal:"2,required,types.TColumnType" json:"columnType"` - AggType *types.TAggregationType `thrift:"aggType,3,optional" frugal:"3,optional,TAggregationType" json:"aggType,omitempty"` - DefaultValue *string `thrift:"defaultValue,4,optional" frugal:"4,optional,string" json:"defaultValue,omitempty"` -} +var TReportExecStatusParams_FragmentId_DEFAULT int32 -func NewTColumnDefinition() *TColumnDefinition { - return &TColumnDefinition{} +func (p *TReportExecStatusParams) GetFragmentId() (v int32) { + if !p.IsSetFragmentId() { + return TReportExecStatusParams_FragmentId_DEFAULT + } + return *p.FragmentId } -func (p *TColumnDefinition) InitDefault() { - *p = TColumnDefinition{} +var TReportExecStatusParams_QueryType_DEFAULT palointernalservice.TQueryType + +func (p *TReportExecStatusParams) GetQueryType() (v palointernalservice.TQueryType) { + if !p.IsSetQueryType() { + return TReportExecStatusParams_QueryType_DEFAULT + } + return *p.QueryType } -func 
(p *TColumnDefinition) GetColumnName() (v string) { - return p.ColumnName +var TReportExecStatusParams_LoadChannelProfile_DEFAULT *runtimeprofile.TRuntimeProfileTree + +func (p *TReportExecStatusParams) GetLoadChannelProfile() (v *runtimeprofile.TRuntimeProfileTree) { + if !p.IsSetLoadChannelProfile() { + return TReportExecStatusParams_LoadChannelProfile_DEFAULT + } + return p.LoadChannelProfile } -var TColumnDefinition_ColumnType_DEFAULT *types.TColumnType +var TReportExecStatusParams_FinishedScanRanges_DEFAULT int32 -func (p *TColumnDefinition) GetColumnType() (v *types.TColumnType) { - if !p.IsSetColumnType() { - return TColumnDefinition_ColumnType_DEFAULT +func (p *TReportExecStatusParams) GetFinishedScanRanges() (v int32) { + if !p.IsSetFinishedScanRanges() { + return TReportExecStatusParams_FinishedScanRanges_DEFAULT } - return p.ColumnType + return *p.FinishedScanRanges } -var TColumnDefinition_AggType_DEFAULT types.TAggregationType +var TReportExecStatusParams_DetailedReport_DEFAULT []*TDetailedReportParams -func (p *TColumnDefinition) GetAggType() (v types.TAggregationType) { - if !p.IsSetAggType() { - return TColumnDefinition_AggType_DEFAULT +func (p *TReportExecStatusParams) GetDetailedReport() (v []*TDetailedReportParams) { + if !p.IsSetDetailedReport() { + return TReportExecStatusParams_DetailedReport_DEFAULT } - return *p.AggType + return p.DetailedReport } -var TColumnDefinition_DefaultValue_DEFAULT string +var TReportExecStatusParams_QueryStatistics_DEFAULT *TQueryStatistics -func (p *TColumnDefinition) GetDefaultValue() (v string) { - if !p.IsSetDefaultValue() { - return TColumnDefinition_DefaultValue_DEFAULT +func (p *TReportExecStatusParams) GetQueryStatistics() (v *TQueryStatistics) { + if !p.IsSetQueryStatistics() { + return TReportExecStatusParams_QueryStatistics_DEFAULT } - return *p.DefaultValue + return p.QueryStatistics } -func (p *TColumnDefinition) SetColumnName(val string) { - p.ColumnName = val + +var TReportExecStatusParams_ReportWorkloadRuntimeStatus_DEFAULT *TReportWorkloadRuntimeStatusParams + +func (p *TReportExecStatusParams) GetReportWorkloadRuntimeStatus() (v *TReportWorkloadRuntimeStatusParams) { + if !p.IsSetReportWorkloadRuntimeStatus() { + return TReportExecStatusParams_ReportWorkloadRuntimeStatus_DEFAULT + } + return p.ReportWorkloadRuntimeStatus } -func (p *TColumnDefinition) SetColumnType(val *types.TColumnType) { - p.ColumnType = val + +var TReportExecStatusParams_HivePartitionUpdates_DEFAULT []*datasinks.THivePartitionUpdate + +func (p *TReportExecStatusParams) GetHivePartitionUpdates() (v []*datasinks.THivePartitionUpdate) { + if !p.IsSetHivePartitionUpdates() { + return TReportExecStatusParams_HivePartitionUpdates_DEFAULT + } + return p.HivePartitionUpdates } -func (p *TColumnDefinition) SetAggType(val *types.TAggregationType) { - p.AggType = val + +var TReportExecStatusParams_QueryProfile_DEFAULT *TQueryProfile + +func (p *TReportExecStatusParams) GetQueryProfile() (v *TQueryProfile) { + if !p.IsSetQueryProfile() { + return TReportExecStatusParams_QueryProfile_DEFAULT + } + return p.QueryProfile } -func (p *TColumnDefinition) SetDefaultValue(val *string) { - p.DefaultValue = val + +var TReportExecStatusParams_IcebergCommitDatas_DEFAULT []*datasinks.TIcebergCommitData + +func (p *TReportExecStatusParams) GetIcebergCommitDatas() (v []*datasinks.TIcebergCommitData) { + if !p.IsSetIcebergCommitDatas() { + return TReportExecStatusParams_IcebergCommitDatas_DEFAULT + } + return p.IcebergCommitDatas } -var fieldIDToName_TColumnDefinition = 
map[int16]string{ - 1: "columnName", - 2: "columnType", - 3: "aggType", - 4: "defaultValue", +var TReportExecStatusParams_TxnId_DEFAULT int64 + +func (p *TReportExecStatusParams) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TReportExecStatusParams_TxnId_DEFAULT + } + return *p.TxnId } -func (p *TColumnDefinition) IsSetColumnType() bool { - return p.ColumnType != nil +var TReportExecStatusParams_Label_DEFAULT string + +func (p *TReportExecStatusParams) GetLabel() (v string) { + if !p.IsSetLabel() { + return TReportExecStatusParams_Label_DEFAULT + } + return *p.Label } -func (p *TColumnDefinition) IsSetAggType() bool { - return p.AggType != nil +var TReportExecStatusParams_FragmentInstanceReports_DEFAULT []*TFragmentInstanceReport + +func (p *TReportExecStatusParams) GetFragmentInstanceReports() (v []*TFragmentInstanceReport) { + if !p.IsSetFragmentInstanceReports() { + return TReportExecStatusParams_FragmentInstanceReports_DEFAULT + } + return p.FragmentInstanceReports +} +func (p *TReportExecStatusParams) SetProtocolVersion(val FrontendServiceVersion) { + p.ProtocolVersion = val +} +func (p *TReportExecStatusParams) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TReportExecStatusParams) SetBackendNum(val *int32) { + p.BackendNum = val +} +func (p *TReportExecStatusParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val +} +func (p *TReportExecStatusParams) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TReportExecStatusParams) SetDone(val *bool) { + p.Done = val +} +func (p *TReportExecStatusParams) SetProfile(val *runtimeprofile.TRuntimeProfileTree) { + p.Profile = val +} +func (p *TReportExecStatusParams) SetErrorLog(val []string) { + p.ErrorLog = val +} +func (p *TReportExecStatusParams) SetDeltaUrls(val []string) { + p.DeltaUrls = val +} +func (p *TReportExecStatusParams) SetLoadCounters(val map[string]string) { + p.LoadCounters = val +} +func (p *TReportExecStatusParams) SetTrackingUrl(val *string) { + p.TrackingUrl = val +} +func (p *TReportExecStatusParams) SetExportFiles(val []string) { + p.ExportFiles = val +} +func (p *TReportExecStatusParams) SetCommitInfos(val []*types.TTabletCommitInfo) { + p.CommitInfos = val +} +func (p *TReportExecStatusParams) SetLoadedRows(val *int64) { + p.LoadedRows = val +} +func (p *TReportExecStatusParams) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TReportExecStatusParams) SetLoadedBytes(val *int64) { + p.LoadedBytes = val +} +func (p *TReportExecStatusParams) SetErrorTabletInfos(val []*types.TErrorTabletInfo) { + p.ErrorTabletInfos = val +} +func (p *TReportExecStatusParams) SetFragmentId(val *int32) { + p.FragmentId = val +} +func (p *TReportExecStatusParams) SetQueryType(val *palointernalservice.TQueryType) { + p.QueryType = val +} +func (p *TReportExecStatusParams) SetLoadChannelProfile(val *runtimeprofile.TRuntimeProfileTree) { + p.LoadChannelProfile = val +} +func (p *TReportExecStatusParams) SetFinishedScanRanges(val *int32) { + p.FinishedScanRanges = val +} +func (p *TReportExecStatusParams) SetDetailedReport(val []*TDetailedReportParams) { + p.DetailedReport = val +} +func (p *TReportExecStatusParams) SetQueryStatistics(val *TQueryStatistics) { + p.QueryStatistics = val +} +func (p *TReportExecStatusParams) SetReportWorkloadRuntimeStatus(val *TReportWorkloadRuntimeStatusParams) { + p.ReportWorkloadRuntimeStatus = val +} +func (p *TReportExecStatusParams) SetHivePartitionUpdates(val []*datasinks.THivePartitionUpdate) { + p.HivePartitionUpdates = val +} 
+func (p *TReportExecStatusParams) SetQueryProfile(val *TQueryProfile) { + p.QueryProfile = val +} +func (p *TReportExecStatusParams) SetIcebergCommitDatas(val []*datasinks.TIcebergCommitData) { + p.IcebergCommitDatas = val +} +func (p *TReportExecStatusParams) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TReportExecStatusParams) SetLabel(val *string) { + p.Label = val +} +func (p *TReportExecStatusParams) SetFragmentInstanceReports(val []*TFragmentInstanceReport) { + p.FragmentInstanceReports = val } -func (p *TColumnDefinition) IsSetDefaultValue() bool { - return p.DefaultValue != nil +var fieldIDToName_TReportExecStatusParams = map[int16]string{ + 1: "protocol_version", + 2: "query_id", + 3: "backend_num", + 4: "fragment_instance_id", + 5: "status", + 6: "done", + 7: "profile", + 9: "error_log", + 10: "delta_urls", + 11: "load_counters", + 12: "tracking_url", + 13: "export_files", + 14: "commitInfos", + 15: "loaded_rows", + 16: "backend_id", + 17: "loaded_bytes", + 18: "errorTabletInfos", + 19: "fragment_id", + 20: "query_type", + 21: "loadChannelProfile", + 22: "finished_scan_ranges", + 23: "detailed_report", + 24: "query_statistics", + 25: "report_workload_runtime_status", + 26: "hive_partition_updates", + 27: "query_profile", + 28: "iceberg_commit_datas", + 29: "txn_id", + 30: "label", + 31: "fragment_instance_reports", } -func (p *TColumnDefinition) Read(iprot thrift.TProtocol) (err error) { +func (p *TReportExecStatusParams) IsSetQueryId() bool { + return p.QueryId != nil +} + +func (p *TReportExecStatusParams) IsSetBackendNum() bool { + return p.BackendNum != nil +} + +func (p *TReportExecStatusParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil +} + +func (p *TReportExecStatusParams) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TReportExecStatusParams) IsSetDone() bool { + return p.Done != nil +} + +func (p *TReportExecStatusParams) IsSetProfile() bool { + return p.Profile != nil +} + +func (p *TReportExecStatusParams) IsSetErrorLog() bool { + return p.ErrorLog != nil +} + +func (p *TReportExecStatusParams) IsSetDeltaUrls() bool { + return p.DeltaUrls != nil +} + +func (p *TReportExecStatusParams) IsSetLoadCounters() bool { + return p.LoadCounters != nil +} + +func (p *TReportExecStatusParams) IsSetTrackingUrl() bool { + return p.TrackingUrl != nil +} + +func (p *TReportExecStatusParams) IsSetExportFiles() bool { + return p.ExportFiles != nil +} + +func (p *TReportExecStatusParams) IsSetCommitInfos() bool { + return p.CommitInfos != nil +} + +func (p *TReportExecStatusParams) IsSetLoadedRows() bool { + return p.LoadedRows != nil +} + +func (p *TReportExecStatusParams) IsSetBackendId() bool { + return p.BackendId != nil +} + +func (p *TReportExecStatusParams) IsSetLoadedBytes() bool { + return p.LoadedBytes != nil +} + +func (p *TReportExecStatusParams) IsSetErrorTabletInfos() bool { + return p.ErrorTabletInfos != nil +} + +func (p *TReportExecStatusParams) IsSetFragmentId() bool { + return p.FragmentId != nil +} + +func (p *TReportExecStatusParams) IsSetQueryType() bool { + return p.QueryType != nil +} + +func (p *TReportExecStatusParams) IsSetLoadChannelProfile() bool { + return p.LoadChannelProfile != nil +} + +func (p *TReportExecStatusParams) IsSetFinishedScanRanges() bool { + return p.FinishedScanRanges != nil +} + +func (p *TReportExecStatusParams) IsSetDetailedReport() bool { + return p.DetailedReport != nil +} + +func (p *TReportExecStatusParams) IsSetQueryStatistics() bool { + return p.QueryStatistics != nil +} + +func (p 
*TReportExecStatusParams) IsSetReportWorkloadRuntimeStatus() bool { + return p.ReportWorkloadRuntimeStatus != nil +} + +func (p *TReportExecStatusParams) IsSetHivePartitionUpdates() bool { + return p.HivePartitionUpdates != nil +} + +func (p *TReportExecStatusParams) IsSetQueryProfile() bool { + return p.QueryProfile != nil +} + +func (p *TReportExecStatusParams) IsSetIcebergCommitDatas() bool { + return p.IcebergCommitDatas != nil +} + +func (p *TReportExecStatusParams) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TReportExecStatusParams) IsSetLabel() bool { + return p.Label != nil +} + +func (p *TReportExecStatusParams) IsSetFragmentInstanceReports() bool { + return p.FragmentInstanceReports != nil +} + +func (p *TReportExecStatusParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetColumnName bool = false - var issetColumnType bool = false + var issetProtocolVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -18003,53 +17068,251 @@ func (p *TColumnDefinition) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetColumnType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.LIST { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.MAP { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != 
nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.LIST { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.LIST { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I64 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.I64 { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.I64 { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.LIST { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.I32 { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.I32 { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.I32 { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.LIST { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField24(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 25: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField25(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 26: + if fieldTypeId == thrift.LIST { + if err = p.ReadField26(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 27: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField27(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 28: + if fieldTypeId == thrift.LIST { + if err = p.ReadField28(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 29: + if fieldTypeId == thrift.I64 { + if err = p.ReadField29(iprot); err != nil { + goto ReadFieldError + } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 30: + if fieldTypeId == thrift.STRING { + if err = p.ReadField30(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 31: + if fieldTypeId == thrift.LIST { + if err = p.ReadField31(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18058,22 +17321,17 @@ func (p *TColumnDefinition) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetColumnName { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } - - if !issetColumnType { - fieldId = 2 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDefinition[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportExecStatusParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18082,604 +17340,569 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TColumnDefinition[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReportExecStatusParams[fieldId])) } -func (p *TColumnDefinition) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TReportExecStatusParams) ReadField1(iprot thrift.TProtocol) error { + + var _field FrontendServiceVersion + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnName = v + _field = FrontendServiceVersion(v) } + p.ProtocolVersion = _field return nil } - -func (p *TColumnDefinition) ReadField2(iprot thrift.TProtocol) error { - p.ColumnType = types.NewTColumnType() - if err := p.ColumnType.Read(iprot); err != nil { +func (p *TReportExecStatusParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.QueryId = _field return nil } +func (p *TReportExecStatusParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TColumnDefinition) ReadField3(iprot thrift.TProtocol) error { + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - tmp := types.TAggregationType(v) - p.AggType = &tmp + _field = &v } + p.BackendNum = _field return nil } - -func (p *TColumnDefinition) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TReportExecStatusParams) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err - } else { - p.DefaultValue = &v } + p.FragmentInstanceId = _field return nil } - -func (p *TColumnDefinition) Write(oprot 
thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TColumnDefinition"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError +func (p *TReportExecStatusParams) ReadField5(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err } + p.Status = _field return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } +func (p *TReportExecStatusParams) ReadField6(iprot thrift.TProtocol) error { -func (p *TColumnDefinition) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("columnName", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.ColumnName); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.Done = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } - -func (p *TColumnDefinition) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("columnType", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.ColumnType.Write(oprot); err != nil { +func (p *TReportExecStatusParams) ReadField7(iprot thrift.TProtocol) error { + _field := runtimeprofile.NewTRuntimeProfileTree() + if err := _field.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.Profile = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } +func (p *TReportExecStatusParams) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { -func (p *TColumnDefinition) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAggType() { - if err = oprot.WriteFieldBegin("aggType", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.AggType)); err != nil { + var _elem string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _elem = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + 
return err } + p.ErrorLog = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TReportExecStatusParams) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { -func (p *TColumnDefinition) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultValue() { - if err = oprot.WriteFieldBegin("defaultValue", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DefaultValue); err != nil { + var _elem string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _elem = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err } + p.DeltaUrls = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } - -func (p *TColumnDefinition) String() string { - if p == nil { - return "" +func (p *TReportExecStatusParams) ReadField11(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err } - return fmt.Sprintf("TColumnDefinition(%+v)", *p) -} + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } -func (p *TColumnDefinition) DeepEqual(ano *TColumnDefinition) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.ColumnName) { - return false - } - if !p.Field2DeepEqual(ano.ColumnType) { - return false - } - if !p.Field3DeepEqual(ano.AggType) { - return false + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val } - if !p.Field4DeepEqual(ano.DefaultValue) { - return false + if err := iprot.ReadMapEnd(); err != nil { + return err } - return true + p.LoadCounters = _field + return nil } +func (p *TReportExecStatusParams) ReadField12(iprot thrift.TProtocol) error { -func (p *TColumnDefinition) Field1DeepEqual(src string) bool { - - if strings.Compare(p.ColumnName, src) != 0 { - return false + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return true + p.TrackingUrl = _field + return nil } -func (p *TColumnDefinition) Field2DeepEqual(src *types.TColumnType) bool { - - if !p.ColumnType.DeepEqual(src) { - return false +func (p *TReportExecStatusParams) ReadField13(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } - return true -} -func (p *TColumnDefinition) Field3DeepEqual(src *types.TAggregationType) bool { + _field := make([]string, 0, size) + for i := 0; i < size; i++ { - if p.AggType == src { - return true - } else if p.AggType == nil || src == nil { - return false - } - if *p.AggType != *src { - return false - } - return true -} -func (p *TColumnDefinition) Field4DeepEqual(src *string) bool { + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } - if 
p.DefaultValue == src { - return true - } else if p.DefaultValue == nil || src == nil { - return false + _field = append(_field, _elem) } - if strings.Compare(*p.DefaultValue, *src) != 0 { - return false + if err := iprot.ReadListEnd(); err != nil { + return err } - return true -} - -type TShowResultSetMetaData struct { - Columns []*TColumnDefinition `thrift:"columns,1,required" frugal:"1,required,list" json:"columns"` -} - -func NewTShowResultSetMetaData() *TShowResultSetMetaData { - return &TShowResultSetMetaData{} -} - -func (p *TShowResultSetMetaData) InitDefault() { - *p = TShowResultSetMetaData{} -} - -func (p *TShowResultSetMetaData) GetColumns() (v []*TColumnDefinition) { - return p.Columns -} -func (p *TShowResultSetMetaData) SetColumns(val []*TColumnDefinition) { - p.Columns = val -} - -var fieldIDToName_TShowResultSetMetaData = map[int16]string{ - 1: "columns", + p.ExportFiles = _field + return nil } - -func (p *TShowResultSetMetaData) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetColumns bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TReportExecStatusParams) ReadField14(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } + _field := make([]*types.TTabletCommitInfo, 0, size) + values := make([]types.TTabletCommitInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err := _elem.Read(iprot); err != nil { + return err } - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + _field = append(_field, _elem) } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err := iprot.ReadListEnd(); err != nil { + return err } + p.CommitInfos = _field + return nil +} +func (p *TReportExecStatusParams) ReadField15(iprot thrift.TProtocol) error { - if !issetColumns { - fieldId = 1 - goto RequiredFieldNotSetError + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.LoadedRows = _field return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSetMetaData[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} +func (p *TReportExecStatusParams) ReadField16(iprot thrift.TProtocol) error { -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", 
fieldIDToName_TShowResultSetMetaData[fieldId])) + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BackendId = _field + return nil } +func (p *TReportExecStatusParams) ReadField17(iprot thrift.TProtocol) error { -func (p *TShowResultSetMetaData) ReadField1(iprot thrift.TProtocol) error { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LoadedBytes = _field + return nil +} +func (p *TReportExecStatusParams) ReadField18(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]*TColumnDefinition, 0, size) + _field := make([]*types.TErrorTabletInfo, 0, size) + values := make([]types.TErrorTabletInfo, size) for i := 0; i < size; i++ { - _elem := NewTColumnDefinition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ErrorTabletInfos = _field return nil } +func (p *TReportExecStatusParams) ReadField19(iprot thrift.TProtocol) error { -func (p *TShowResultSetMetaData) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TShowResultSetMetaData"); err != nil { - goto WriteStructBeginError + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } + p.FragmentId = _field + return nil +} +func (p *TReportExecStatusParams) ReadField20(iprot thrift.TProtocol) error { + var _field *palointernalservice.TQueryType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := palointernalservice.TQueryType(v) + _field = &tmp } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + p.QueryType = _field + return nil +} +func (p *TReportExecStatusParams) ReadField21(iprot thrift.TProtocol) error { + _field := runtimeprofile.NewTRuntimeProfileTree() + if err := _field.Read(iprot); err != nil { + return err } + p.LoadChannelProfile = _field return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } +func (p *TReportExecStatusParams) ReadField22(iprot thrift.TProtocol) error { -func (p *TShowResultSetMetaData) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("columns", thrift.LIST, 1); err != nil { - goto WriteFieldBeginError + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { + p.FinishedScanRanges = _field + return nil +} +func (p *TReportExecStatusParams) ReadField23(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { + _field := make([]*TDetailedReportParams, 0, size) + values := 
make([]TDetailedReportParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err } + + _field = append(_field, _elem) } - if err := oprot.WriteListEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.DetailedReport = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } - -func (p *TShowResultSetMetaData) String() string { - if p == nil { - return "" +func (p *TReportExecStatusParams) ReadField24(iprot thrift.TProtocol) error { + _field := NewTQueryStatistics() + if err := _field.Read(iprot); err != nil { + return err } - return fmt.Sprintf("TShowResultSetMetaData(%+v)", *p) + p.QueryStatistics = _field + return nil } - -func (p *TShowResultSetMetaData) DeepEqual(ano *TShowResultSetMetaData) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Columns) { - return false +func (p *TReportExecStatusParams) ReadField25(iprot thrift.TProtocol) error { + _field := NewTReportWorkloadRuntimeStatusParams() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.ReportWorkloadRuntimeStatus = _field + return nil } +func (p *TReportExecStatusParams) ReadField26(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*datasinks.THivePartitionUpdate, 0, size) + values := make([]datasinks.THivePartitionUpdate, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TShowResultSetMetaData) Field1DeepEqual(src []*TColumnDefinition) bool { + if err := _elem.Read(iprot); err != nil { + return err + } - if len(p.Columns) != len(src) { - return false + _field = append(_field, _elem) } - for i, v := range p.Columns { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if err := iprot.ReadListEnd(); err != nil { + return err } - return true + p.HivePartitionUpdates = _field + return nil } - -type TShowResultSet struct { - MetaData *TShowResultSetMetaData `thrift:"metaData,1,required" frugal:"1,required,TShowResultSetMetaData" json:"metaData"` - ResultRows [][]string `thrift:"resultRows,2,required" frugal:"2,required,list>" json:"resultRows"` +func (p *TReportExecStatusParams) ReadField27(iprot thrift.TProtocol) error { + _field := NewTQueryProfile() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryProfile = _field + return nil } +func (p *TReportExecStatusParams) ReadField28(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*datasinks.TIcebergCommitData, 0, size) + values := make([]datasinks.TIcebergCommitData, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func NewTShowResultSet() *TShowResultSet { - return &TShowResultSet{} -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TShowResultSet) InitDefault() { - *p = TShowResultSet{} + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.IcebergCommitDatas = _field + return nil } +func (p *TReportExecStatusParams) ReadField29(iprot thrift.TProtocol) error { -var TShowResultSet_MetaData_DEFAULT 
*TShowResultSetMetaData - -func (p *TShowResultSet) GetMetaData() (v *TShowResultSetMetaData) { - if !p.IsSetMetaData() { - return TShowResultSet_MetaData_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return p.MetaData + p.TxnId = _field + return nil } +func (p *TReportExecStatusParams) ReadField30(iprot thrift.TProtocol) error { -func (p *TShowResultSet) GetResultRows() (v [][]string) { - return p.ResultRows -} -func (p *TShowResultSet) SetMetaData(val *TShowResultSetMetaData) { - p.MetaData = val -} -func (p *TShowResultSet) SetResultRows(val [][]string) { - p.ResultRows = val + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil } +func (p *TReportExecStatusParams) ReadField31(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TFragmentInstanceReport, 0, size) + values := make([]TFragmentInstanceReport, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -var fieldIDToName_TShowResultSet = map[int16]string{ - 1: "metaData", - 2: "resultRows", -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TShowResultSet) IsSetMetaData() bool { - return p.MetaData != nil + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FragmentInstanceReports = _field + return nil } -func (p *TShowResultSet) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType +func (p *TReportExecStatusParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - var issetMetaData bool = false - var issetResultRows bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + if err = oprot.WriteStructBegin("TReportExecStatusParams"); err != nil { + goto WriteStructBeginError } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if fieldTypeId == thrift.STOP { - break + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetMetaData = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetResultRows = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - if !issetMetaData { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetResultRows { - fieldId = 2 - goto RequiredFieldNotSetError - } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin 
error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSet[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSet[fieldId])) -} - -func (p *TShowResultSet) ReadField1(iprot thrift.TProtocol) error { - p.MetaData = NewTShowResultSetMetaData() - if err := p.MetaData.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TShowResultSet) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ResultRows = make([][]string, 0, size) - for i := 0; i < size; i++ { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError } - _elem := make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem1 string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem1 = v - } - - _elem = append(_elem, _elem1) + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError } - if err := iprot.ReadListEnd(); err != nil { - return err + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError } - - p.ResultRows = append(p.ResultRows, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TShowResultSet) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TShowResultSet"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 + if err = p.writeField9(oprot); err != nil { + fieldId = 9 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto 
WriteFieldError + } + if err = p.writeField25(oprot); err != nil { + fieldId = 25 + goto WriteFieldError + } + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } + if err = p.writeField31(oprot); err != nil { + fieldId = 31 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18698,11 +17921,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TShowResultSet) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("metaData", thrift.STRUCT, 1); err != nil { +func (p *TReportExecStatusParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { goto WriteFieldBeginError } - if err := p.MetaData.Write(oprot); err != nil { + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18715,31 +17938,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TShowResultSet) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("resultRows", thrift.LIST, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.LIST, len(p.ResultRows)); err != nil { - return err - } - for _, v := range p.ResultRows { - if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { - return err - } - for _, v := range v { - if err := oprot.WriteString(v); err != nil { - return err - } +func (p *TReportExecStatusParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError } - if err := oprot.WriteListEnd(); err != nil { + if err := p.QueryId.Write(oprot); err != nil { return err } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -18748,383 +17957,562 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TShowResultSet) String() string { - if p == nil { - return "" +func (p *TReportExecStatusParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendNum() { + if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BackendNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return fmt.Sprintf("TShowResultSet(%+v)", *p) + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TShowResultSet) DeepEqual(ano *TShowResultSet) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return 
false - } - if !p.Field1DeepEqual(ano.MetaData) { - return false - } - if !p.Field2DeepEqual(ano.ResultRows) { - return false +func (p *TReportExecStatusParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceId() { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.FragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TShowResultSet) Field1DeepEqual(src *TShowResultSetMetaData) bool { - - if !p.MetaData.DeepEqual(src) { - return false +func (p *TReportExecStatusParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TShowResultSet) Field2DeepEqual(src [][]string) bool { - if len(p.ResultRows) != len(src) { - return false - } - for i, v := range p.ResultRows { - _src := src[i] - if len(v) != len(_src) { - return false +func (p *TReportExecStatusParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetDone() { + if err = oprot.WriteFieldBegin("done", thrift.BOOL, 6); err != nil { + goto WriteFieldBeginError } - for i, v := range v { - _src1 := _src[i] - if strings.Compare(v, _src1) != 0 { - return false - } + if err := oprot.WriteBool(*p.Done); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -type TMasterOpResult_ struct { - MaxJournalId int64 `thrift:"maxJournalId,1,required" frugal:"1,required,i64" json:"maxJournalId"` - Packet []byte `thrift:"packet,2,required" frugal:"2,required,binary" json:"packet"` - ResultSet *TShowResultSet `thrift:"resultSet,3,optional" frugal:"3,optional,TShowResultSet" json:"resultSet,omitempty"` - QueryId *types.TUniqueId `thrift:"queryId,4,optional" frugal:"4,optional,types.TUniqueId" json:"queryId,omitempty"` - Status *string `thrift:"status,5,optional" frugal:"5,optional,string" json:"status,omitempty"` +func (p *TReportExecStatusParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetProfile() { + if err = oprot.WriteFieldBegin("profile", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.Profile.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func 
NewTMasterOpResult_() *TMasterOpResult_ { - return &TMasterOpResult_{} +func (p *TReportExecStatusParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetErrorLog() { + if err = oprot.WriteFieldBegin("error_log", thrift.LIST, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ErrorLog)); err != nil { + return err + } + for _, v := range p.ErrorLog { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TMasterOpResult_) InitDefault() { - *p = TMasterOpResult_{} +func (p *TReportExecStatusParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetDeltaUrls() { + if err = oprot.WriteFieldBegin("delta_urls", thrift.LIST, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.DeltaUrls)); err != nil { + return err + } + for _, v := range p.DeltaUrls { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TMasterOpResult_) GetMaxJournalId() (v int64) { - return p.MaxJournalId +func (p *TReportExecStatusParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadCounters() { + if err = oprot.WriteFieldBegin("load_counters", thrift.MAP, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.LoadCounters)); err != nil { + return err + } + for k, v := range p.LoadCounters { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TMasterOpResult_) GetPacket() (v []byte) { - return p.Packet +func (p *TReportExecStatusParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTrackingUrl() { + if err = oprot.WriteFieldBegin("tracking_url", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TrackingUrl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -var TMasterOpResult__ResultSet_DEFAULT *TShowResultSet - -func (p *TMasterOpResult_) GetResultSet() (v *TShowResultSet) { - if !p.IsSetResultSet() { - return 
TMasterOpResult__ResultSet_DEFAULT +func (p *TReportExecStatusParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetExportFiles() { + if err = oprot.WriteFieldBegin("export_files", thrift.LIST, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ExportFiles)); err != nil { + return err + } + for _, v := range p.ExportFiles { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.ResultSet + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -var TMasterOpResult__QueryId_DEFAULT *types.TUniqueId - -func (p *TMasterOpResult_) GetQueryId() (v *types.TUniqueId) { - if !p.IsSetQueryId() { - return TMasterOpResult__QueryId_DEFAULT +func (p *TReportExecStatusParams) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetCommitInfos() { + if err = oprot.WriteFieldBegin("commitInfos", thrift.LIST, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { + return err + } + for _, v := range p.CommitInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.QueryId + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } -var TMasterOpResult__Status_DEFAULT string - -func (p *TMasterOpResult_) GetStatus() (v string) { - if !p.IsSetStatus() { - return TMasterOpResult__Status_DEFAULT +func (p *TReportExecStatusParams) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedRows() { + if err = oprot.WriteFieldBegin("loaded_rows", thrift.I64, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadedRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.Status -} -func (p *TMasterOpResult_) SetMaxJournalId(val int64) { - p.MaxJournalId = val -} -func (p *TMasterOpResult_) SetPacket(val []byte) { - p.Packet = val -} -func (p *TMasterOpResult_) SetResultSet(val *TShowResultSet) { - p.ResultSet = val -} -func (p *TMasterOpResult_) SetQueryId(val *types.TUniqueId) { - p.QueryId = val -} -func (p *TMasterOpResult_) SetStatus(val *string) { - p.Status = val -} - -var fieldIDToName_TMasterOpResult_ = map[int16]string{ - 1: "maxJournalId", - 2: "packet", - 3: "resultSet", - 4: "queryId", - 5: "status", -} - -func (p *TMasterOpResult_) IsSetResultSet() bool { - return p.ResultSet != nil -} - -func (p *TMasterOpResult_) IsSetQueryId() bool { - return p.QueryId != nil + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } -func (p *TMasterOpResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TReportExecStatusParams) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = 
oprot.WriteFieldBegin("backend_id", thrift.I64, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } -func (p *TMasterOpResult_) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetMaxJournalId bool = false - var issetPacket bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TReportExecStatusParams) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedBytes() { + if err = oprot.WriteFieldBegin("loaded_bytes", thrift.I64, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadedBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError +func (p *TReportExecStatusParams) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetErrorTabletInfos() { + if err = oprot.WriteFieldBegin("errorTabletInfos", thrift.LIST, 18); err != nil { + goto WriteFieldBeginError } - if fieldTypeId == thrift.STOP { - break + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ErrorTabletInfos)); err != nil { + return err } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetMaxJournalId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetPacket = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + for _, v := range p.ErrorTabletInfos { + if err := v.Write(oprot); err != nil { + return err } } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} - if !issetMaxJournalId { - fieldId = 1 - goto RequiredFieldNotSetError +func (p *TReportExecStatusParams) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentId() { + if err = oprot.WriteFieldBegin("fragment_id", thrift.I32, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FragmentId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} - if !issetPacket { - fieldId = 2 - goto RequiredFieldNotSetError +func (p *TReportExecStatusParams) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryType() { + if err = oprot.WriteFieldBegin("query_type", thrift.I32, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.QueryType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpResult_[fieldId])) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } -func (p *TMasterOpResult_) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MaxJournalId = v +func (p *TReportExecStatusParams) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadChannelProfile() { + if err = oprot.WriteFieldBegin("loadChannelProfile", thrift.STRUCT, 21); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadChannelProfile.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) } -func (p *TMasterOpResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return err - } else { - p.Packet = []byte(v) +func (p *TReportExecStatusParams) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetFinishedScanRanges() { + if err = oprot.WriteFieldBegin("finished_scan_ranges", thrift.I32, 22); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteI32(*p.FinishedScanRanges); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) } -func (p *TMasterOpResult_) ReadField3(iprot thrift.TProtocol) error { - p.ResultSet = NewTShowResultSet() - if err := p.ResultSet.Read(iprot); err != nil { - return err +func (p *TReportExecStatusParams) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetDetailedReport() { + if err = oprot.WriteFieldBegin("detailed_report", thrift.LIST, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DetailedReport)); err != nil { + return err + } + for _, v := range p.DetailedReport { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) } -func (p *TMasterOpResult_) ReadField4(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { - return err +func (p *TReportExecStatusParams) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryStatistics() { + if err = oprot.WriteFieldBegin("query_statistics", thrift.STRUCT, 24); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryStatistics.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) } -func (p *TMasterOpResult_) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Status = &v +func (p *TReportExecStatusParams) writeField25(oprot thrift.TProtocol) (err error) { + if p.IsSetReportWorkloadRuntimeStatus() { + if err = oprot.WriteFieldBegin("report_workload_runtime_status", thrift.STRUCT, 25); err != nil { + goto WriteFieldBeginError + } + if err := p.ReportWorkloadRuntimeStatus.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) } -func (p *TMasterOpResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TMasterOpResult"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError +func (p *TReportExecStatusParams) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetHivePartitionUpdates() { + if err = oprot.WriteFieldBegin("hive_partition_updates", thrift.LIST, 26); err != nil { + goto WriteFieldBeginError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError 
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HivePartitionUpdates)); err != nil { + return err } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError + for _, v := range p.HivePartitionUpdates { + if err := v.Write(oprot); err != nil { + return err + } } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError + if err := oprot.WriteListEnd(); err != nil { + return err } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) } -func (p *TMasterOpResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("maxJournalId", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MaxJournalId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TReportExecStatusParams) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryProfile() { + if err = oprot.WriteFieldBegin("query_profile", thrift.STRUCT, 27); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryProfile.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) } -func (p *TMasterOpResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("packet", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBinary([]byte(p.Packet)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TReportExecStatusParams) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetIcebergCommitDatas() { + if err = oprot.WriteFieldBegin("iceberg_commit_datas", thrift.LIST, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.IcebergCommitDatas)); err != nil { + return err + } + for _, v := range p.IcebergCommitDatas { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), 
err) + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) } -func (p *TMasterOpResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetResultSet() { - if err = oprot.WriteFieldBegin("resultSet", thrift.STRUCT, 3); err != nil { +func (p *TReportExecStatusParams) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 29); err != nil { goto WriteFieldBeginError } - if err := p.ResultSet.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.TxnId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19133,17 +18521,17 @@ func (p *TMasterOpResult_) writeField3(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) } -func (p *TMasterOpResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryId() { - if err = oprot.WriteFieldBegin("queryId", thrift.STRUCT, 4); err != nil { +func (p *TReportExecStatusParams) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 30); err != nil { goto WriteFieldBeginError } - if err := p.QueryId.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Label); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19152,17 +18540,25 @@ func (p *TMasterOpResult_) writeField4(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) } -func (p *TMasterOpResult_) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRING, 5); err != nil { +func (p *TReportExecStatusParams) writeField31(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceReports() { + if err = oprot.WriteFieldBegin("fragment_instance_reports", thrift.LIST, 31); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Status); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FragmentInstanceReports)); err != nil { + return err + } + for _, v := range p.FragmentInstanceReports { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19171,149 +18567,526 @@ func (p *TMasterOpResult_) writeField5(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write 
field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) } -func (p *TMasterOpResult_) String() string { +func (p *TReportExecStatusParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TMasterOpResult_(%+v)", *p) + return fmt.Sprintf("TReportExecStatusParams(%+v)", *p) + } -func (p *TMasterOpResult_) DeepEqual(ano *TMasterOpResult_) bool { +func (p *TReportExecStatusParams) DeepEqual(ano *TReportExecStatusParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.MaxJournalId) { + if !p.Field1DeepEqual(ano.ProtocolVersion) { return false } - if !p.Field2DeepEqual(ano.Packet) { + if !p.Field2DeepEqual(ano.QueryId) { return false } - if !p.Field3DeepEqual(ano.ResultSet) { + if !p.Field3DeepEqual(ano.BackendNum) { return false } - if !p.Field4DeepEqual(ano.QueryId) { + if !p.Field4DeepEqual(ano.FragmentInstanceId) { return false } if !p.Field5DeepEqual(ano.Status) { return false } - return true -} - -func (p *TMasterOpResult_) Field1DeepEqual(src int64) bool { - - if p.MaxJournalId != src { + if !p.Field6DeepEqual(ano.Done) { return false } - return true -} -func (p *TMasterOpResult_) Field2DeepEqual(src []byte) bool { - - if bytes.Compare(p.Packet, src) != 0 { + if !p.Field7DeepEqual(ano.Profile) { return false } - return true -} -func (p *TMasterOpResult_) Field3DeepEqual(src *TShowResultSet) bool { - - if !p.ResultSet.DeepEqual(src) { + if !p.Field9DeepEqual(ano.ErrorLog) { return false } - return true -} -func (p *TMasterOpResult_) Field4DeepEqual(src *types.TUniqueId) bool { - + if !p.Field10DeepEqual(ano.DeltaUrls) { + return false + } + if !p.Field11DeepEqual(ano.LoadCounters) { + return false + } + if !p.Field12DeepEqual(ano.TrackingUrl) { + return false + } + if !p.Field13DeepEqual(ano.ExportFiles) { + return false + } + if !p.Field14DeepEqual(ano.CommitInfos) { + return false + } + if !p.Field15DeepEqual(ano.LoadedRows) { + return false + } + if !p.Field16DeepEqual(ano.BackendId) { + return false + } + if !p.Field17DeepEqual(ano.LoadedBytes) { + return false + } + if !p.Field18DeepEqual(ano.ErrorTabletInfos) { + return false + } + if !p.Field19DeepEqual(ano.FragmentId) { + return false + } + if !p.Field20DeepEqual(ano.QueryType) { + return false + } + if !p.Field21DeepEqual(ano.LoadChannelProfile) { + return false + } + if !p.Field22DeepEqual(ano.FinishedScanRanges) { + return false + } + if !p.Field23DeepEqual(ano.DetailedReport) { + return false + } + if !p.Field24DeepEqual(ano.QueryStatistics) { + return false + } + if !p.Field25DeepEqual(ano.ReportWorkloadRuntimeStatus) { + return false + } + if !p.Field26DeepEqual(ano.HivePartitionUpdates) { + return false + } + if !p.Field27DeepEqual(ano.QueryProfile) { + return false + } + if !p.Field28DeepEqual(ano.IcebergCommitDatas) { + return false + } + if !p.Field29DeepEqual(ano.TxnId) { + return false + } + if !p.Field30DeepEqual(ano.Label) { + return false + } + if !p.Field31DeepEqual(ano.FragmentInstanceReports) { + return false + } + return true +} + +func (p *TReportExecStatusParams) Field1DeepEqual(src FrontendServiceVersion) bool { + + if p.ProtocolVersion != src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field2DeepEqual(src *types.TUniqueId) bool { + if !p.QueryId.DeepEqual(src) { return false } return true } -func (p *TMasterOpResult_) Field5DeepEqual(src *string) bool { +func (p *TReportExecStatusParams) Field3DeepEqual(src *int32) bool { - if p.Status == src { 
+ if p.BackendNum == src { return true - } else if p.Status == nil || src == nil { + } else if p.BackendNum == nil || src == nil { return false } - if strings.Compare(*p.Status, *src) != 0 { + if *p.BackendNum != *src { return false } return true } +func (p *TReportExecStatusParams) Field4DeepEqual(src *types.TUniqueId) bool { -type TUpdateExportTaskStatusRequest struct { - ProtocolVersion FrontendServiceVersion `thrift:"protocolVersion,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocolVersion"` - TaskId *types.TUniqueId `thrift:"taskId,2,required" frugal:"2,required,types.TUniqueId" json:"taskId"` - TaskStatus *palointernalservice.TExportStatusResult_ `thrift:"taskStatus,3,required" frugal:"3,required,palointernalservice.TExportStatusResult_" json:"taskStatus"` + if !p.FragmentInstanceId.DeepEqual(src) { + return false + } + return true } +func (p *TReportExecStatusParams) Field5DeepEqual(src *status.TStatus) bool { -func NewTUpdateExportTaskStatusRequest() *TUpdateExportTaskStatusRequest { - return &TUpdateExportTaskStatusRequest{} + if !p.Status.DeepEqual(src) { + return false + } + return true } +func (p *TReportExecStatusParams) Field6DeepEqual(src *bool) bool { -func (p *TUpdateExportTaskStatusRequest) InitDefault() { - *p = TUpdateExportTaskStatusRequest{} + if p.Done == src { + return true + } else if p.Done == nil || src == nil { + return false + } + if *p.Done != *src { + return false + } + return true } +func (p *TReportExecStatusParams) Field7DeepEqual(src *runtimeprofile.TRuntimeProfileTree) bool { -func (p *TUpdateExportTaskStatusRequest) GetProtocolVersion() (v FrontendServiceVersion) { + if !p.Profile.DeepEqual(src) { + return false + } + return true +} +func (p *TReportExecStatusParams) Field9DeepEqual(src []string) bool { + + if len(p.ErrorLog) != len(src) { + return false + } + for i, v := range p.ErrorLog { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field10DeepEqual(src []string) bool { + + if len(p.DeltaUrls) != len(src) { + return false + } + for i, v := range p.DeltaUrls { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field11DeepEqual(src map[string]string) bool { + + if len(p.LoadCounters) != len(src) { + return false + } + for k, v := range p.LoadCounters { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field12DeepEqual(src *string) bool { + + if p.TrackingUrl == src { + return true + } else if p.TrackingUrl == nil || src == nil { + return false + } + if strings.Compare(*p.TrackingUrl, *src) != 0 { + return false + } + return true +} +func (p *TReportExecStatusParams) Field13DeepEqual(src []string) bool { + + if len(p.ExportFiles) != len(src) { + return false + } + for i, v := range p.ExportFiles { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field14DeepEqual(src []*types.TTabletCommitInfo) bool { + + if len(p.CommitInfos) != len(src) { + return false + } + for i, v := range p.CommitInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field15DeepEqual(src *int64) bool { + + if p.LoadedRows == src { + return true + } else if p.LoadedRows == nil || src == nil { + return false + } + if *p.LoadedRows != *src { + return false + } + return true +} 
+func (p *TReportExecStatusParams) Field16DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field17DeepEqual(src *int64) bool { + + if p.LoadedBytes == src { + return true + } else if p.LoadedBytes == nil || src == nil { + return false + } + if *p.LoadedBytes != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field18DeepEqual(src []*types.TErrorTabletInfo) bool { + + if len(p.ErrorTabletInfos) != len(src) { + return false + } + for i, v := range p.ErrorTabletInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field19DeepEqual(src *int32) bool { + + if p.FragmentId == src { + return true + } else if p.FragmentId == nil || src == nil { + return false + } + if *p.FragmentId != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field20DeepEqual(src *palointernalservice.TQueryType) bool { + + if p.QueryType == src { + return true + } else if p.QueryType == nil || src == nil { + return false + } + if *p.QueryType != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field21DeepEqual(src *runtimeprofile.TRuntimeProfileTree) bool { + + if !p.LoadChannelProfile.DeepEqual(src) { + return false + } + return true +} +func (p *TReportExecStatusParams) Field22DeepEqual(src *int32) bool { + + if p.FinishedScanRanges == src { + return true + } else if p.FinishedScanRanges == nil || src == nil { + return false + } + if *p.FinishedScanRanges != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field23DeepEqual(src []*TDetailedReportParams) bool { + + if len(p.DetailedReport) != len(src) { + return false + } + for i, v := range p.DetailedReport { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field24DeepEqual(src *TQueryStatistics) bool { + + if !p.QueryStatistics.DeepEqual(src) { + return false + } + return true +} +func (p *TReportExecStatusParams) Field25DeepEqual(src *TReportWorkloadRuntimeStatusParams) bool { + + if !p.ReportWorkloadRuntimeStatus.DeepEqual(src) { + return false + } + return true +} +func (p *TReportExecStatusParams) Field26DeepEqual(src []*datasinks.THivePartitionUpdate) bool { + + if len(p.HivePartitionUpdates) != len(src) { + return false + } + for i, v := range p.HivePartitionUpdates { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field27DeepEqual(src *TQueryProfile) bool { + + if !p.QueryProfile.DeepEqual(src) { + return false + } + return true +} +func (p *TReportExecStatusParams) Field28DeepEqual(src []*datasinks.TIcebergCommitData) bool { + + if len(p.IcebergCommitDatas) != len(src) { + return false + } + for i, v := range p.IcebergCommitDatas { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TReportExecStatusParams) Field29DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TReportExecStatusParams) Field30DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return 
false + } + return true +} +func (p *TReportExecStatusParams) Field31DeepEqual(src []*TFragmentInstanceReport) bool { + + if len(p.FragmentInstanceReports) != len(src) { + return false + } + for i, v := range p.FragmentInstanceReports { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TFeResult_ struct { + ProtocolVersion FrontendServiceVersion `thrift:"protocolVersion,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocolVersion"` + Status *status.TStatus `thrift:"status,2,required" frugal:"2,required,status.TStatus" json:"status"` + CloudCluster *string `thrift:"cloud_cluster,1000,optional" frugal:"1000,optional,string" json:"cloud_cluster,omitempty"` + NoAuth *bool `thrift:"noAuth,1001,optional" frugal:"1001,optional,bool" json:"noAuth,omitempty"` +} + +func NewTFeResult_() *TFeResult_ { + return &TFeResult_{} +} + +func (p *TFeResult_) InitDefault() { +} + +func (p *TFeResult_) GetProtocolVersion() (v FrontendServiceVersion) { return p.ProtocolVersion } -var TUpdateExportTaskStatusRequest_TaskId_DEFAULT *types.TUniqueId +var TFeResult__Status_DEFAULT *status.TStatus -func (p *TUpdateExportTaskStatusRequest) GetTaskId() (v *types.TUniqueId) { - if !p.IsSetTaskId() { - return TUpdateExportTaskStatusRequest_TaskId_DEFAULT +func (p *TFeResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TFeResult__Status_DEFAULT } - return p.TaskId + return p.Status } -var TUpdateExportTaskStatusRequest_TaskStatus_DEFAULT *palointernalservice.TExportStatusResult_ +var TFeResult__CloudCluster_DEFAULT string -func (p *TUpdateExportTaskStatusRequest) GetTaskStatus() (v *palointernalservice.TExportStatusResult_) { - if !p.IsSetTaskStatus() { - return TUpdateExportTaskStatusRequest_TaskStatus_DEFAULT +func (p *TFeResult_) GetCloudCluster() (v string) { + if !p.IsSetCloudCluster() { + return TFeResult__CloudCluster_DEFAULT } - return p.TaskStatus + return *p.CloudCluster } -func (p *TUpdateExportTaskStatusRequest) SetProtocolVersion(val FrontendServiceVersion) { + +var TFeResult__NoAuth_DEFAULT bool + +func (p *TFeResult_) GetNoAuth() (v bool) { + if !p.IsSetNoAuth() { + return TFeResult__NoAuth_DEFAULT + } + return *p.NoAuth +} +func (p *TFeResult_) SetProtocolVersion(val FrontendServiceVersion) { p.ProtocolVersion = val } -func (p *TUpdateExportTaskStatusRequest) SetTaskId(val *types.TUniqueId) { - p.TaskId = val +func (p *TFeResult_) SetStatus(val *status.TStatus) { + p.Status = val } -func (p *TUpdateExportTaskStatusRequest) SetTaskStatus(val *palointernalservice.TExportStatusResult_) { - p.TaskStatus = val +func (p *TFeResult_) SetCloudCluster(val *string) { + p.CloudCluster = val +} +func (p *TFeResult_) SetNoAuth(val *bool) { + p.NoAuth = val } -var fieldIDToName_TUpdateExportTaskStatusRequest = map[int16]string{ - 1: "protocolVersion", - 2: "taskId", - 3: "taskStatus", +var fieldIDToName_TFeResult_ = map[int16]string{ + 1: "protocolVersion", + 2: "status", + 1000: "cloud_cluster", + 1001: "noAuth", } -func (p *TUpdateExportTaskStatusRequest) IsSetTaskId() bool { - return p.TaskId != nil +func (p *TFeResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TUpdateExportTaskStatusRequest) IsSetTaskStatus() bool { - return p.TaskStatus != nil +func (p *TFeResult_) IsSetCloudCluster() bool { + return p.CloudCluster != nil } -func (p *TUpdateExportTaskStatusRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TFeResult_) IsSetNoAuth() bool { + return p.NoAuth != nil +} + +func (p *TFeResult_) Read(iprot 
thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 var issetProtocolVersion bool = false - var issetTaskId bool = false - var issetTaskStatus bool = false + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -19335,39 +19108,39 @@ func (p *TUpdateExportTaskStatusRequest) Read(iprot thrift.TProtocol) (err error goto ReadFieldError } issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetTaskId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 3: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField3(iprot); err != nil { + case 1000: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1000(iprot); err != nil { goto ReadFieldError } - issetTaskStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19381,22 +19154,17 @@ func (p *TUpdateExportTaskStatusRequest) Read(iprot thrift.TProtocol) (err error goto RequiredFieldNotSetError } - if !issetTaskId { + if !issetStatus { fieldId = 2 goto RequiredFieldNotSetError } - - if !issetTaskStatus { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateExportTaskStatusRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFeResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19405,37 +19173,54 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TUpdateExportTaskStatusRequest[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFeResult_[fieldId])) } -func (p *TUpdateExportTaskStatusRequest) ReadField1(iprot thrift.TProtocol) error { +func (p *TFeResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field FrontendServiceVersion if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ProtocolVersion = FrontendServiceVersion(v) + _field = FrontendServiceVersion(v) + } + p.ProtocolVersion = _field + return nil +} +func (p *TFeResult_) ReadField2(iprot thrift.TProtocol) error { + _field 
:= status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err } + p.Status = _field return nil } +func (p *TFeResult_) ReadField1000(iprot thrift.TProtocol) error { -func (p *TUpdateExportTaskStatusRequest) ReadField2(iprot thrift.TProtocol) error { - p.TaskId = types.NewTUniqueId() - if err := p.TaskId.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.CloudCluster = _field return nil } +func (p *TFeResult_) ReadField1001(iprot thrift.TProtocol) error { -func (p *TUpdateExportTaskStatusRequest) ReadField3(iprot thrift.TProtocol) error { - p.TaskStatus = palointernalservice.NewTExportStatusResult_() - if err := p.TaskStatus.Read(iprot); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } + p.NoAuth = _field return nil } -func (p *TUpdateExportTaskStatusRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TFeResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TUpdateExportTaskStatusRequest"); err != nil { + if err = oprot.WriteStructBegin("TFeResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19447,11 +19232,14 @@ func (p *TUpdateExportTaskStatusRequest) Write(oprot thrift.TProtocol) (err erro fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19470,7 +19258,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TUpdateExportTaskStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TFeResult_) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("protocolVersion", thrift.I32, 1); err != nil { goto WriteFieldBeginError } @@ -19487,11 +19275,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TUpdateExportTaskStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("taskId", thrift.STRUCT, 2); err != nil { +func (p *TFeResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := p.TaskId.Write(oprot); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19504,31 +19292,53 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TUpdateExportTaskStatusRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("taskStatus", thrift.STRUCT, 3); err != nil { - goto WriteFieldBeginError - } - if err := p.TaskStatus.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TFeResult_) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudCluster() { + if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CloudCluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { 
+ goto WriteFieldEndError + } } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) } -func (p *TUpdateExportTaskStatusRequest) String() string { - if p == nil { - return "" +func (p *TFeResult_) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetNoAuth() { + if err = oprot.WriteFieldBegin("noAuth", thrift.BOOL, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.NoAuth); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return fmt.Sprintf("TUpdateExportTaskStatusRequest(%+v)", *p) + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) } -func (p *TUpdateExportTaskStatusRequest) DeepEqual(ano *TUpdateExportTaskStatusRequest) bool { +func (p *TFeResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFeResult_(%+v)", *p) + +} + +func (p *TFeResult_) DeepEqual(ano *TFeResult_) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19537,231 +19347,146 @@ func (p *TUpdateExportTaskStatusRequest) DeepEqual(ano *TUpdateExportTaskStatusR if !p.Field1DeepEqual(ano.ProtocolVersion) { return false } - if !p.Field2DeepEqual(ano.TaskId) { + if !p.Field2DeepEqual(ano.Status) { return false } - if !p.Field3DeepEqual(ano.TaskStatus) { + if !p.Field1000DeepEqual(ano.CloudCluster) { + return false + } + if !p.Field1001DeepEqual(ano.NoAuth) { return false } return true } -func (p *TUpdateExportTaskStatusRequest) Field1DeepEqual(src FrontendServiceVersion) bool { +func (p *TFeResult_) Field1DeepEqual(src FrontendServiceVersion) bool { if p.ProtocolVersion != src { return false } return true } -func (p *TUpdateExportTaskStatusRequest) Field2DeepEqual(src *types.TUniqueId) bool { +func (p *TFeResult_) Field2DeepEqual(src *status.TStatus) bool { - if !p.TaskId.DeepEqual(src) { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TUpdateExportTaskStatusRequest) Field3DeepEqual(src *palointernalservice.TExportStatusResult_) bool { +func (p *TFeResult_) Field1000DeepEqual(src *string) bool { - if !p.TaskStatus.DeepEqual(src) { + if p.CloudCluster == src { + return true + } else if p.CloudCluster == nil || src == nil { + return false + } + if strings.Compare(*p.CloudCluster, *src) != 0 { return false } return true } +func (p *TFeResult_) Field1001DeepEqual(src *bool) bool { -type TLoadTxnBeginRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` - Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` - Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - Label string `thrift:"label,7,required" frugal:"7,required,string" json:"label"` - Timestamp *int64 `thrift:"timestamp,8,optional" 
frugal:"8,optional,i64" json:"timestamp,omitempty"` - AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` - Timeout *int64 `thrift:"timeout,10,optional" frugal:"10,optional,i64" json:"timeout,omitempty"` - RequestId *types.TUniqueId `thrift:"request_id,11,optional" frugal:"11,optional,types.TUniqueId" json:"request_id,omitempty"` - Token *string `thrift:"token,12,optional" frugal:"12,optional,string" json:"token,omitempty"` -} - -func NewTLoadTxnBeginRequest() *TLoadTxnBeginRequest { - return &TLoadTxnBeginRequest{} -} - -func (p *TLoadTxnBeginRequest) InitDefault() { - *p = TLoadTxnBeginRequest{} -} - -var TLoadTxnBeginRequest_Cluster_DEFAULT string - -func (p *TLoadTxnBeginRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TLoadTxnBeginRequest_Cluster_DEFAULT + if p.NoAuth == src { + return true + } else if p.NoAuth == nil || src == nil { + return false } - return *p.Cluster -} - -func (p *TLoadTxnBeginRequest) GetUser() (v string) { - return p.User -} - -func (p *TLoadTxnBeginRequest) GetPasswd() (v string) { - return p.Passwd -} - -func (p *TLoadTxnBeginRequest) GetDb() (v string) { - return p.Db -} - -func (p *TLoadTxnBeginRequest) GetTbl() (v string) { - return p.Tbl -} - -var TLoadTxnBeginRequest_UserIp_DEFAULT string - -func (p *TLoadTxnBeginRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TLoadTxnBeginRequest_UserIp_DEFAULT + if *p.NoAuth != *src { + return false } - return *p.UserIp + return true } -func (p *TLoadTxnBeginRequest) GetLabel() (v string) { - return p.Label +type TSubTxnInfo struct { + SubTxnId *int64 `thrift:"sub_txn_id,1,optional" frugal:"1,optional,i64" json:"sub_txn_id,omitempty"` + TableId *int64 `thrift:"table_id,2,optional" frugal:"2,optional,i64" json:"table_id,omitempty"` + TabletCommitInfos []*types.TTabletCommitInfo `thrift:"tablet_commit_infos,3,optional" frugal:"3,optional,list" json:"tablet_commit_infos,omitempty"` + SubTxnType *TSubTxnType `thrift:"sub_txn_type,4,optional" frugal:"4,optional,TSubTxnType" json:"sub_txn_type,omitempty"` } -var TLoadTxnBeginRequest_Timestamp_DEFAULT int64 +func NewTSubTxnInfo() *TSubTxnInfo { + return &TSubTxnInfo{} +} -func (p *TLoadTxnBeginRequest) GetTimestamp() (v int64) { - if !p.IsSetTimestamp() { - return TLoadTxnBeginRequest_Timestamp_DEFAULT - } - return *p.Timestamp +func (p *TSubTxnInfo) InitDefault() { } -var TLoadTxnBeginRequest_AuthCode_DEFAULT int64 +var TSubTxnInfo_SubTxnId_DEFAULT int64 -func (p *TLoadTxnBeginRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TLoadTxnBeginRequest_AuthCode_DEFAULT +func (p *TSubTxnInfo) GetSubTxnId() (v int64) { + if !p.IsSetSubTxnId() { + return TSubTxnInfo_SubTxnId_DEFAULT } - return *p.AuthCode + return *p.SubTxnId } -var TLoadTxnBeginRequest_Timeout_DEFAULT int64 +var TSubTxnInfo_TableId_DEFAULT int64 -func (p *TLoadTxnBeginRequest) GetTimeout() (v int64) { - if !p.IsSetTimeout() { - return TLoadTxnBeginRequest_Timeout_DEFAULT +func (p *TSubTxnInfo) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TSubTxnInfo_TableId_DEFAULT } - return *p.Timeout + return *p.TableId } -var TLoadTxnBeginRequest_RequestId_DEFAULT *types.TUniqueId +var TSubTxnInfo_TabletCommitInfos_DEFAULT []*types.TTabletCommitInfo -func (p *TLoadTxnBeginRequest) GetRequestId() (v *types.TUniqueId) { - if !p.IsSetRequestId() { - return TLoadTxnBeginRequest_RequestId_DEFAULT +func (p *TSubTxnInfo) GetTabletCommitInfos() (v []*types.TTabletCommitInfo) { + if !p.IsSetTabletCommitInfos() { + return 
TSubTxnInfo_TabletCommitInfos_DEFAULT } - return p.RequestId + return p.TabletCommitInfos } -var TLoadTxnBeginRequest_Token_DEFAULT string +var TSubTxnInfo_SubTxnType_DEFAULT TSubTxnType -func (p *TLoadTxnBeginRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TLoadTxnBeginRequest_Token_DEFAULT +func (p *TSubTxnInfo) GetSubTxnType() (v TSubTxnType) { + if !p.IsSetSubTxnType() { + return TSubTxnInfo_SubTxnType_DEFAULT } - return *p.Token -} -func (p *TLoadTxnBeginRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TLoadTxnBeginRequest) SetUser(val string) { - p.User = val -} -func (p *TLoadTxnBeginRequest) SetPasswd(val string) { - p.Passwd = val -} -func (p *TLoadTxnBeginRequest) SetDb(val string) { - p.Db = val -} -func (p *TLoadTxnBeginRequest) SetTbl(val string) { - p.Tbl = val -} -func (p *TLoadTxnBeginRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TLoadTxnBeginRequest) SetLabel(val string) { - p.Label = val -} -func (p *TLoadTxnBeginRequest) SetTimestamp(val *int64) { - p.Timestamp = val -} -func (p *TLoadTxnBeginRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TLoadTxnBeginRequest) SetTimeout(val *int64) { - p.Timeout = val + return *p.SubTxnType } -func (p *TLoadTxnBeginRequest) SetRequestId(val *types.TUniqueId) { - p.RequestId = val -} -func (p *TLoadTxnBeginRequest) SetToken(val *string) { - p.Token = val +func (p *TSubTxnInfo) SetSubTxnId(val *int64) { + p.SubTxnId = val } - -var fieldIDToName_TLoadTxnBeginRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "tbl", - 6: "user_ip", - 7: "label", - 8: "timestamp", - 9: "auth_code", - 10: "timeout", - 11: "request_id", - 12: "token", +func (p *TSubTxnInfo) SetTableId(val *int64) { + p.TableId = val } - -func (p *TLoadTxnBeginRequest) IsSetCluster() bool { - return p.Cluster != nil +func (p *TSubTxnInfo) SetTabletCommitInfos(val []*types.TTabletCommitInfo) { + p.TabletCommitInfos = val } - -func (p *TLoadTxnBeginRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TSubTxnInfo) SetSubTxnType(val *TSubTxnType) { + p.SubTxnType = val } -func (p *TLoadTxnBeginRequest) IsSetTimestamp() bool { - return p.Timestamp != nil +var fieldIDToName_TSubTxnInfo = map[int16]string{ + 1: "sub_txn_id", + 2: "table_id", + 3: "tablet_commit_infos", + 4: "sub_txn_type", } -func (p *TLoadTxnBeginRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +func (p *TSubTxnInfo) IsSetSubTxnId() bool { + return p.SubTxnId != nil } -func (p *TLoadTxnBeginRequest) IsSetTimeout() bool { - return p.Timeout != nil +func (p *TSubTxnInfo) IsSetTableId() bool { + return p.TableId != nil } -func (p *TLoadTxnBeginRequest) IsSetRequestId() bool { - return p.RequestId != nil +func (p *TSubTxnInfo) IsSetTabletCommitInfos() bool { + return p.TabletCommitInfos != nil } -func (p *TLoadTxnBeginRequest) IsSetToken() bool { - return p.Token != nil +func (p *TSubTxnInfo) IsSetSubTxnType() bool { + return p.SubTxnType != nil } -func (p *TLoadTxnBeginRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TSubTxnInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetLabel bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -19778,136 +19503,42 @@ func (p *TLoadTxnBeginRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == 
thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - issetTbl = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - issetLabel = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19916,37 +19547,13 @@ func (p *TLoadTxnBeginRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetLabel { - fieldId = 7 - goto 
RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSubTxnInfo[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19954,120 +19561,69 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginRequest[fieldId])) -} - -func (p *TLoadTxnBeginRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TLoadTxnBeginRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} - -func (p *TLoadTxnBeginRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = v - } - return nil -} - -func (p *TLoadTxnBeginRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = v - } - return nil -} - -func (p *TLoadTxnBeginRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Tbl = v - } - return nil -} - -func (p *TLoadTxnBeginRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil } -func (p *TLoadTxnBeginRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Label = v - } - return nil -} +func (p *TSubTxnInfo) ReadField1(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginRequest) ReadField8(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Timestamp = &v + _field = &v } + p.SubTxnId = _field return nil } +func (p *TSubTxnInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginRequest) ReadField9(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AuthCode = &v + _field = &v } + p.TableId = _field return nil } - -func (p *TLoadTxnBeginRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TSubTxnInfo) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.Timeout = &v } - return nil -} + _field := make([]*types.TTabletCommitInfo, 0, size) + values := make([]types.TTabletCommitInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TLoadTxnBeginRequest) ReadField11(iprot thrift.TProtocol) error { - p.RequestId = types.NewTUniqueId() - if err := 
p.RequestId.Read(iprot); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletCommitInfos = _field return nil } +func (p *TSubTxnInfo) ReadField4(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *TSubTxnType + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Token = &v + tmp := TSubTxnType(v) + _field = &tmp } + p.SubTxnType = _field return nil } -func (p *TLoadTxnBeginRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TSubTxnInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnBeginRequest"); err != nil { + if err = oprot.WriteStructBegin("TSubTxnInfo"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20087,39 +19643,6 @@ func (p *TLoadTxnBeginRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20138,12 +19661,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnBeginRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *TSubTxnInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnId() { + if err = oprot.WriteFieldBegin("sub_txn_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := oprot.WriteI64(*p.SubTxnId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -20157,15 +19680,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnBeginRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TSubTxnInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -20174,15 +19699,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TLoadTxnBeginRequest) 
writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TSubTxnInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletCommitInfos() { + if err = oprot.WriteFieldBegin("tablet_commit_infos", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TabletCommitInfos)); err != nil { + return err + } + for _, v := range p.TabletCommitInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -20191,46 +19726,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TLoadTxnBeginRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Tbl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { +func (p *TSubTxnInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnType() { + if err = oprot.WriteFieldBegin("sub_txn_type", thrift.I32, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.UserIp); err != nil { + if err := oprot.WriteI32(int32(*p.SubTxnType)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -20239,381 +19740,215 @@ func (p *TLoadTxnBeginRequest) writeField6(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TLoadTxnBeginRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("label", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError +func (p *TSubTxnInfo) String() string { + if p == nil { + return "" } - if err := oprot.WriteString(p.Label); err != nil { - return err + return 
fmt.Sprintf("TSubTxnInfo(%+v)", *p) + +} + +func (p *TSubTxnInfo) DeepEqual(ano *TSubTxnInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err = oprot.WriteFieldBegin("timestamp", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Timestamp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.AuthCode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeout() { - if err = oprot.WriteFieldBegin("timeout", thrift.I64, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Timeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetRequestId() { - if err = oprot.WriteFieldBegin("request_id", thrift.STRUCT, 11); err != nil { - goto WriteFieldBeginError - } - if err := p.RequestId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) -} - -func (p *TLoadTxnBeginRequest) String() string { - if p == nil { - return "" - } - 
return fmt.Sprintf("TLoadTxnBeginRequest(%+v)", *p) -} - -func (p *TLoadTxnBeginRequest) DeepEqual(ano *TLoadTxnBeginRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.Tbl) { - return false - } - if !p.Field6DeepEqual(ano.UserIp) { - return false - } - if !p.Field7DeepEqual(ano.Label) { - return false - } - if !p.Field8DeepEqual(ano.Timestamp) { - return false - } - if !p.Field9DeepEqual(ano.AuthCode) { + if !p.Field1DeepEqual(ano.SubTxnId) { return false } - if !p.Field10DeepEqual(ano.Timeout) { + if !p.Field2DeepEqual(ano.TableId) { return false } - if !p.Field11DeepEqual(ano.RequestId) { + if !p.Field3DeepEqual(ano.TabletCommitInfos) { return false } - if !p.Field12DeepEqual(ano.Token) { + if !p.Field4DeepEqual(ano.SubTxnType) { return false } return true } -func (p *TLoadTxnBeginRequest) Field1DeepEqual(src *string) bool { +func (p *TSubTxnInfo) Field1DeepEqual(src *int64) bool { - if p.Cluster == src { + if p.SubTxnId == src { return true - } else if p.Cluster == nil || src == nil { + } else if p.SubTxnId == nil || src == nil { return false } - if strings.Compare(*p.Cluster, *src) != 0 { + if *p.SubTxnId != *src { return false } return true } -func (p *TLoadTxnBeginRequest) Field2DeepEqual(src string) bool { +func (p *TSubTxnInfo) Field2DeepEqual(src *int64) bool { - if strings.Compare(p.User, src) != 0 { + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { return false } - return true -} -func (p *TLoadTxnBeginRequest) Field3DeepEqual(src string) bool { - - if strings.Compare(p.Passwd, src) != 0 { + if *p.TableId != *src { return false } return true } -func (p *TLoadTxnBeginRequest) Field4DeepEqual(src string) bool { +func (p *TSubTxnInfo) Field3DeepEqual(src []*types.TTabletCommitInfo) bool { - if strings.Compare(p.Db, src) != 0 { + if len(p.TabletCommitInfos) != len(src) { return false } - return true -} -func (p *TLoadTxnBeginRequest) Field5DeepEqual(src string) bool { - - if strings.Compare(p.Tbl, src) != 0 { - return false + for i, v := range p.TabletCommitInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TLoadTxnBeginRequest) Field6DeepEqual(src *string) bool { +func (p *TSubTxnInfo) Field4DeepEqual(src *TSubTxnType) bool { - if p.UserIp == src { + if p.SubTxnType == src { return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { + } else if p.SubTxnType == nil || src == nil { return false } - return true -} -func (p *TLoadTxnBeginRequest) Field7DeepEqual(src string) bool { - - if strings.Compare(p.Label, src) != 0 { + if *p.SubTxnType != *src { return false } return true } -func (p *TLoadTxnBeginRequest) Field8DeepEqual(src *int64) bool { - if p.Timestamp == src { - return true - } else if p.Timestamp == nil || src == nil { - return false - } - if *p.Timestamp != *src { - return false - } - return true +type TTxnLoadInfo struct { + Label *string `thrift:"label,1,optional" frugal:"1,optional,string" json:"label,omitempty"` + DbId *int64 `thrift:"dbId,2,optional" frugal:"2,optional,i64" json:"dbId,omitempty"` + TxnId *int64 `thrift:"txnId,3,optional" frugal:"3,optional,i64" json:"txnId,omitempty"` + TimeoutTimestamp *int64 
`thrift:"timeoutTimestamp,4,optional" frugal:"4,optional,i64" json:"timeoutTimestamp,omitempty"` + AllSubTxnNum *int64 `thrift:"allSubTxnNum,5,optional" frugal:"5,optional,i64" json:"allSubTxnNum,omitempty"` + SubTxnInfos []*TSubTxnInfo `thrift:"subTxnInfos,6,optional" frugal:"6,optional,list" json:"subTxnInfos,omitempty"` } -func (p *TLoadTxnBeginRequest) Field9DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false - } - return true +func NewTTxnLoadInfo() *TTxnLoadInfo { + return &TTxnLoadInfo{} } -func (p *TLoadTxnBeginRequest) Field10DeepEqual(src *int64) bool { - if p.Timeout == src { - return true - } else if p.Timeout == nil || src == nil { - return false - } - if *p.Timeout != *src { - return false - } - return true +func (p *TTxnLoadInfo) InitDefault() { } -func (p *TLoadTxnBeginRequest) Field11DeepEqual(src *types.TUniqueId) bool { - if !p.RequestId.DeepEqual(src) { - return false - } - return true -} -func (p *TLoadTxnBeginRequest) Field12DeepEqual(src *string) bool { +var TTxnLoadInfo_Label_DEFAULT string - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false +func (p *TTxnLoadInfo) GetLabel() (v string) { + if !p.IsSetLabel() { + return TTxnLoadInfo_Label_DEFAULT } - return true -} - -type TLoadTxnBeginResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` - TxnId *int64 `thrift:"txnId,2,optional" frugal:"2,optional,i64" json:"txnId,omitempty"` - JobStatus *string `thrift:"job_status,3,optional" frugal:"3,optional,string" json:"job_status,omitempty"` - DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` + return *p.Label } -func NewTLoadTxnBeginResult_() *TLoadTxnBeginResult_ { - return &TLoadTxnBeginResult_{} -} +var TTxnLoadInfo_DbId_DEFAULT int64 -func (p *TLoadTxnBeginResult_) InitDefault() { - *p = TLoadTxnBeginResult_{} +func (p *TTxnLoadInfo) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TTxnLoadInfo_DbId_DEFAULT + } + return *p.DbId } -var TLoadTxnBeginResult__Status_DEFAULT *status.TStatus +var TTxnLoadInfo_TxnId_DEFAULT int64 -func (p *TLoadTxnBeginResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TLoadTxnBeginResult__Status_DEFAULT +func (p *TTxnLoadInfo) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TTxnLoadInfo_TxnId_DEFAULT } - return p.Status + return *p.TxnId } -var TLoadTxnBeginResult__TxnId_DEFAULT int64 +var TTxnLoadInfo_TimeoutTimestamp_DEFAULT int64 -func (p *TLoadTxnBeginResult_) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TLoadTxnBeginResult__TxnId_DEFAULT +func (p *TTxnLoadInfo) GetTimeoutTimestamp() (v int64) { + if !p.IsSetTimeoutTimestamp() { + return TTxnLoadInfo_TimeoutTimestamp_DEFAULT } - return *p.TxnId + return *p.TimeoutTimestamp } -var TLoadTxnBeginResult__JobStatus_DEFAULT string +var TTxnLoadInfo_AllSubTxnNum_DEFAULT int64 -func (p *TLoadTxnBeginResult_) GetJobStatus() (v string) { - if !p.IsSetJobStatus() { - return TLoadTxnBeginResult__JobStatus_DEFAULT +func (p *TTxnLoadInfo) GetAllSubTxnNum() (v int64) { + if !p.IsSetAllSubTxnNum() { + return TTxnLoadInfo_AllSubTxnNum_DEFAULT } - return *p.JobStatus + return *p.AllSubTxnNum } -var TLoadTxnBeginResult__DbId_DEFAULT int64 +var TTxnLoadInfo_SubTxnInfos_DEFAULT []*TSubTxnInfo -func (p *TLoadTxnBeginResult_) GetDbId() (v 
int64) { - if !p.IsSetDbId() { - return TLoadTxnBeginResult__DbId_DEFAULT +func (p *TTxnLoadInfo) GetSubTxnInfos() (v []*TSubTxnInfo) { + if !p.IsSetSubTxnInfos() { + return TTxnLoadInfo_SubTxnInfos_DEFAULT } - return *p.DbId + return p.SubTxnInfos } -func (p *TLoadTxnBeginResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TTxnLoadInfo) SetLabel(val *string) { + p.Label = val } -func (p *TLoadTxnBeginResult_) SetTxnId(val *int64) { +func (p *TTxnLoadInfo) SetDbId(val *int64) { + p.DbId = val +} +func (p *TTxnLoadInfo) SetTxnId(val *int64) { p.TxnId = val } -func (p *TLoadTxnBeginResult_) SetJobStatus(val *string) { - p.JobStatus = val +func (p *TTxnLoadInfo) SetTimeoutTimestamp(val *int64) { + p.TimeoutTimestamp = val } -func (p *TLoadTxnBeginResult_) SetDbId(val *int64) { - p.DbId = val +func (p *TTxnLoadInfo) SetAllSubTxnNum(val *int64) { + p.AllSubTxnNum = val +} +func (p *TTxnLoadInfo) SetSubTxnInfos(val []*TSubTxnInfo) { + p.SubTxnInfos = val } -var fieldIDToName_TLoadTxnBeginResult_ = map[int16]string{ - 1: "status", - 2: "txnId", - 3: "job_status", - 4: "db_id", +var fieldIDToName_TTxnLoadInfo = map[int16]string{ + 1: "label", + 2: "dbId", + 3: "txnId", + 4: "timeoutTimestamp", + 5: "allSubTxnNum", + 6: "subTxnInfos", } -func (p *TLoadTxnBeginResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TTxnLoadInfo) IsSetLabel() bool { + return p.Label != nil } -func (p *TLoadTxnBeginResult_) IsSetTxnId() bool { +func (p *TTxnLoadInfo) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TTxnLoadInfo) IsSetTxnId() bool { return p.TxnId != nil } -func (p *TLoadTxnBeginResult_) IsSetJobStatus() bool { - return p.JobStatus != nil +func (p *TTxnLoadInfo) IsSetTimeoutTimestamp() bool { + return p.TimeoutTimestamp != nil } -func (p *TLoadTxnBeginResult_) IsSetDbId() bool { - return p.DbId != nil +func (p *TTxnLoadInfo) IsSetAllSubTxnNum() bool { + return p.AllSubTxnNum != nil } -func (p *TLoadTxnBeginResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TTxnLoadInfo) IsSetSubTxnInfos() bool { + return p.SubTxnInfos != nil +} + +func (p *TTxnLoadInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -20630,52 +19965,58 @@ func (p *TLoadTxnBeginResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20684,17 +20025,13 @@ func (p *TLoadTxnBeginResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnLoadInfo[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20702,48 +20039,90 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginResult_[fieldId])) } -func (p *TLoadTxnBeginResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TTxnLoadInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Label = _field return nil } +func (p *TTxnLoadInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginResult_) ReadField2(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnId = &v + _field = &v } + p.DbId = _field return nil } +func (p *TTxnLoadInfo) ReadField3(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.JobStatus = &v + _field = &v } + p.TxnId = _field return nil } +func (p *TTxnLoadInfo) ReadField4(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginResult_) ReadField4(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = &v + _field = &v } + p.TimeoutTimestamp = _field return nil } +func (p *TTxnLoadInfo) ReadField5(iprot thrift.TProtocol) error { -func (p *TLoadTxnBeginResult_) Write(oprot thrift.TProtocol) (err error) { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AllSubTxnNum = _field + return nil +} +func (p *TTxnLoadInfo) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + 
} + _field := make([]*TSubTxnInfo, 0, size) + values := make([]TSubTxnInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SubTxnInfos = _field + return nil +} + +func (p *TTxnLoadInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnBeginResult"); err != nil { + if err = oprot.WriteStructBegin("TTxnLoadInfo"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20763,7 +20142,14 @@ func (p *TLoadTxnBeginResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20782,15 +20168,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnBeginResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TTxnLoadInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -20799,12 +20187,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnBeginResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txnId", thrift.I64, 2); err != nil { +func (p *TTxnLoadInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TxnId); err != nil { + if err := oprot.WriteI64(*p.DbId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -20818,12 +20206,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TLoadTxnBeginResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetJobStatus() { - if err = oprot.WriteFieldBegin("job_status", thrift.STRING, 3); err != nil { +func (p *TTxnLoadInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.JobStatus); err != nil { + if err := oprot.WriteI64(*p.TxnId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -20837,12 +20225,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TLoadTxnBeginResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { +func (p *TTxnLoadInfo) writeField4(oprot thrift.TProtocol) (err 
error) { + if p.IsSetTimeoutTimestamp() { + if err = oprot.WriteFieldBegin("timeoutTimestamp", thrift.I64, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteI64(*p.TimeoutTimestamp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -20856,42 +20244,112 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TLoadTxnBeginResult_) String() string { +func (p *TTxnLoadInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetAllSubTxnNum() { + if err = oprot.WriteFieldBegin("allSubTxnNum", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AllSubTxnNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TTxnLoadInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnInfos() { + if err = oprot.WriteFieldBegin("subTxnInfos", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SubTxnInfos)); err != nil { + return err + } + for _, v := range p.SubTxnInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TTxnLoadInfo) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxnBeginResult_(%+v)", *p) + return fmt.Sprintf("TTxnLoadInfo(%+v)", *p) + } -func (p *TLoadTxnBeginResult_) DeepEqual(ano *TLoadTxnBeginResult_) bool { +func (p *TTxnLoadInfo) DeepEqual(ano *TTxnLoadInfo) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Label) { return false } - if !p.Field2DeepEqual(ano.TxnId) { + if !p.Field2DeepEqual(ano.DbId) { return false } - if !p.Field3DeepEqual(ano.JobStatus) { + if !p.Field3DeepEqual(ano.TxnId) { return false } - if !p.Field4DeepEqual(ano.DbId) { + if !p.Field4DeepEqual(ano.TimeoutTimestamp) { + return false + } + if !p.Field5DeepEqual(ano.AllSubTxnNum) { + return false + } + if !p.Field6DeepEqual(ano.SubTxnInfos) { return false } return true } -func (p *TLoadTxnBeginResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TTxnLoadInfo) Field1DeepEqual(src *string) bool { - if !p.Status.DeepEqual(src) { + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { return false } return true } -func (p *TLoadTxnBeginResult_) Field2DeepEqual(src *int64) bool { +func (p *TTxnLoadInfo) Field2DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TTxnLoadInfo) Field3DeepEqual(src *int64) bool { if p.TxnId == src { return true @@ -20903,244 +20361,166 @@ func (p *TLoadTxnBeginResult_) 
Field2DeepEqual(src *int64) bool { } return true } -func (p *TLoadTxnBeginResult_) Field3DeepEqual(src *string) bool { +func (p *TTxnLoadInfo) Field4DeepEqual(src *int64) bool { - if p.JobStatus == src { + if p.TimeoutTimestamp == src { return true - } else if p.JobStatus == nil || src == nil { + } else if p.TimeoutTimestamp == nil || src == nil { return false } - if strings.Compare(*p.JobStatus, *src) != 0 { + if *p.TimeoutTimestamp != *src { return false } return true } -func (p *TLoadTxnBeginResult_) Field4DeepEqual(src *int64) bool { +func (p *TTxnLoadInfo) Field5DeepEqual(src *int64) bool { - if p.DbId == src { + if p.AllSubTxnNum == src { return true - } else if p.DbId == nil || src == nil { + } else if p.AllSubTxnNum == nil || src == nil { return false } - if *p.DbId != *src { + if *p.AllSubTxnNum != *src { return false } return true } +func (p *TTxnLoadInfo) Field6DeepEqual(src []*TSubTxnInfo) bool { -type TBeginTxnRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - TableIds []int64 `thrift:"table_ids,5,optional" frugal:"5,optional,list" json:"table_ids,omitempty"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - Label *string `thrift:"label,7,optional" frugal:"7,optional,string" json:"label,omitempty"` - AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` - Timeout *int64 `thrift:"timeout,9,optional" frugal:"9,optional,i64" json:"timeout,omitempty"` - RequestId *types.TUniqueId `thrift:"request_id,10,optional" frugal:"10,optional,types.TUniqueId" json:"request_id,omitempty"` - Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + if len(p.SubTxnInfos) != len(src) { + return false + } + for i, v := range p.SubTxnInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } -func NewTBeginTxnRequest() *TBeginTxnRequest { - return &TBeginTxnRequest{} +type TGroupCommitInfo struct { + GetGroupCommitLoadBeId *bool `thrift:"getGroupCommitLoadBeId,1,optional" frugal:"1,optional,bool" json:"getGroupCommitLoadBeId,omitempty"` + GroupCommitLoadTableId *int64 `thrift:"groupCommitLoadTableId,2,optional" frugal:"2,optional,i64" json:"groupCommitLoadTableId,omitempty"` + Cluster *string `thrift:"cluster,3,optional" frugal:"3,optional,string" json:"cluster,omitempty"` + UpdateLoadData *bool `thrift:"updateLoadData,5,optional" frugal:"5,optional,bool" json:"updateLoadData,omitempty"` + TableId *int64 `thrift:"tableId,6,optional" frugal:"6,optional,i64" json:"tableId,omitempty"` + ReceiveData *int64 `thrift:"receiveData,7,optional" frugal:"7,optional,i64" json:"receiveData,omitempty"` } -func (p *TBeginTxnRequest) InitDefault() { - *p = TBeginTxnRequest{} -} - -var TBeginTxnRequest_Cluster_DEFAULT string - -func (p *TBeginTxnRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TBeginTxnRequest_Cluster_DEFAULT - } - return *p.Cluster -} - -var TBeginTxnRequest_User_DEFAULT string - -func (p *TBeginTxnRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TBeginTxnRequest_User_DEFAULT - } - return *p.User +func NewTGroupCommitInfo() *TGroupCommitInfo { + return 
&TGroupCommitInfo{} } -var TBeginTxnRequest_Passwd_DEFAULT string - -func (p *TBeginTxnRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TBeginTxnRequest_Passwd_DEFAULT - } - return *p.Passwd +func (p *TGroupCommitInfo) InitDefault() { } -var TBeginTxnRequest_Db_DEFAULT string +var TGroupCommitInfo_GetGroupCommitLoadBeId_DEFAULT bool -func (p *TBeginTxnRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TBeginTxnRequest_Db_DEFAULT +func (p *TGroupCommitInfo) GetGetGroupCommitLoadBeId() (v bool) { + if !p.IsSetGetGroupCommitLoadBeId() { + return TGroupCommitInfo_GetGroupCommitLoadBeId_DEFAULT } - return *p.Db + return *p.GetGroupCommitLoadBeId } -var TBeginTxnRequest_TableIds_DEFAULT []int64 +var TGroupCommitInfo_GroupCommitLoadTableId_DEFAULT int64 -func (p *TBeginTxnRequest) GetTableIds() (v []int64) { - if !p.IsSetTableIds() { - return TBeginTxnRequest_TableIds_DEFAULT +func (p *TGroupCommitInfo) GetGroupCommitLoadTableId() (v int64) { + if !p.IsSetGroupCommitLoadTableId() { + return TGroupCommitInfo_GroupCommitLoadTableId_DEFAULT } - return p.TableIds + return *p.GroupCommitLoadTableId } -var TBeginTxnRequest_UserIp_DEFAULT string +var TGroupCommitInfo_Cluster_DEFAULT string -func (p *TBeginTxnRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TBeginTxnRequest_UserIp_DEFAULT +func (p *TGroupCommitInfo) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGroupCommitInfo_Cluster_DEFAULT } - return *p.UserIp + return *p.Cluster } -var TBeginTxnRequest_Label_DEFAULT string +var TGroupCommitInfo_UpdateLoadData_DEFAULT bool -func (p *TBeginTxnRequest) GetLabel() (v string) { - if !p.IsSetLabel() { - return TBeginTxnRequest_Label_DEFAULT +func (p *TGroupCommitInfo) GetUpdateLoadData() (v bool) { + if !p.IsSetUpdateLoadData() { + return TGroupCommitInfo_UpdateLoadData_DEFAULT } - return *p.Label + return *p.UpdateLoadData } -var TBeginTxnRequest_AuthCode_DEFAULT int64 +var TGroupCommitInfo_TableId_DEFAULT int64 -func (p *TBeginTxnRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TBeginTxnRequest_AuthCode_DEFAULT +func (p *TGroupCommitInfo) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TGroupCommitInfo_TableId_DEFAULT } - return *p.AuthCode + return *p.TableId } -var TBeginTxnRequest_Timeout_DEFAULT int64 +var TGroupCommitInfo_ReceiveData_DEFAULT int64 -func (p *TBeginTxnRequest) GetTimeout() (v int64) { - if !p.IsSetTimeout() { - return TBeginTxnRequest_Timeout_DEFAULT +func (p *TGroupCommitInfo) GetReceiveData() (v int64) { + if !p.IsSetReceiveData() { + return TGroupCommitInfo_ReceiveData_DEFAULT } - return *p.Timeout + return *p.ReceiveData } - -var TBeginTxnRequest_RequestId_DEFAULT *types.TUniqueId - -func (p *TBeginTxnRequest) GetRequestId() (v *types.TUniqueId) { - if !p.IsSetRequestId() { - return TBeginTxnRequest_RequestId_DEFAULT - } - return p.RequestId +func (p *TGroupCommitInfo) SetGetGroupCommitLoadBeId(val *bool) { + p.GetGroupCommitLoadBeId = val } - -var TBeginTxnRequest_Token_DEFAULT string - -func (p *TBeginTxnRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TBeginTxnRequest_Token_DEFAULT - } - return *p.Token +func (p *TGroupCommitInfo) SetGroupCommitLoadTableId(val *int64) { + p.GroupCommitLoadTableId = val } -func (p *TBeginTxnRequest) SetCluster(val *string) { +func (p *TGroupCommitInfo) SetCluster(val *string) { p.Cluster = val } -func (p *TBeginTxnRequest) SetUser(val *string) { - p.User = val -} -func (p *TBeginTxnRequest) SetPasswd(val *string) { - p.Passwd = val -} -func 
(p *TBeginTxnRequest) SetDb(val *string) { - p.Db = val -} -func (p *TBeginTxnRequest) SetTableIds(val []int64) { - p.TableIds = val -} -func (p *TBeginTxnRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TBeginTxnRequest) SetLabel(val *string) { - p.Label = val -} -func (p *TBeginTxnRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TBeginTxnRequest) SetTimeout(val *int64) { - p.Timeout = val -} -func (p *TBeginTxnRequest) SetRequestId(val *types.TUniqueId) { - p.RequestId = val -} -func (p *TBeginTxnRequest) SetToken(val *string) { - p.Token = val -} - -var fieldIDToName_TBeginTxnRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "table_ids", - 6: "user_ip", - 7: "label", - 8: "auth_code", - 9: "timeout", - 10: "request_id", - 11: "token", -} - -func (p *TBeginTxnRequest) IsSetCluster() bool { - return p.Cluster != nil -} - -func (p *TBeginTxnRequest) IsSetUser() bool { - return p.User != nil +func (p *TGroupCommitInfo) SetUpdateLoadData(val *bool) { + p.UpdateLoadData = val } - -func (p *TBeginTxnRequest) IsSetPasswd() bool { - return p.Passwd != nil +func (p *TGroupCommitInfo) SetTableId(val *int64) { + p.TableId = val } - -func (p *TBeginTxnRequest) IsSetDb() bool { - return p.Db != nil +func (p *TGroupCommitInfo) SetReceiveData(val *int64) { + p.ReceiveData = val } -func (p *TBeginTxnRequest) IsSetTableIds() bool { - return p.TableIds != nil +var fieldIDToName_TGroupCommitInfo = map[int16]string{ + 1: "getGroupCommitLoadBeId", + 2: "groupCommitLoadTableId", + 3: "cluster", + 5: "updateLoadData", + 6: "tableId", + 7: "receiveData", } -func (p *TBeginTxnRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TGroupCommitInfo) IsSetGetGroupCommitLoadBeId() bool { + return p.GetGroupCommitLoadBeId != nil } -func (p *TBeginTxnRequest) IsSetLabel() bool { - return p.Label != nil +func (p *TGroupCommitInfo) IsSetGroupCommitLoadTableId() bool { + return p.GroupCommitLoadTableId != nil } -func (p *TBeginTxnRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +func (p *TGroupCommitInfo) IsSetCluster() bool { + return p.Cluster != nil } -func (p *TBeginTxnRequest) IsSetTimeout() bool { - return p.Timeout != nil +func (p *TGroupCommitInfo) IsSetUpdateLoadData() bool { + return p.UpdateLoadData != nil } -func (p *TBeginTxnRequest) IsSetRequestId() bool { - return p.RequestId != nil +func (p *TGroupCommitInfo) IsSetTableId() bool { + return p.TableId != nil } -func (p *TBeginTxnRequest) IsSetToken() bool { - return p.Token != nil +func (p *TGroupCommitInfo) IsSetReceiveData() bool { + return p.ReceiveData != nil } -func (p *TBeginTxnRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGroupCommitInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -21160,121 +20540,58 @@ func (p *TBeginTxnRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } 
case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { + if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -21289,7 +20606,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGroupCommitInfo[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -21299,120 +20616,76 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBeginTxnRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TBeginTxnRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = &v - } - return nil -} +func (p *TGroupCommitInfo) ReadField1(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) 
ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Passwd = &v + _field = &v } + p.GetGroupCommitLoadBeId = _field return nil } +func (p *TGroupCommitInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Db = &v - } - return nil -} - -func (p *TBeginTxnRequest) ReadField5(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.TableIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem = v - } - - p.TableIds = append(p.TableIds, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + _field = &v } + p.GroupCommitLoadTableId = _field return nil } +func (p *TGroupCommitInfo) ReadField3(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) ReadField6(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.Cluster = _field return nil } +func (p *TGroupCommitInfo) ReadField5(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Label = &v + _field = &v } + p.UpdateLoadData = _field return nil } +func (p *TGroupCommitInfo) ReadField6(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) ReadField8(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AuthCode = &v + _field = &v } + p.TableId = _field return nil } +func (p *TGroupCommitInfo) ReadField7(iprot thrift.TProtocol) error { -func (p *TBeginTxnRequest) ReadField9(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Timeout = &v - } - return nil -} - -func (p *TBeginTxnRequest) ReadField10(iprot thrift.TProtocol) error { - p.RequestId = types.NewTUniqueId() - if err := p.RequestId.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TBeginTxnRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v + _field = &v } + p.ReceiveData = _field return nil } -func (p *TBeginTxnRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGroupCommitInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TBeginTxnRequest"); err != nil { + if err = oprot.WriteStructBegin("TGroupCommitInfo"); err != nil { goto WriteStructBeginError } if p != nil { @@ -21428,10 +20701,6 @@ func (p *TBeginTxnRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } if err = p.writeField5(oprot); err != nil { fieldId = 5 goto WriteFieldError @@ -21444,23 +20713,6 @@ func (p *TBeginTxnRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != 
nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -21479,12 +20731,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TBeginTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *TGroupCommitInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetGetGroupCommitLoadBeId() { + if err = oprot.WriteFieldBegin("getGroupCommitLoadBeId", thrift.BOOL, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := oprot.WriteBool(*p.GetGroupCommitLoadBeId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21498,12 +20750,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { +func (p *TGroupCommitInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitLoadTableId() { + if err = oprot.WriteFieldBegin("groupCommitLoadTableId", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.User); err != nil { + if err := oprot.WriteI64(*p.GroupCommitLoadTableId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21517,12 +20769,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { +func (p *TGroupCommitInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Passwd); err != nil { + if err := oprot.WriteString(*p.Cluster); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21536,39 +20788,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TBeginTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTableIds() { - if err = oprot.WriteFieldBegin("table_ids", thrift.LIST, 5); err != nil { +func (p *TGroupCommitInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetUpdateLoadData() { + if err = oprot.WriteFieldBegin("updateLoadData", thrift.BOOL, 5); err != nil { goto WriteFieldBeginError } - if err := 
oprot.WriteListBegin(thrift.I64, len(p.TableIds)); err != nil { - return err - } - for _, v := range p.TableIds { - if err := oprot.WriteI64(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteBool(*p.UpdateLoadData); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21582,12 +20807,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { +func (p *TGroupCommitInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("tableId", thrift.I64, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.UserIp); err != nil { + if err := oprot.WriteI64(*p.TableId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21601,12 +20826,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetLabel() { - if err = oprot.WriteFieldBegin("label", thrift.STRING, 7); err != nil { +func (p *TGroupCommitInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetReceiveData() { + if err = oprot.WriteFieldBegin("receiveData", thrift.I64, 7); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Label); err != nil { + if err := oprot.WriteI64(*p.ReceiveData); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21620,1576 +20845,683 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TBeginTxnRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.AuthCode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TBeginTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeout() { - if err = oprot.WriteFieldBegin("timeout", thrift.I64, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Timeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TBeginTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetRequestId() { - if err = oprot.WriteFieldBegin("request_id", thrift.STRUCT, 10); err != nil { - goto WriteFieldBeginError - } - if err := p.RequestId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) -} - -func (p *TBeginTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TBeginTxnRequest) String() string { +func (p *TGroupCommitInfo) String() string { if p == nil { return "" } - return fmt.Sprintf("TBeginTxnRequest(%+v)", *p) + return fmt.Sprintf("TGroupCommitInfo(%+v)", *p) + } -func (p *TBeginTxnRequest) DeepEqual(ano *TBeginTxnRequest) bool { +func (p *TGroupCommitInfo) DeepEqual(ano *TGroupCommitInfo) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.TableIds) { - return false - } - if !p.Field6DeepEqual(ano.UserIp) { + if !p.Field1DeepEqual(ano.GetGroupCommitLoadBeId) { return false } - if !p.Field7DeepEqual(ano.Label) { + if !p.Field2DeepEqual(ano.GroupCommitLoadTableId) { return false } - if !p.Field8DeepEqual(ano.AuthCode) { + if !p.Field3DeepEqual(ano.Cluster) { return false } - if !p.Field9DeepEqual(ano.Timeout) { + if !p.Field5DeepEqual(ano.UpdateLoadData) { return false } - if !p.Field10DeepEqual(ano.RequestId) { + if !p.Field6DeepEqual(ano.TableId) { return false } - if !p.Field11DeepEqual(ano.Token) { + if !p.Field7DeepEqual(ano.ReceiveData) { return false } return true } -func (p *TBeginTxnRequest) Field1DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field1DeepEqual(src *bool) bool { - if p.Cluster == src { + if p.GetGroupCommitLoadBeId == src { return true - } else if p.Cluster == nil || src == nil { + } else if p.GetGroupCommitLoadBeId == nil || src == nil { return false } - if strings.Compare(*p.Cluster, *src) != 0 { + if *p.GetGroupCommitLoadBeId != *src { return false } return true } -func (p *TBeginTxnRequest) Field2DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field2DeepEqual(src *int64) bool { - if p.User == src { + if p.GroupCommitLoadTableId == src { return true - } else if p.User == nil || src == nil { + } else if p.GroupCommitLoadTableId == nil || src == nil { return false } - if strings.Compare(*p.User, *src) != 0 { + if *p.GroupCommitLoadTableId != *src { return false } return true } -func (p *TBeginTxnRequest) Field3DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field3DeepEqual(src *string) bool { - if p.Passwd == src { + if p.Cluster == src { return true - } else if p.Passwd == nil || src == nil { + } else if p.Cluster == nil || src == nil { return false } - if strings.Compare(*p.Passwd, *src) != 0 { + if strings.Compare(*p.Cluster, *src) != 0 { return false } return true } -func (p *TBeginTxnRequest) Field4DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field5DeepEqual(src *bool) bool { - if p.Db == src { + if p.UpdateLoadData == src { return true - } else if p.Db == nil || src == nil { - return false - } - if strings.Compare(*p.Db, 
*src) != 0 { + } else if p.UpdateLoadData == nil || src == nil { return false } - return true -} -func (p *TBeginTxnRequest) Field5DeepEqual(src []int64) bool { - - if len(p.TableIds) != len(src) { + if *p.UpdateLoadData != *src { return false } - for i, v := range p.TableIds { - _src := src[i] - if v != _src { - return false - } - } return true } -func (p *TBeginTxnRequest) Field6DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field6DeepEqual(src *int64) bool { - if p.UserIp == src { + if p.TableId == src { return true - } else if p.UserIp == nil || src == nil { + } else if p.TableId == nil || src == nil { return false } - if strings.Compare(*p.UserIp, *src) != 0 { + if *p.TableId != *src { return false } return true } -func (p *TBeginTxnRequest) Field7DeepEqual(src *string) bool { +func (p *TGroupCommitInfo) Field7DeepEqual(src *int64) bool { - if p.Label == src { + if p.ReceiveData == src { return true - } else if p.Label == nil || src == nil { + } else if p.ReceiveData == nil || src == nil { return false } - if strings.Compare(*p.Label, *src) != 0 { + if *p.ReceiveData != *src { return false } return true } -func (p *TBeginTxnRequest) Field8DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false - } - return true +type TMasterOpRequest struct { + User string `thrift:"user,1,required" frugal:"1,required,string" json:"user"` + Db string `thrift:"db,2,required" frugal:"2,required,string" json:"db"` + Sql string `thrift:"sql,3,required" frugal:"3,required,string" json:"sql"` + ResourceInfo *types.TResourceInfo `thrift:"resourceInfo,4,optional" frugal:"4,optional,types.TResourceInfo" json:"resourceInfo,omitempty"` + Cluster *string `thrift:"cluster,5,optional" frugal:"5,optional,string" json:"cluster,omitempty"` + ExecMemLimit *int64 `thrift:"execMemLimit,6,optional" frugal:"6,optional,i64" json:"execMemLimit,omitempty"` + QueryTimeout *int32 `thrift:"queryTimeout,7,optional" frugal:"7,optional,i32" json:"queryTimeout,omitempty"` + UserIp *string `thrift:"user_ip,8,optional" frugal:"8,optional,string" json:"user_ip,omitempty"` + TimeZone *string `thrift:"time_zone,9,optional" frugal:"9,optional,string" json:"time_zone,omitempty"` + StmtId *int64 `thrift:"stmt_id,10,optional" frugal:"10,optional,i64" json:"stmt_id,omitempty"` + SqlMode *int64 `thrift:"sqlMode,11,optional" frugal:"11,optional,i64" json:"sqlMode,omitempty"` + LoadMemLimit *int64 `thrift:"loadMemLimit,12,optional" frugal:"12,optional,i64" json:"loadMemLimit,omitempty"` + EnableStrictMode *bool `thrift:"enableStrictMode,13,optional" frugal:"13,optional,bool" json:"enableStrictMode,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,14,optional" frugal:"14,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + StmtIdx *int32 `thrift:"stmtIdx,15,optional" frugal:"15,optional,i32" json:"stmtIdx,omitempty"` + QueryOptions *palointernalservice.TQueryOptions `thrift:"query_options,16,optional" frugal:"16,optional,palointernalservice.TQueryOptions" json:"query_options,omitempty"` + QueryId *types.TUniqueId `thrift:"query_id,17,optional" frugal:"17,optional,types.TUniqueId" json:"query_id,omitempty"` + InsertVisibleTimeoutMs *int64 `thrift:"insert_visible_timeout_ms,18,optional" frugal:"18,optional,i64" json:"insert_visible_timeout_ms,omitempty"` + SessionVariables map[string]string `thrift:"session_variables,19,optional" frugal:"19,optional,map" 
json:"session_variables,omitempty"` + FoldConstantByBe *bool `thrift:"foldConstantByBe,20,optional" frugal:"20,optional,bool" json:"foldConstantByBe,omitempty"` + TraceCarrier map[string]string `thrift:"trace_carrier,21,optional" frugal:"21,optional,map" json:"trace_carrier,omitempty"` + ClientNodeHost *string `thrift:"clientNodeHost,22,optional" frugal:"22,optional,string" json:"clientNodeHost,omitempty"` + ClientNodePort *int32 `thrift:"clientNodePort,23,optional" frugal:"23,optional,i32" json:"clientNodePort,omitempty"` + SyncJournalOnly *bool `thrift:"syncJournalOnly,24,optional" frugal:"24,optional,bool" json:"syncJournalOnly,omitempty"` + DefaultCatalog *string `thrift:"defaultCatalog,25,optional" frugal:"25,optional,string" json:"defaultCatalog,omitempty"` + DefaultDatabase *string `thrift:"defaultDatabase,26,optional" frugal:"26,optional,string" json:"defaultDatabase,omitempty"` + CancelQeury *bool `thrift:"cancel_qeury,27,optional" frugal:"27,optional,bool" json:"cancel_qeury,omitempty"` + UserVariables map[string]*exprs.TExprNode `thrift:"user_variables,28,optional" frugal:"28,optional,map" json:"user_variables,omitempty"` + TxnLoadInfo *TTxnLoadInfo `thrift:"txnLoadInfo,29,optional" frugal:"29,optional,TTxnLoadInfo" json:"txnLoadInfo,omitempty"` + GroupCommitInfo *TGroupCommitInfo `thrift:"groupCommitInfo,30,optional" frugal:"30,optional,TGroupCommitInfo" json:"groupCommitInfo,omitempty"` + CloudCluster *string `thrift:"cloud_cluster,1000,optional" frugal:"1000,optional,string" json:"cloud_cluster,omitempty"` + NoAuth *bool `thrift:"noAuth,1001,optional" frugal:"1001,optional,bool" json:"noAuth,omitempty"` } -func (p *TBeginTxnRequest) Field9DeepEqual(src *int64) bool { - if p.Timeout == src { - return true - } else if p.Timeout == nil || src == nil { - return false - } - if *p.Timeout != *src { - return false - } - return true +func NewTMasterOpRequest() *TMasterOpRequest { + return &TMasterOpRequest{} } -func (p *TBeginTxnRequest) Field10DeepEqual(src *types.TUniqueId) bool { - if !p.RequestId.DeepEqual(src) { - return false - } - return true +func (p *TMasterOpRequest) InitDefault() { } -func (p *TBeginTxnRequest) Field11DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false - } - return true +func (p *TMasterOpRequest) GetUser() (v string) { + return p.User } -type TBeginTxnResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` - JobStatus *string `thrift:"job_status,3,optional" frugal:"3,optional,string" json:"job_status,omitempty"` - DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` +func (p *TMasterOpRequest) GetDb() (v string) { + return p.Db } -func NewTBeginTxnResult_() *TBeginTxnResult_ { - return &TBeginTxnResult_{} +func (p *TMasterOpRequest) GetSql() (v string) { + return p.Sql } -func (p *TBeginTxnResult_) InitDefault() { - *p = TBeginTxnResult_{} +var TMasterOpRequest_ResourceInfo_DEFAULT *types.TResourceInfo + +func (p *TMasterOpRequest) GetResourceInfo() (v *types.TResourceInfo) { + if !p.IsSetResourceInfo() { + return TMasterOpRequest_ResourceInfo_DEFAULT + } + return p.ResourceInfo } -var TBeginTxnResult__Status_DEFAULT *status.TStatus +var TMasterOpRequest_Cluster_DEFAULT string -func (p *TBeginTxnResult_) GetStatus() (v 
*status.TStatus) { - if !p.IsSetStatus() { - return TBeginTxnResult__Status_DEFAULT +func (p *TMasterOpRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TMasterOpRequest_Cluster_DEFAULT } - return p.Status + return *p.Cluster } -var TBeginTxnResult__TxnId_DEFAULT int64 +var TMasterOpRequest_ExecMemLimit_DEFAULT int64 -func (p *TBeginTxnResult_) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TBeginTxnResult__TxnId_DEFAULT +func (p *TMasterOpRequest) GetExecMemLimit() (v int64) { + if !p.IsSetExecMemLimit() { + return TMasterOpRequest_ExecMemLimit_DEFAULT } - return *p.TxnId + return *p.ExecMemLimit } -var TBeginTxnResult__JobStatus_DEFAULT string +var TMasterOpRequest_QueryTimeout_DEFAULT int32 -func (p *TBeginTxnResult_) GetJobStatus() (v string) { - if !p.IsSetJobStatus() { - return TBeginTxnResult__JobStatus_DEFAULT +func (p *TMasterOpRequest) GetQueryTimeout() (v int32) { + if !p.IsSetQueryTimeout() { + return TMasterOpRequest_QueryTimeout_DEFAULT } - return *p.JobStatus + return *p.QueryTimeout } -var TBeginTxnResult__DbId_DEFAULT int64 +var TMasterOpRequest_UserIp_DEFAULT string -func (p *TBeginTxnResult_) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TBeginTxnResult__DbId_DEFAULT +func (p *TMasterOpRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TMasterOpRequest_UserIp_DEFAULT } - return *p.DbId -} -func (p *TBeginTxnResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TBeginTxnResult_) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TBeginTxnResult_) SetJobStatus(val *string) { - p.JobStatus = val -} -func (p *TBeginTxnResult_) SetDbId(val *int64) { - p.DbId = val + return *p.UserIp } -var fieldIDToName_TBeginTxnResult_ = map[int16]string{ - 1: "status", - 2: "txn_id", - 3: "job_status", - 4: "db_id", -} +var TMasterOpRequest_TimeZone_DEFAULT string -func (p *TBeginTxnResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TMasterOpRequest) GetTimeZone() (v string) { + if !p.IsSetTimeZone() { + return TMasterOpRequest_TimeZone_DEFAULT + } + return *p.TimeZone } -func (p *TBeginTxnResult_) IsSetTxnId() bool { - return p.TxnId != nil -} +var TMasterOpRequest_StmtId_DEFAULT int64 -func (p *TBeginTxnResult_) IsSetJobStatus() bool { - return p.JobStatus != nil +func (p *TMasterOpRequest) GetStmtId() (v int64) { + if !p.IsSetStmtId() { + return TMasterOpRequest_StmtId_DEFAULT + } + return *p.StmtId } -func (p *TBeginTxnResult_) IsSetDbId() bool { - return p.DbId != nil -} +var TMasterOpRequest_SqlMode_DEFAULT int64 -func (p *TBeginTxnResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TMasterOpRequest) GetSqlMode() (v int64) { + if !p.IsSetSqlMode() { + return TMasterOpRequest_SqlMode_DEFAULT + } + return *p.SqlMode +} - var fieldTypeId thrift.TType - var fieldId int16 +var TMasterOpRequest_LoadMemLimit_DEFAULT int64 - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TMasterOpRequest) GetLoadMemLimit() (v int64) { + if !p.IsSetLoadMemLimit() { + return TMasterOpRequest_LoadMemLimit_DEFAULT } + return *p.LoadMemLimit +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = 
p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +var TMasterOpRequest_EnableStrictMode_DEFAULT bool - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError +func (p *TMasterOpRequest) GetEnableStrictMode() (v bool) { + if !p.IsSetEnableStrictMode() { + return TMasterOpRequest_EnableStrictMode_DEFAULT } + return *p.EnableStrictMode +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +var TMasterOpRequest_CurrentUserIdent_DEFAULT *types.TUserIdentity -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func (p *TMasterOpRequest) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TMasterOpRequest_CurrentUserIdent_DEFAULT + } + return p.CurrentUserIdent } -func (p *TBeginTxnResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err +var TMasterOpRequest_StmtIdx_DEFAULT int32 + +func (p *TMasterOpRequest) GetStmtIdx() (v int32) { + if !p.IsSetStmtIdx() { + return TMasterOpRequest_StmtIdx_DEFAULT } - return nil + return *p.StmtIdx } -func (p *TBeginTxnResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v +var TMasterOpRequest_QueryOptions_DEFAULT *palointernalservice.TQueryOptions + +func (p *TMasterOpRequest) GetQueryOptions() (v *palointernalservice.TQueryOptions) { + if !p.IsSetQueryOptions() { + return TMasterOpRequest_QueryOptions_DEFAULT } - return nil + return p.QueryOptions } -func (p *TBeginTxnResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.JobStatus = &v +var TMasterOpRequest_QueryId_DEFAULT *types.TUniqueId + +func (p *TMasterOpRequest) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TMasterOpRequest_QueryId_DEFAULT } - return nil + return p.QueryId } -func (p *TBeginTxnResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DbId = &v +var TMasterOpRequest_InsertVisibleTimeoutMs_DEFAULT int64 + +func (p *TMasterOpRequest) GetInsertVisibleTimeoutMs() (v int64) { + if !p.IsSetInsertVisibleTimeoutMs() { + return 
TMasterOpRequest_InsertVisibleTimeoutMs_DEFAULT } - return nil + return *p.InsertVisibleTimeoutMs } -func (p *TBeginTxnResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TBeginTxnResult"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } +var TMasterOpRequest_SessionVariables_DEFAULT map[string]string +func (p *TMasterOpRequest) GetSessionVariables() (v map[string]string) { + if !p.IsSetSessionVariables() { + return TMasterOpRequest_SessionVariables_DEFAULT } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return p.SessionVariables } -func (p *TBeginTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TMasterOpRequest_FoldConstantByBe_DEFAULT bool + +func (p *TMasterOpRequest) GetFoldConstantByBe() (v bool) { + if !p.IsSetFoldConstantByBe() { + return TMasterOpRequest_FoldConstantByBe_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return *p.FoldConstantByBe } -func (p *TBeginTxnResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TMasterOpRequest_TraceCarrier_DEFAULT map[string]string + +func (p *TMasterOpRequest) GetTraceCarrier() (v map[string]string) { + if !p.IsSetTraceCarrier() { + return TMasterOpRequest_TraceCarrier_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return p.TraceCarrier } -func (p *TBeginTxnResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetJobStatus() { - if err = oprot.WriteFieldBegin("job_status", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.JobStatus); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TMasterOpRequest_ClientNodeHost_DEFAULT string + +func (p 
*TMasterOpRequest) GetClientNodeHost() (v string) { + if !p.IsSetClientNodeHost() { + return TMasterOpRequest_ClientNodeHost_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return *p.ClientNodeHost } -func (p *TBeginTxnResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TMasterOpRequest_ClientNodePort_DEFAULT int32 + +func (p *TMasterOpRequest) GetClientNodePort() (v int32) { + if !p.IsSetClientNodePort() { + return TMasterOpRequest_ClientNodePort_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return *p.ClientNodePort } -func (p *TBeginTxnResult_) String() string { - if p == nil { - return "" +var TMasterOpRequest_SyncJournalOnly_DEFAULT bool + +func (p *TMasterOpRequest) GetSyncJournalOnly() (v bool) { + if !p.IsSetSyncJournalOnly() { + return TMasterOpRequest_SyncJournalOnly_DEFAULT } - return fmt.Sprintf("TBeginTxnResult_(%+v)", *p) + return *p.SyncJournalOnly } -func (p *TBeginTxnResult_) DeepEqual(ano *TBeginTxnResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.TxnId) { - return false - } - if !p.Field3DeepEqual(ano.JobStatus) { - return false - } - if !p.Field4DeepEqual(ano.DbId) { - return false +var TMasterOpRequest_DefaultCatalog_DEFAULT string + +func (p *TMasterOpRequest) GetDefaultCatalog() (v string) { + if !p.IsSetDefaultCatalog() { + return TMasterOpRequest_DefaultCatalog_DEFAULT } - return true + return *p.DefaultCatalog } -func (p *TBeginTxnResult_) Field1DeepEqual(src *status.TStatus) bool { +var TMasterOpRequest_DefaultDatabase_DEFAULT string - if !p.Status.DeepEqual(src) { - return false +func (p *TMasterOpRequest) GetDefaultDatabase() (v string) { + if !p.IsSetDefaultDatabase() { + return TMasterOpRequest_DefaultDatabase_DEFAULT } - return true + return *p.DefaultDatabase } -func (p *TBeginTxnResult_) Field2DeepEqual(src *int64) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false +var TMasterOpRequest_CancelQeury_DEFAULT bool + +func (p *TMasterOpRequest) GetCancelQeury() (v bool) { + if !p.IsSetCancelQeury() { + return TMasterOpRequest_CancelQeury_DEFAULT } - return true + return *p.CancelQeury } -func (p *TBeginTxnResult_) Field3DeepEqual(src *string) bool { - if p.JobStatus == src { - return true - } else if p.JobStatus == nil || src == nil { - return false +var TMasterOpRequest_UserVariables_DEFAULT map[string]*exprs.TExprNode + +func (p *TMasterOpRequest) GetUserVariables() (v map[string]*exprs.TExprNode) { + if !p.IsSetUserVariables() { + return TMasterOpRequest_UserVariables_DEFAULT } - if strings.Compare(*p.JobStatus, *src) != 0 { - return false + return p.UserVariables +} + +var TMasterOpRequest_TxnLoadInfo_DEFAULT *TTxnLoadInfo + +func (p *TMasterOpRequest) GetTxnLoadInfo() (v *TTxnLoadInfo) { 
+ if !p.IsSetTxnLoadInfo() { + return TMasterOpRequest_TxnLoadInfo_DEFAULT } - return true + return p.TxnLoadInfo } -func (p *TBeginTxnResult_) Field4DeepEqual(src *int64) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false +var TMasterOpRequest_GroupCommitInfo_DEFAULT *TGroupCommitInfo + +func (p *TMasterOpRequest) GetGroupCommitInfo() (v *TGroupCommitInfo) { + if !p.IsSetGroupCommitInfo() { + return TMasterOpRequest_GroupCommitInfo_DEFAULT } - if *p.DbId != *src { - return false + return p.GroupCommitInfo +} + +var TMasterOpRequest_CloudCluster_DEFAULT string + +func (p *TMasterOpRequest) GetCloudCluster() (v string) { + if !p.IsSetCloudCluster() { + return TMasterOpRequest_CloudCluster_DEFAULT } - return true + return *p.CloudCluster } -type TStreamLoadPutRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` - Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` - Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - LoadId *types.TUniqueId `thrift:"loadId,7,required" frugal:"7,required,types.TUniqueId" json:"loadId"` - TxnId int64 `thrift:"txnId,8,required" frugal:"8,required,i64" json:"txnId"` - FileType types.TFileType `thrift:"fileType,9,required" frugal:"9,required,TFileType" json:"fileType"` - FormatType plannodes.TFileFormatType `thrift:"formatType,10,required" frugal:"10,required,TFileFormatType" json:"formatType"` - Path *string `thrift:"path,11,optional" frugal:"11,optional,string" json:"path,omitempty"` - Columns *string `thrift:"columns,12,optional" frugal:"12,optional,string" json:"columns,omitempty"` - Where *string `thrift:"where,13,optional" frugal:"13,optional,string" json:"where,omitempty"` - ColumnSeparator *string `thrift:"columnSeparator,14,optional" frugal:"14,optional,string" json:"columnSeparator,omitempty"` - Partitions *string `thrift:"partitions,15,optional" frugal:"15,optional,string" json:"partitions,omitempty"` - AuthCode *int64 `thrift:"auth_code,16,optional" frugal:"16,optional,i64" json:"auth_code,omitempty"` - Negative *bool `thrift:"negative,17,optional" frugal:"17,optional,bool" json:"negative,omitempty"` - Timeout *int32 `thrift:"timeout,18,optional" frugal:"18,optional,i32" json:"timeout,omitempty"` - StrictMode *bool `thrift:"strictMode,19,optional" frugal:"19,optional,bool" json:"strictMode,omitempty"` - Timezone *string `thrift:"timezone,20,optional" frugal:"20,optional,string" json:"timezone,omitempty"` - ExecMemLimit *int64 `thrift:"execMemLimit,21,optional" frugal:"21,optional,i64" json:"execMemLimit,omitempty"` - IsTempPartition *bool `thrift:"isTempPartition,22,optional" frugal:"22,optional,bool" json:"isTempPartition,omitempty"` - StripOuterArray *bool `thrift:"strip_outer_array,23,optional" frugal:"23,optional,bool" json:"strip_outer_array,omitempty"` - Jsonpaths *string `thrift:"jsonpaths,24,optional" frugal:"24,optional,string" json:"jsonpaths,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,25,optional" frugal:"25,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` - JsonRoot *string `thrift:"json_root,26,optional" frugal:"26,optional,string" json:"json_root,omitempty"` - MergeType *types.TMergeType 
`thrift:"merge_type,27,optional" frugal:"27,optional,TMergeType" json:"merge_type,omitempty"` - DeleteCondition *string `thrift:"delete_condition,28,optional" frugal:"28,optional,string" json:"delete_condition,omitempty"` - SequenceCol *string `thrift:"sequence_col,29,optional" frugal:"29,optional,string" json:"sequence_col,omitempty"` - NumAsString *bool `thrift:"num_as_string,30,optional" frugal:"30,optional,bool" json:"num_as_string,omitempty"` - FuzzyParse *bool `thrift:"fuzzy_parse,31,optional" frugal:"31,optional,bool" json:"fuzzy_parse,omitempty"` - LineDelimiter *string `thrift:"line_delimiter,32,optional" frugal:"32,optional,string" json:"line_delimiter,omitempty"` - ReadJsonByLine *bool `thrift:"read_json_by_line,33,optional" frugal:"33,optional,bool" json:"read_json_by_line,omitempty"` - Token *string `thrift:"token,34,optional" frugal:"34,optional,string" json:"token,omitempty"` - SendBatchParallelism *int32 `thrift:"send_batch_parallelism,35,optional" frugal:"35,optional,i32" json:"send_batch_parallelism,omitempty"` - MaxFilterRatio *float64 `thrift:"max_filter_ratio,36,optional" frugal:"36,optional,double" json:"max_filter_ratio,omitempty"` - LoadToSingleTablet *bool `thrift:"load_to_single_tablet,37,optional" frugal:"37,optional,bool" json:"load_to_single_tablet,omitempty"` - HeaderType *string `thrift:"header_type,38,optional" frugal:"38,optional,string" json:"header_type,omitempty"` - HiddenColumns *string `thrift:"hidden_columns,39,optional" frugal:"39,optional,string" json:"hidden_columns,omitempty"` - CompressType *plannodes.TFileCompressType `thrift:"compress_type,40,optional" frugal:"40,optional,TFileCompressType" json:"compress_type,omitempty"` - FileSize *int64 `thrift:"file_size,41,optional" frugal:"41,optional,i64" json:"file_size,omitempty"` - TrimDoubleQuotes *bool `thrift:"trim_double_quotes,42,optional" frugal:"42,optional,bool" json:"trim_double_quotes,omitempty"` - SkipLines *int32 `thrift:"skip_lines,43,optional" frugal:"43,optional,i32" json:"skip_lines,omitempty"` - EnableProfile *bool `thrift:"enable_profile,44,optional" frugal:"44,optional,bool" json:"enable_profile,omitempty"` - PartialUpdate *bool `thrift:"partial_update,45,optional" frugal:"45,optional,bool" json:"partial_update,omitempty"` - TableNames []string `thrift:"table_names,46,optional" frugal:"46,optional,list" json:"table_names,omitempty"` - LoadSql *string `thrift:"load_sql,47,optional" frugal:"47,optional,string" json:"load_sql,omitempty"` - BackendId *int64 `thrift:"backend_id,48,optional" frugal:"48,optional,i64" json:"backend_id,omitempty"` - Version *int32 `thrift:"version,49,optional" frugal:"49,optional,i32" json:"version,omitempty"` - Label *string `thrift:"label,50,optional" frugal:"50,optional,string" json:"label,omitempty"` - Enclose *int8 `thrift:"enclose,51,optional" frugal:"51,optional,i8" json:"enclose,omitempty"` - Escape *int8 `thrift:"escape,52,optional" frugal:"52,optional,i8" json:"escape,omitempty"` - MemtableOnSinkNode *bool `thrift:"memtable_on_sink_node,53,optional" frugal:"53,optional,bool" json:"memtable_on_sink_node,omitempty"` -} +var TMasterOpRequest_NoAuth_DEFAULT bool -func NewTStreamLoadPutRequest() *TStreamLoadPutRequest { - return &TStreamLoadPutRequest{} +func (p *TMasterOpRequest) GetNoAuth() (v bool) { + if !p.IsSetNoAuth() { + return TMasterOpRequest_NoAuth_DEFAULT + } + return *p.NoAuth } - -func (p *TStreamLoadPutRequest) InitDefault() { - *p = TStreamLoadPutRequest{} +func (p *TMasterOpRequest) SetUser(val string) { + p.User = val } - -var 
TStreamLoadPutRequest_Cluster_DEFAULT string - -func (p *TStreamLoadPutRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TStreamLoadPutRequest_Cluster_DEFAULT - } - return *p.Cluster +func (p *TMasterOpRequest) SetDb(val string) { + p.Db = val } - -func (p *TStreamLoadPutRequest) GetUser() (v string) { - return p.User +func (p *TMasterOpRequest) SetSql(val string) { + p.Sql = val } - -func (p *TStreamLoadPutRequest) GetPasswd() (v string) { - return p.Passwd +func (p *TMasterOpRequest) SetResourceInfo(val *types.TResourceInfo) { + p.ResourceInfo = val } - -func (p *TStreamLoadPutRequest) GetDb() (v string) { - return p.Db +func (p *TMasterOpRequest) SetCluster(val *string) { + p.Cluster = val } - -func (p *TStreamLoadPutRequest) GetTbl() (v string) { - return p.Tbl +func (p *TMasterOpRequest) SetExecMemLimit(val *int64) { + p.ExecMemLimit = val } - -var TStreamLoadPutRequest_UserIp_DEFAULT string - -func (p *TStreamLoadPutRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TStreamLoadPutRequest_UserIp_DEFAULT - } - return *p.UserIp +func (p *TMasterOpRequest) SetQueryTimeout(val *int32) { + p.QueryTimeout = val } - -var TStreamLoadPutRequest_LoadId_DEFAULT *types.TUniqueId - -func (p *TStreamLoadPutRequest) GetLoadId() (v *types.TUniqueId) { - if !p.IsSetLoadId() { - return TStreamLoadPutRequest_LoadId_DEFAULT - } - return p.LoadId +func (p *TMasterOpRequest) SetUserIp(val *string) { + p.UserIp = val } - -func (p *TStreamLoadPutRequest) GetTxnId() (v int64) { - return p.TxnId +func (p *TMasterOpRequest) SetTimeZone(val *string) { + p.TimeZone = val } - -func (p *TStreamLoadPutRequest) GetFileType() (v types.TFileType) { - return p.FileType +func (p *TMasterOpRequest) SetStmtId(val *int64) { + p.StmtId = val } - -func (p *TStreamLoadPutRequest) GetFormatType() (v plannodes.TFileFormatType) { - return p.FormatType +func (p *TMasterOpRequest) SetSqlMode(val *int64) { + p.SqlMode = val } - -var TStreamLoadPutRequest_Path_DEFAULT string - -func (p *TStreamLoadPutRequest) GetPath() (v string) { - if !p.IsSetPath() { - return TStreamLoadPutRequest_Path_DEFAULT - } - return *p.Path +func (p *TMasterOpRequest) SetLoadMemLimit(val *int64) { + p.LoadMemLimit = val } - -var TStreamLoadPutRequest_Columns_DEFAULT string - -func (p *TStreamLoadPutRequest) GetColumns() (v string) { - if !p.IsSetColumns() { - return TStreamLoadPutRequest_Columns_DEFAULT - } - return *p.Columns +func (p *TMasterOpRequest) SetEnableStrictMode(val *bool) { + p.EnableStrictMode = val } - -var TStreamLoadPutRequest_Where_DEFAULT string - -func (p *TStreamLoadPutRequest) GetWhere() (v string) { - if !p.IsSetWhere() { - return TStreamLoadPutRequest_Where_DEFAULT - } - return *p.Where +func (p *TMasterOpRequest) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val } - -var TStreamLoadPutRequest_ColumnSeparator_DEFAULT string - -func (p *TStreamLoadPutRequest) GetColumnSeparator() (v string) { - if !p.IsSetColumnSeparator() { - return TStreamLoadPutRequest_ColumnSeparator_DEFAULT - } - return *p.ColumnSeparator +func (p *TMasterOpRequest) SetStmtIdx(val *int32) { + p.StmtIdx = val } - -var TStreamLoadPutRequest_Partitions_DEFAULT string - -func (p *TStreamLoadPutRequest) GetPartitions() (v string) { - if !p.IsSetPartitions() { - return TStreamLoadPutRequest_Partitions_DEFAULT - } - return *p.Partitions +func (p *TMasterOpRequest) SetQueryOptions(val *palointernalservice.TQueryOptions) { + p.QueryOptions = val } - -var TStreamLoadPutRequest_AuthCode_DEFAULT int64 - -func (p 
*TStreamLoadPutRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TStreamLoadPutRequest_AuthCode_DEFAULT - } - return *p.AuthCode +func (p *TMasterOpRequest) SetQueryId(val *types.TUniqueId) { + p.QueryId = val } - -var TStreamLoadPutRequest_Negative_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetNegative() (v bool) { - if !p.IsSetNegative() { - return TStreamLoadPutRequest_Negative_DEFAULT - } - return *p.Negative +func (p *TMasterOpRequest) SetInsertVisibleTimeoutMs(val *int64) { + p.InsertVisibleTimeoutMs = val } - -var TStreamLoadPutRequest_Timeout_DEFAULT int32 - -func (p *TStreamLoadPutRequest) GetTimeout() (v int32) { - if !p.IsSetTimeout() { - return TStreamLoadPutRequest_Timeout_DEFAULT - } - return *p.Timeout +func (p *TMasterOpRequest) SetSessionVariables(val map[string]string) { + p.SessionVariables = val } - -var TStreamLoadPutRequest_StrictMode_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetStrictMode() (v bool) { - if !p.IsSetStrictMode() { - return TStreamLoadPutRequest_StrictMode_DEFAULT - } - return *p.StrictMode +func (p *TMasterOpRequest) SetFoldConstantByBe(val *bool) { + p.FoldConstantByBe = val } - -var TStreamLoadPutRequest_Timezone_DEFAULT string - -func (p *TStreamLoadPutRequest) GetTimezone() (v string) { - if !p.IsSetTimezone() { - return TStreamLoadPutRequest_Timezone_DEFAULT - } - return *p.Timezone +func (p *TMasterOpRequest) SetTraceCarrier(val map[string]string) { + p.TraceCarrier = val } - -var TStreamLoadPutRequest_ExecMemLimit_DEFAULT int64 - -func (p *TStreamLoadPutRequest) GetExecMemLimit() (v int64) { - if !p.IsSetExecMemLimit() { - return TStreamLoadPutRequest_ExecMemLimit_DEFAULT - } - return *p.ExecMemLimit +func (p *TMasterOpRequest) SetClientNodeHost(val *string) { + p.ClientNodeHost = val } - -var TStreamLoadPutRequest_IsTempPartition_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetIsTempPartition() (v bool) { - if !p.IsSetIsTempPartition() { - return TStreamLoadPutRequest_IsTempPartition_DEFAULT - } - return *p.IsTempPartition +func (p *TMasterOpRequest) SetClientNodePort(val *int32) { + p.ClientNodePort = val } - -var TStreamLoadPutRequest_StripOuterArray_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetStripOuterArray() (v bool) { - if !p.IsSetStripOuterArray() { - return TStreamLoadPutRequest_StripOuterArray_DEFAULT - } - return *p.StripOuterArray +func (p *TMasterOpRequest) SetSyncJournalOnly(val *bool) { + p.SyncJournalOnly = val } - -var TStreamLoadPutRequest_Jsonpaths_DEFAULT string - -func (p *TStreamLoadPutRequest) GetJsonpaths() (v string) { - if !p.IsSetJsonpaths() { - return TStreamLoadPutRequest_Jsonpaths_DEFAULT - } - return *p.Jsonpaths +func (p *TMasterOpRequest) SetDefaultCatalog(val *string) { + p.DefaultCatalog = val } - -var TStreamLoadPutRequest_ThriftRpcTimeoutMs_DEFAULT int64 - -func (p *TStreamLoadPutRequest) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TStreamLoadPutRequest_ThriftRpcTimeoutMs_DEFAULT - } - return *p.ThriftRpcTimeoutMs +func (p *TMasterOpRequest) SetDefaultDatabase(val *string) { + p.DefaultDatabase = val } - -var TStreamLoadPutRequest_JsonRoot_DEFAULT string - -func (p *TStreamLoadPutRequest) GetJsonRoot() (v string) { - if !p.IsSetJsonRoot() { - return TStreamLoadPutRequest_JsonRoot_DEFAULT - } - return *p.JsonRoot +func (p *TMasterOpRequest) SetCancelQeury(val *bool) { + p.CancelQeury = val } - -var TStreamLoadPutRequest_MergeType_DEFAULT types.TMergeType - -func (p *TStreamLoadPutRequest) GetMergeType() (v types.TMergeType) { - if 
!p.IsSetMergeType() { - return TStreamLoadPutRequest_MergeType_DEFAULT - } - return *p.MergeType +func (p *TMasterOpRequest) SetUserVariables(val map[string]*exprs.TExprNode) { + p.UserVariables = val } - -var TStreamLoadPutRequest_DeleteCondition_DEFAULT string - -func (p *TStreamLoadPutRequest) GetDeleteCondition() (v string) { - if !p.IsSetDeleteCondition() { - return TStreamLoadPutRequest_DeleteCondition_DEFAULT - } - return *p.DeleteCondition +func (p *TMasterOpRequest) SetTxnLoadInfo(val *TTxnLoadInfo) { + p.TxnLoadInfo = val } - -var TStreamLoadPutRequest_SequenceCol_DEFAULT string - -func (p *TStreamLoadPutRequest) GetSequenceCol() (v string) { - if !p.IsSetSequenceCol() { - return TStreamLoadPutRequest_SequenceCol_DEFAULT - } - return *p.SequenceCol +func (p *TMasterOpRequest) SetGroupCommitInfo(val *TGroupCommitInfo) { + p.GroupCommitInfo = val } - -var TStreamLoadPutRequest_NumAsString_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetNumAsString() (v bool) { - if !p.IsSetNumAsString() { - return TStreamLoadPutRequest_NumAsString_DEFAULT - } - return *p.NumAsString +func (p *TMasterOpRequest) SetCloudCluster(val *string) { + p.CloudCluster = val } - -var TStreamLoadPutRequest_FuzzyParse_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetFuzzyParse() (v bool) { - if !p.IsSetFuzzyParse() { - return TStreamLoadPutRequest_FuzzyParse_DEFAULT - } - return *p.FuzzyParse +func (p *TMasterOpRequest) SetNoAuth(val *bool) { + p.NoAuth = val } -var TStreamLoadPutRequest_LineDelimiter_DEFAULT string - -func (p *TStreamLoadPutRequest) GetLineDelimiter() (v string) { - if !p.IsSetLineDelimiter() { - return TStreamLoadPutRequest_LineDelimiter_DEFAULT - } - return *p.LineDelimiter +var fieldIDToName_TMasterOpRequest = map[int16]string{ + 1: "user", + 2: "db", + 3: "sql", + 4: "resourceInfo", + 5: "cluster", + 6: "execMemLimit", + 7: "queryTimeout", + 8: "user_ip", + 9: "time_zone", + 10: "stmt_id", + 11: "sqlMode", + 12: "loadMemLimit", + 13: "enableStrictMode", + 14: "current_user_ident", + 15: "stmtIdx", + 16: "query_options", + 17: "query_id", + 18: "insert_visible_timeout_ms", + 19: "session_variables", + 20: "foldConstantByBe", + 21: "trace_carrier", + 22: "clientNodeHost", + 23: "clientNodePort", + 24: "syncJournalOnly", + 25: "defaultCatalog", + 26: "defaultDatabase", + 27: "cancel_qeury", + 28: "user_variables", + 29: "txnLoadInfo", + 30: "groupCommitInfo", + 1000: "cloud_cluster", + 1001: "noAuth", } -var TStreamLoadPutRequest_ReadJsonByLine_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetReadJsonByLine() (v bool) { - if !p.IsSetReadJsonByLine() { - return TStreamLoadPutRequest_ReadJsonByLine_DEFAULT - } - return *p.ReadJsonByLine +func (p *TMasterOpRequest) IsSetResourceInfo() bool { + return p.ResourceInfo != nil } -var TStreamLoadPutRequest_Token_DEFAULT string - -func (p *TStreamLoadPutRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TStreamLoadPutRequest_Token_DEFAULT - } - return *p.Token +func (p *TMasterOpRequest) IsSetCluster() bool { + return p.Cluster != nil } -var TStreamLoadPutRequest_SendBatchParallelism_DEFAULT int32 - -func (p *TStreamLoadPutRequest) GetSendBatchParallelism() (v int32) { - if !p.IsSetSendBatchParallelism() { - return TStreamLoadPutRequest_SendBatchParallelism_DEFAULT - } - return *p.SendBatchParallelism +func (p *TMasterOpRequest) IsSetExecMemLimit() bool { + return p.ExecMemLimit != nil } -var TStreamLoadPutRequest_MaxFilterRatio_DEFAULT float64 - -func (p *TStreamLoadPutRequest) GetMaxFilterRatio() (v float64) { - if 
!p.IsSetMaxFilterRatio() { - return TStreamLoadPutRequest_MaxFilterRatio_DEFAULT - } - return *p.MaxFilterRatio +func (p *TMasterOpRequest) IsSetQueryTimeout() bool { + return p.QueryTimeout != nil } -var TStreamLoadPutRequest_LoadToSingleTablet_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetLoadToSingleTablet() (v bool) { - if !p.IsSetLoadToSingleTablet() { - return TStreamLoadPutRequest_LoadToSingleTablet_DEFAULT - } - return *p.LoadToSingleTablet +func (p *TMasterOpRequest) IsSetUserIp() bool { + return p.UserIp != nil } -var TStreamLoadPutRequest_HeaderType_DEFAULT string - -func (p *TStreamLoadPutRequest) GetHeaderType() (v string) { - if !p.IsSetHeaderType() { - return TStreamLoadPutRequest_HeaderType_DEFAULT - } - return *p.HeaderType +func (p *TMasterOpRequest) IsSetTimeZone() bool { + return p.TimeZone != nil } -var TStreamLoadPutRequest_HiddenColumns_DEFAULT string - -func (p *TStreamLoadPutRequest) GetHiddenColumns() (v string) { - if !p.IsSetHiddenColumns() { - return TStreamLoadPutRequest_HiddenColumns_DEFAULT - } - return *p.HiddenColumns +func (p *TMasterOpRequest) IsSetStmtId() bool { + return p.StmtId != nil } -var TStreamLoadPutRequest_CompressType_DEFAULT plannodes.TFileCompressType - -func (p *TStreamLoadPutRequest) GetCompressType() (v plannodes.TFileCompressType) { - if !p.IsSetCompressType() { - return TStreamLoadPutRequest_CompressType_DEFAULT - } - return *p.CompressType +func (p *TMasterOpRequest) IsSetSqlMode() bool { + return p.SqlMode != nil } -var TStreamLoadPutRequest_FileSize_DEFAULT int64 - -func (p *TStreamLoadPutRequest) GetFileSize() (v int64) { - if !p.IsSetFileSize() { - return TStreamLoadPutRequest_FileSize_DEFAULT - } - return *p.FileSize +func (p *TMasterOpRequest) IsSetLoadMemLimit() bool { + return p.LoadMemLimit != nil } -var TStreamLoadPutRequest_TrimDoubleQuotes_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetTrimDoubleQuotes() (v bool) { - if !p.IsSetTrimDoubleQuotes() { - return TStreamLoadPutRequest_TrimDoubleQuotes_DEFAULT - } - return *p.TrimDoubleQuotes +func (p *TMasterOpRequest) IsSetEnableStrictMode() bool { + return p.EnableStrictMode != nil } -var TStreamLoadPutRequest_SkipLines_DEFAULT int32 - -func (p *TStreamLoadPutRequest) GetSkipLines() (v int32) { - if !p.IsSetSkipLines() { - return TStreamLoadPutRequest_SkipLines_DEFAULT - } - return *p.SkipLines +func (p *TMasterOpRequest) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil } -var TStreamLoadPutRequest_EnableProfile_DEFAULT bool +func (p *TMasterOpRequest) IsSetStmtIdx() bool { + return p.StmtIdx != nil +} -func (p *TStreamLoadPutRequest) GetEnableProfile() (v bool) { - if !p.IsSetEnableProfile() { - return TStreamLoadPutRequest_EnableProfile_DEFAULT - } - return *p.EnableProfile +func (p *TMasterOpRequest) IsSetQueryOptions() bool { + return p.QueryOptions != nil } -var TStreamLoadPutRequest_PartialUpdate_DEFAULT bool +func (p *TMasterOpRequest) IsSetQueryId() bool { + return p.QueryId != nil +} -func (p *TStreamLoadPutRequest) GetPartialUpdate() (v bool) { - if !p.IsSetPartialUpdate() { - return TStreamLoadPutRequest_PartialUpdate_DEFAULT - } - return *p.PartialUpdate +func (p *TMasterOpRequest) IsSetInsertVisibleTimeoutMs() bool { + return p.InsertVisibleTimeoutMs != nil } -var TStreamLoadPutRequest_TableNames_DEFAULT []string +func (p *TMasterOpRequest) IsSetSessionVariables() bool { + return p.SessionVariables != nil +} -func (p *TStreamLoadPutRequest) GetTableNames() (v []string) { - if !p.IsSetTableNames() { - return 
TStreamLoadPutRequest_TableNames_DEFAULT - } - return p.TableNames +func (p *TMasterOpRequest) IsSetFoldConstantByBe() bool { + return p.FoldConstantByBe != nil } -var TStreamLoadPutRequest_LoadSql_DEFAULT string +func (p *TMasterOpRequest) IsSetTraceCarrier() bool { + return p.TraceCarrier != nil +} -func (p *TStreamLoadPutRequest) GetLoadSql() (v string) { - if !p.IsSetLoadSql() { - return TStreamLoadPutRequest_LoadSql_DEFAULT - } - return *p.LoadSql +func (p *TMasterOpRequest) IsSetClientNodeHost() bool { + return p.ClientNodeHost != nil } -var TStreamLoadPutRequest_BackendId_DEFAULT int64 +func (p *TMasterOpRequest) IsSetClientNodePort() bool { + return p.ClientNodePort != nil +} -func (p *TStreamLoadPutRequest) GetBackendId() (v int64) { - if !p.IsSetBackendId() { - return TStreamLoadPutRequest_BackendId_DEFAULT - } - return *p.BackendId +func (p *TMasterOpRequest) IsSetSyncJournalOnly() bool { + return p.SyncJournalOnly != nil } -var TStreamLoadPutRequest_Version_DEFAULT int32 +func (p *TMasterOpRequest) IsSetDefaultCatalog() bool { + return p.DefaultCatalog != nil +} -func (p *TStreamLoadPutRequest) GetVersion() (v int32) { - if !p.IsSetVersion() { - return TStreamLoadPutRequest_Version_DEFAULT - } - return *p.Version +func (p *TMasterOpRequest) IsSetDefaultDatabase() bool { + return p.DefaultDatabase != nil } -var TStreamLoadPutRequest_Label_DEFAULT string +func (p *TMasterOpRequest) IsSetCancelQeury() bool { + return p.CancelQeury != nil +} -func (p *TStreamLoadPutRequest) GetLabel() (v string) { - if !p.IsSetLabel() { - return TStreamLoadPutRequest_Label_DEFAULT - } - return *p.Label +func (p *TMasterOpRequest) IsSetUserVariables() bool { + return p.UserVariables != nil } -var TStreamLoadPutRequest_Enclose_DEFAULT int8 +func (p *TMasterOpRequest) IsSetTxnLoadInfo() bool { + return p.TxnLoadInfo != nil +} -func (p *TStreamLoadPutRequest) GetEnclose() (v int8) { - if !p.IsSetEnclose() { - return TStreamLoadPutRequest_Enclose_DEFAULT - } - return *p.Enclose +func (p *TMasterOpRequest) IsSetGroupCommitInfo() bool { + return p.GroupCommitInfo != nil } -var TStreamLoadPutRequest_Escape_DEFAULT int8 +func (p *TMasterOpRequest) IsSetCloudCluster() bool { + return p.CloudCluster != nil +} -func (p *TStreamLoadPutRequest) GetEscape() (v int8) { - if !p.IsSetEscape() { - return TStreamLoadPutRequest_Escape_DEFAULT - } - return *p.Escape +func (p *TMasterOpRequest) IsSetNoAuth() bool { + return p.NoAuth != nil } -var TStreamLoadPutRequest_MemtableOnSinkNode_DEFAULT bool - -func (p *TStreamLoadPutRequest) GetMemtableOnSinkNode() (v bool) { - if !p.IsSetMemtableOnSinkNode() { - return TStreamLoadPutRequest_MemtableOnSinkNode_DEFAULT - } - return *p.MemtableOnSinkNode -} -func (p *TStreamLoadPutRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TStreamLoadPutRequest) SetUser(val string) { - p.User = val -} -func (p *TStreamLoadPutRequest) SetPasswd(val string) { - p.Passwd = val -} -func (p *TStreamLoadPutRequest) SetDb(val string) { - p.Db = val -} -func (p *TStreamLoadPutRequest) SetTbl(val string) { - p.Tbl = val -} -func (p *TStreamLoadPutRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TStreamLoadPutRequest) SetLoadId(val *types.TUniqueId) { - p.LoadId = val -} -func (p *TStreamLoadPutRequest) SetTxnId(val int64) { - p.TxnId = val -} -func (p *TStreamLoadPutRequest) SetFileType(val types.TFileType) { - p.FileType = val -} -func (p *TStreamLoadPutRequest) SetFormatType(val plannodes.TFileFormatType) { - p.FormatType = val -} -func (p *TStreamLoadPutRequest) 
SetPath(val *string) { - p.Path = val -} -func (p *TStreamLoadPutRequest) SetColumns(val *string) { - p.Columns = val -} -func (p *TStreamLoadPutRequest) SetWhere(val *string) { - p.Where = val -} -func (p *TStreamLoadPutRequest) SetColumnSeparator(val *string) { - p.ColumnSeparator = val -} -func (p *TStreamLoadPutRequest) SetPartitions(val *string) { - p.Partitions = val -} -func (p *TStreamLoadPutRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TStreamLoadPutRequest) SetNegative(val *bool) { - p.Negative = val -} -func (p *TStreamLoadPutRequest) SetTimeout(val *int32) { - p.Timeout = val -} -func (p *TStreamLoadPutRequest) SetStrictMode(val *bool) { - p.StrictMode = val -} -func (p *TStreamLoadPutRequest) SetTimezone(val *string) { - p.Timezone = val -} -func (p *TStreamLoadPutRequest) SetExecMemLimit(val *int64) { - p.ExecMemLimit = val -} -func (p *TStreamLoadPutRequest) SetIsTempPartition(val *bool) { - p.IsTempPartition = val -} -func (p *TStreamLoadPutRequest) SetStripOuterArray(val *bool) { - p.StripOuterArray = val -} -func (p *TStreamLoadPutRequest) SetJsonpaths(val *string) { - p.Jsonpaths = val -} -func (p *TStreamLoadPutRequest) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val -} -func (p *TStreamLoadPutRequest) SetJsonRoot(val *string) { - p.JsonRoot = val -} -func (p *TStreamLoadPutRequest) SetMergeType(val *types.TMergeType) { - p.MergeType = val -} -func (p *TStreamLoadPutRequest) SetDeleteCondition(val *string) { - p.DeleteCondition = val -} -func (p *TStreamLoadPutRequest) SetSequenceCol(val *string) { - p.SequenceCol = val -} -func (p *TStreamLoadPutRequest) SetNumAsString(val *bool) { - p.NumAsString = val -} -func (p *TStreamLoadPutRequest) SetFuzzyParse(val *bool) { - p.FuzzyParse = val -} -func (p *TStreamLoadPutRequest) SetLineDelimiter(val *string) { - p.LineDelimiter = val -} -func (p *TStreamLoadPutRequest) SetReadJsonByLine(val *bool) { - p.ReadJsonByLine = val -} -func (p *TStreamLoadPutRequest) SetToken(val *string) { - p.Token = val -} -func (p *TStreamLoadPutRequest) SetSendBatchParallelism(val *int32) { - p.SendBatchParallelism = val -} -func (p *TStreamLoadPutRequest) SetMaxFilterRatio(val *float64) { - p.MaxFilterRatio = val -} -func (p *TStreamLoadPutRequest) SetLoadToSingleTablet(val *bool) { - p.LoadToSingleTablet = val -} -func (p *TStreamLoadPutRequest) SetHeaderType(val *string) { - p.HeaderType = val -} -func (p *TStreamLoadPutRequest) SetHiddenColumns(val *string) { - p.HiddenColumns = val -} -func (p *TStreamLoadPutRequest) SetCompressType(val *plannodes.TFileCompressType) { - p.CompressType = val -} -func (p *TStreamLoadPutRequest) SetFileSize(val *int64) { - p.FileSize = val -} -func (p *TStreamLoadPutRequest) SetTrimDoubleQuotes(val *bool) { - p.TrimDoubleQuotes = val -} -func (p *TStreamLoadPutRequest) SetSkipLines(val *int32) { - p.SkipLines = val -} -func (p *TStreamLoadPutRequest) SetEnableProfile(val *bool) { - p.EnableProfile = val -} -func (p *TStreamLoadPutRequest) SetPartialUpdate(val *bool) { - p.PartialUpdate = val -} -func (p *TStreamLoadPutRequest) SetTableNames(val []string) { - p.TableNames = val -} -func (p *TStreamLoadPutRequest) SetLoadSql(val *string) { - p.LoadSql = val -} -func (p *TStreamLoadPutRequest) SetBackendId(val *int64) { - p.BackendId = val -} -func (p *TStreamLoadPutRequest) SetVersion(val *int32) { - p.Version = val -} -func (p *TStreamLoadPutRequest) SetLabel(val *string) { - p.Label = val -} -func (p *TStreamLoadPutRequest) SetEnclose(val *int8) { - p.Enclose = val -} -func 
(p *TStreamLoadPutRequest) SetEscape(val *int8) { - p.Escape = val -} -func (p *TStreamLoadPutRequest) SetMemtableOnSinkNode(val *bool) { - p.MemtableOnSinkNode = val -} - -var fieldIDToName_TStreamLoadPutRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "tbl", - 6: "user_ip", - 7: "loadId", - 8: "txnId", - 9: "fileType", - 10: "formatType", - 11: "path", - 12: "columns", - 13: "where", - 14: "columnSeparator", - 15: "partitions", - 16: "auth_code", - 17: "negative", - 18: "timeout", - 19: "strictMode", - 20: "timezone", - 21: "execMemLimit", - 22: "isTempPartition", - 23: "strip_outer_array", - 24: "jsonpaths", - 25: "thrift_rpc_timeout_ms", - 26: "json_root", - 27: "merge_type", - 28: "delete_condition", - 29: "sequence_col", - 30: "num_as_string", - 31: "fuzzy_parse", - 32: "line_delimiter", - 33: "read_json_by_line", - 34: "token", - 35: "send_batch_parallelism", - 36: "max_filter_ratio", - 37: "load_to_single_tablet", - 38: "header_type", - 39: "hidden_columns", - 40: "compress_type", - 41: "file_size", - 42: "trim_double_quotes", - 43: "skip_lines", - 44: "enable_profile", - 45: "partial_update", - 46: "table_names", - 47: "load_sql", - 48: "backend_id", - 49: "version", - 50: "label", - 51: "enclose", - 52: "escape", - 53: "memtable_on_sink_node", -} - -func (p *TStreamLoadPutRequest) IsSetCluster() bool { - return p.Cluster != nil -} - -func (p *TStreamLoadPutRequest) IsSetUserIp() bool { - return p.UserIp != nil -} - -func (p *TStreamLoadPutRequest) IsSetLoadId() bool { - return p.LoadId != nil -} - -func (p *TStreamLoadPutRequest) IsSetPath() bool { - return p.Path != nil -} - -func (p *TStreamLoadPutRequest) IsSetColumns() bool { - return p.Columns != nil -} - -func (p *TStreamLoadPutRequest) IsSetWhere() bool { - return p.Where != nil -} - -func (p *TStreamLoadPutRequest) IsSetColumnSeparator() bool { - return p.ColumnSeparator != nil -} - -func (p *TStreamLoadPutRequest) IsSetPartitions() bool { - return p.Partitions != nil -} - -func (p *TStreamLoadPutRequest) IsSetAuthCode() bool { - return p.AuthCode != nil -} - -func (p *TStreamLoadPutRequest) IsSetNegative() bool { - return p.Negative != nil -} - -func (p *TStreamLoadPutRequest) IsSetTimeout() bool { - return p.Timeout != nil -} - -func (p *TStreamLoadPutRequest) IsSetStrictMode() bool { - return p.StrictMode != nil -} - -func (p *TStreamLoadPutRequest) IsSetTimezone() bool { - return p.Timezone != nil -} - -func (p *TStreamLoadPutRequest) IsSetExecMemLimit() bool { - return p.ExecMemLimit != nil -} - -func (p *TStreamLoadPutRequest) IsSetIsTempPartition() bool { - return p.IsTempPartition != nil -} - -func (p *TStreamLoadPutRequest) IsSetStripOuterArray() bool { - return p.StripOuterArray != nil -} - -func (p *TStreamLoadPutRequest) IsSetJsonpaths() bool { - return p.Jsonpaths != nil -} - -func (p *TStreamLoadPutRequest) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil -} - -func (p *TStreamLoadPutRequest) IsSetJsonRoot() bool { - return p.JsonRoot != nil -} - -func (p *TStreamLoadPutRequest) IsSetMergeType() bool { - return p.MergeType != nil -} - -func (p *TStreamLoadPutRequest) IsSetDeleteCondition() bool { - return p.DeleteCondition != nil -} - -func (p *TStreamLoadPutRequest) IsSetSequenceCol() bool { - return p.SequenceCol != nil -} - -func (p *TStreamLoadPutRequest) IsSetNumAsString() bool { - return p.NumAsString != nil -} - -func (p *TStreamLoadPutRequest) IsSetFuzzyParse() bool { - return p.FuzzyParse != nil -} - -func (p *TStreamLoadPutRequest) 
IsSetLineDelimiter() bool { - return p.LineDelimiter != nil -} - -func (p *TStreamLoadPutRequest) IsSetReadJsonByLine() bool { - return p.ReadJsonByLine != nil -} - -func (p *TStreamLoadPutRequest) IsSetToken() bool { - return p.Token != nil -} - -func (p *TStreamLoadPutRequest) IsSetSendBatchParallelism() bool { - return p.SendBatchParallelism != nil -} - -func (p *TStreamLoadPutRequest) IsSetMaxFilterRatio() bool { - return p.MaxFilterRatio != nil -} - -func (p *TStreamLoadPutRequest) IsSetLoadToSingleTablet() bool { - return p.LoadToSingleTablet != nil -} - -func (p *TStreamLoadPutRequest) IsSetHeaderType() bool { - return p.HeaderType != nil -} - -func (p *TStreamLoadPutRequest) IsSetHiddenColumns() bool { - return p.HiddenColumns != nil -} - -func (p *TStreamLoadPutRequest) IsSetCompressType() bool { - return p.CompressType != nil -} - -func (p *TStreamLoadPutRequest) IsSetFileSize() bool { - return p.FileSize != nil -} - -func (p *TStreamLoadPutRequest) IsSetTrimDoubleQuotes() bool { - return p.TrimDoubleQuotes != nil -} - -func (p *TStreamLoadPutRequest) IsSetSkipLines() bool { - return p.SkipLines != nil -} - -func (p *TStreamLoadPutRequest) IsSetEnableProfile() bool { - return p.EnableProfile != nil -} - -func (p *TStreamLoadPutRequest) IsSetPartialUpdate() bool { - return p.PartialUpdate != nil -} - -func (p *TStreamLoadPutRequest) IsSetTableNames() bool { - return p.TableNames != nil -} - -func (p *TStreamLoadPutRequest) IsSetLoadSql() bool { - return p.LoadSql != nil -} - -func (p *TStreamLoadPutRequest) IsSetBackendId() bool { - return p.BackendId != nil -} - -func (p *TStreamLoadPutRequest) IsSetVersion() bool { - return p.Version != nil -} - -func (p *TStreamLoadPutRequest) IsSetLabel() bool { - return p.Label != nil -} - -func (p *TStreamLoadPutRequest) IsSetEnclose() bool { - return p.Enclose != nil -} - -func (p *TStreamLoadPutRequest) IsSetEscape() bool { - return p.Escape != nil -} - -func (p *TStreamLoadPutRequest) IsSetMemtableOnSinkNode() bool { - return p.MemtableOnSinkNode != nil -} - -func (p *TStreamLoadPutRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TMasterOpRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 var issetUser bool = false - var issetPasswd bool = false var issetDb bool = false - var issetTbl bool = false - var issetLoadId bool = false - var issetTxnId bool = false - var issetFileType bool = false - var issetFormatType bool = false + var issetSql bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -23210,545 +21542,265 @@ func (p *TStreamLoadPutRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetDb = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetSql = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError 
} case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetTbl = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - issetLoadId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - issetFileType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - issetFormatType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: - if fieldTypeId == thrift.STRING { + if fieldTypeId 
== thrift.I32 { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.MAP { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRING { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I32 { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 26: if fieldTypeId == thrift.STRING { if err = p.ReadField26(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 27: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField27(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 28: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.MAP { if err = p.ReadField28(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 29: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField29(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 30: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField30(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 31: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField31(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 32: - if fieldTypeId == thrift.STRING { - if err = p.ReadField32(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 33: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField33(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 34: - if fieldTypeId == thrift.STRING { - if err = p.ReadField34(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 35: - if fieldTypeId == thrift.I32 { - if err = p.ReadField35(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 36: - if fieldTypeId == thrift.DOUBLE { - if err = p.ReadField36(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 37: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField37(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 38: - if fieldTypeId == thrift.STRING { - if err = p.ReadField38(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 39: - if fieldTypeId == thrift.STRING { - if err = p.ReadField39(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 40: - if fieldTypeId == thrift.I32 { - if err = p.ReadField40(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 41: - if fieldTypeId == thrift.I64 { - if err = p.ReadField41(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 42: - if fieldTypeId == thrift.BOOL 
{ - if err = p.ReadField42(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 43: - if fieldTypeId == thrift.I32 { - if err = p.ReadField43(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 44: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField44(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 45: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField45(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 46: - if fieldTypeId == thrift.LIST { - if err = p.ReadField46(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 47: - if fieldTypeId == thrift.STRING { - if err = p.ReadField47(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 48: - if fieldTypeId == thrift.I64 { - if err = p.ReadField48(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 49: - if fieldTypeId == thrift.I32 { - if err = p.ReadField49(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 50: + case 1000: if fieldTypeId == thrift.STRING { - if err = p.ReadField50(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 51: - if fieldTypeId == thrift.BYTE { - if err = p.ReadField51(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 52: - if fieldTypeId == thrift.BYTE { - if err = p.ReadField52(iprot); err != nil { + if err = p.ReadField1000(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 53: + case 1001: if fieldTypeId == thrift.BOOL { - if err = p.ReadField53(iprot); err != nil { + if err = p.ReadField1001(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -23758,42 +21810,17 @@ func (p *TStreamLoadPutRequest) Read(iprot thrift.TProtocol) (err error) { } if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 + fieldId = 1 goto RequiredFieldNotSetError } if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetLoadId { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 8 - goto RequiredFieldNotSetError - } - - if !issetFileType { - fieldId = 9 + fieldId = 2 goto RequiredFieldNotSetError } - if 
!issetFormatType { - fieldId = 10 + if !issetSql { + fieldId = 3 goto RequiredFieldNotSetError } return nil @@ -23802,7 +21829,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -23811,504 +21838,402 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutRequest[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpRequest[fieldId])) } -func (p *TStreamLoadPutRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} +func (p *TMasterOpRequest) ReadField1(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField2(iprot thrift.TProtocol) error { + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } +func (p *TMasterOpRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField3(iprot thrift.TProtocol) error { + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Db = _field return nil } +func (p *TMasterOpRequest) ReadField3(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField4(iprot thrift.TProtocol) error { + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Sql = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TMasterOpRequest) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Tbl = v } + p.ResourceInfo = _field return nil } +func (p *TMasterOpRequest) ReadField5(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField6(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField7(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { - return err + _field = &v } + p.Cluster = _field return nil } +func (p *TMasterOpRequest) ReadField6(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField8(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnId = v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.FileType = types.TFileType(v) + _field = &v } + p.ExecMemLimit = _field return 
nil } +func (p *TMasterOpRequest) ReadField7(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField10(iprot thrift.TProtocol) error { + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FormatType = plannodes.TFileFormatType(v) - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Path = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Columns = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Where = &v + _field = &v } + p.QueryTimeout = _field return nil } +func (p *TMasterOpRequest) ReadField8(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField14(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparator = &v + _field = &v } + p.UserIp = _field return nil } +func (p *TMasterOpRequest) ReadField9(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField15(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Partitions = &v + _field = &v } + p.TimeZone = _field return nil } +func (p *TMasterOpRequest) ReadField10(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField16(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.AuthCode = &v + _field = &v } + p.StmtId = _field return nil } +func (p *TMasterOpRequest) ReadField11(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField17(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Negative = &v + _field = &v } + p.SqlMode = _field return nil } +func (p *TMasterOpRequest) ReadField12(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField18(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Timeout = &v + _field = &v } + p.LoadMemLimit = _field return nil } +func (p *TMasterOpRequest) ReadField13(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField19(iprot thrift.TProtocol) error { + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.StrictMode = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField20(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Timezone = &v + _field = &v } + p.EnableStrictMode = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField21(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TMasterOpRequest) ReadField14(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err - } else { - p.ExecMemLimit = &v } + p.CurrentUserIdent = _field return nil } +func (p *TMasterOpRequest) ReadField15(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField22(iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadBool(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.IsTempPartition = &v + _field = &v } + p.StmtIdx = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField23(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TMasterOpRequest) ReadField16(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTQueryOptions() + if err := _field.Read(iprot); err != nil { return err - } else { - p.StripOuterArray = &v } + p.QueryOptions = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField24(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TMasterOpRequest) ReadField17(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Jsonpaths = &v } + p.QueryId = _field return nil } +func (p *TMasterOpRequest) ReadField18(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField25(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThriftRpcTimeoutMs = &v + _field = &v } + p.InsertVisibleTimeoutMs = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField26(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TMasterOpRequest) ReadField19(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.JsonRoot = &v } - return nil -} + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } -func (p *TStreamLoadPutRequest) ReadField27(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := types.TMergeType(v) - p.MergeType = &tmp - } - return nil -} + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } -func (p *TStreamLoadPutRequest) ReadField28(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.DeleteCondition = &v + _field[_key] = _val } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField29(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.SequenceCol = &v } + p.SessionVariables = _field return nil } +func (p *TMasterOpRequest) ReadField20(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField30(iprot thrift.TProtocol) error { + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NumAsString = &v + _field = &v } + p.FoldConstantByBe = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField31(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TMasterOpRequest) ReadField21(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.FuzzyParse = &v } - return nil -} + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } -func (p *TStreamLoadPutRequest) ReadField32(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.LineDelimiter = &v - } - return nil -} + var _val 
string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } -func (p *TStreamLoadPutRequest) ReadField33(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.ReadJsonByLine = &v } + p.TraceCarrier = _field return nil } +func (p *TMasterOpRequest) ReadField22(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField34(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Token = &v + _field = &v } + p.ClientNodeHost = _field return nil } +func (p *TMasterOpRequest) ReadField23(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField35(iprot thrift.TProtocol) error { + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SendBatchParallelism = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField36(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return err - } else { - p.MaxFilterRatio = &v + _field = &v } + p.ClientNodePort = _field return nil } +func (p *TMasterOpRequest) ReadField24(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField37(iprot thrift.TProtocol) error { + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.LoadToSingleTablet = &v + _field = &v } + p.SyncJournalOnly = _field return nil } +func (p *TMasterOpRequest) ReadField25(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField38(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HeaderType = &v + _field = &v } + p.DefaultCatalog = _field return nil } +func (p *TMasterOpRequest) ReadField26(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField39(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HiddenColumns = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField40(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := plannodes.TFileCompressType(v) - p.CompressType = &tmp - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField41(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.FileSize = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField42(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.TrimDoubleQuotes = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField43(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.SkipLines = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField44(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableProfile = &v + _field = &v } + p.DefaultDatabase = _field return nil } +func (p *TMasterOpRequest) ReadField27(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField45(iprot thrift.TProtocol) error { + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.PartialUpdate = &v + _field = &v } + p.CancelQeury = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField46(iprot thrift.TProtocol) error { - _, size, err := 
iprot.ReadListBegin() +func (p *TMasterOpRequest) ReadField28(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.TableNames = make([]string, 0, size) + _field := make(map[string]*exprs.TExprNode, size) + values := make([]exprs.TExprNode, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, err := iprot.ReadString(); err != nil { return err } else { - _elem = v + _key = v } - p.TableNames = append(p.TableNames, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } -func (p *TStreamLoadPutRequest) ReadField47(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.LoadSql = &v } + p.UserVariables = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField48(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TMasterOpRequest) ReadField29(iprot thrift.TProtocol) error { + _field := NewTTxnLoadInfo() + if err := _field.Read(iprot); err != nil { return err - } else { - p.BackendId = &v } + p.TxnLoadInfo = _field return nil } - -func (p *TStreamLoadPutRequest) ReadField49(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TMasterOpRequest) ReadField30(iprot thrift.TProtocol) error { + _field := NewTGroupCommitInfo() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Version = &v } + p.GroupCommitInfo = _field return nil } +func (p *TMasterOpRequest) ReadField1000(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField50(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Label = &v + _field = &v } + p.CloudCluster = _field return nil } +func (p *TMasterOpRequest) ReadField1001(iprot thrift.TProtocol) error { -func (p *TStreamLoadPutRequest) ReadField51(iprot thrift.TProtocol) error { - if v, err := iprot.ReadByte(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Enclose = &v + _field = &v } + p.NoAuth = _field return nil } -func (p *TStreamLoadPutRequest) ReadField52(iprot thrift.TProtocol) error { - if v, err := iprot.ReadByte(); err != nil { - return err - } else { - p.Escape = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) ReadField53(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.MemtableOnSinkNode = &v - } - return nil -} - -func (p *TStreamLoadPutRequest) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TStreamLoadPutRequest"); err != nil { - goto WriteStructBeginError +func (p *TMasterOpRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TMasterOpRequest"); err != nil { + goto WriteStructBeginError } if p != nil { if err = p.writeField1(oprot); err != nil { @@ -24431,99 +22356,14 @@ func (p *TStreamLoadPutRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 30 goto WriteFieldError } - if err = p.writeField31(oprot); err != nil { - fieldId = 31 - goto WriteFieldError - } - if err = p.writeField32(oprot); err != nil { - fieldId = 32 - goto WriteFieldError - } - if err = p.writeField33(oprot); err != nil { - fieldId = 33 - goto 
WriteFieldError - } - if err = p.writeField34(oprot); err != nil { - fieldId = 34 - goto WriteFieldError - } - if err = p.writeField35(oprot); err != nil { - fieldId = 35 - goto WriteFieldError - } - if err = p.writeField36(oprot); err != nil { - fieldId = 36 - goto WriteFieldError - } - if err = p.writeField37(oprot); err != nil { - fieldId = 37 - goto WriteFieldError - } - if err = p.writeField38(oprot); err != nil { - fieldId = 38 - goto WriteFieldError - } - if err = p.writeField39(oprot); err != nil { - fieldId = 39 - goto WriteFieldError - } - if err = p.writeField40(oprot); err != nil { - fieldId = 40 - goto WriteFieldError - } - if err = p.writeField41(oprot); err != nil { - fieldId = 41 - goto WriteFieldError - } - if err = p.writeField42(oprot); err != nil { - fieldId = 42 - goto WriteFieldError - } - if err = p.writeField43(oprot); err != nil { - fieldId = 43 - goto WriteFieldError - } - if err = p.writeField44(oprot); err != nil { - fieldId = 44 - goto WriteFieldError - } - if err = p.writeField45(oprot); err != nil { - fieldId = 45 - goto WriteFieldError - } - if err = p.writeField46(oprot); err != nil { - fieldId = 46 - goto WriteFieldError - } - if err = p.writeField47(oprot); err != nil { - fieldId = 47 - goto WriteFieldError - } - if err = p.writeField48(oprot); err != nil { - fieldId = 48 - goto WriteFieldError - } - if err = p.writeField49(oprot); err != nil { - fieldId = 49 - goto WriteFieldError - } - if err = p.writeField50(oprot); err != nil { - fieldId = 50 - goto WriteFieldError - } - if err = p.writeField51(oprot); err != nil { - fieldId = 51 - goto WriteFieldError - } - if err = p.writeField52(oprot); err != nil { - fieldId = 52 + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 goto WriteFieldError } - if err = p.writeField53(oprot); err != nil { - fieldId = 53 + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -24542,17 +22382,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Cluster); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TMasterOpRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -24561,11 +22399,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { +func (p *TMasterOpRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.User); err != nil { + if err := oprot.WriteString(p.Db); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24578,11 +22416,11 @@ WriteFieldEndError: return 
thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { +func (p *TMasterOpRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("sql", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.Passwd); err != nil { + if err := oprot.WriteString(p.Sql); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24595,15 +22433,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceInfo() { + if err = oprot.WriteFieldBegin("resourceInfo", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.ResourceInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24612,15 +22452,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Tbl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24629,12 +22471,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { +func (p *TMasterOpRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetExecMemLimit() { + if err = oprot.WriteFieldBegin("execMemLimit", thrift.I64, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.UserIp); err != nil { + if err := oprot.WriteI64(*p.ExecMemLimit); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24648,15 +22490,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("loadId", thrift.STRUCT, 7); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryTimeout() { + if err = 
oprot.WriteFieldBegin("queryTimeout", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.QueryTimeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24665,15 +22509,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField8(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("txnId", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24682,15 +22528,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField9(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("fileType", thrift.I32, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.FileType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeZone() { + if err = oprot.WriteFieldBegin("time_zone", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TimeZone); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24699,15 +22547,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField10(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("formatType", thrift.I32, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.FormatType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TMasterOpRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetStmtId() { + if err = oprot.WriteFieldBegin("stmt_id", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.StmtId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -24716,12 +22566,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetPath() { - if err = oprot.WriteFieldBegin("path", thrift.STRING, 11); err != nil { +func (p *TMasterOpRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetSqlMode() { + if err = oprot.WriteFieldBegin("sqlMode", thrift.I64, 11); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Path); err != nil { + if err := oprot.WriteI64(*p.SqlMode); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24735,12 
+22585,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetColumns() { - if err = oprot.WriteFieldBegin("columns", thrift.STRING, 12); err != nil { +func (p *TMasterOpRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadMemLimit() { + if err = oprot.WriteFieldBegin("loadMemLimit", thrift.I64, 12); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Columns); err != nil { + if err := oprot.WriteI64(*p.LoadMemLimit); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24754,12 +22604,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetWhere() { - if err = oprot.WriteFieldBegin("where", thrift.STRING, 13); err != nil { +func (p *TMasterOpRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableStrictMode() { + if err = oprot.WriteFieldBegin("enableStrictMode", thrift.BOOL, 13); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Where); err != nil { + if err := oprot.WriteBool(*p.EnableStrictMode); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24773,12 +22623,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnSeparator() { - if err = oprot.WriteFieldBegin("columnSeparator", thrift.STRING, 14); err != nil { +func (p *TMasterOpRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 14); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.ColumnSeparator); err != nil { + if err := p.CurrentUserIdent.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24792,12 +22642,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetPartitions() { - if err = oprot.WriteFieldBegin("partitions", thrift.STRING, 15); err != nil { +func (p *TMasterOpRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetStmtIdx() { + if err = oprot.WriteFieldBegin("stmtIdx", thrift.I32, 15); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Partitions); err != nil { + if err := oprot.WriteI32(*p.StmtIdx); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24811,12 +22661,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 16); err != nil { +func (p *TMasterOpRequest) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 16); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.AuthCode); err != nil { + if err := p.QueryOptions.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24830,12 +22680,12 @@ 
WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetNegative() { - if err = oprot.WriteFieldBegin("negative", thrift.BOOL, 17); err != nil { +func (p *TMasterOpRequest) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 17); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.Negative); err != nil { + if err := p.QueryId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24849,12 +22699,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeout() { - if err = oprot.WriteFieldBegin("timeout", thrift.I32, 18); err != nil { +func (p *TMasterOpRequest) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetInsertVisibleTimeoutMs() { + if err = oprot.WriteFieldBegin("insert_visible_timeout_ms", thrift.I64, 18); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.Timeout); err != nil { + if err := oprot.WriteI64(*p.InsertVisibleTimeoutMs); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24868,12 +22718,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetStrictMode() { - if err = oprot.WriteFieldBegin("strictMode", thrift.BOOL, 19); err != nil { +func (p *TMasterOpRequest) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetSessionVariables() { + if err = oprot.WriteFieldBegin("session_variables", thrift.MAP, 19); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.StrictMode); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.SessionVariables)); err != nil { + return err + } + for k, v := range p.SessionVariables { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24887,12 +22748,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetTimezone() { - if err = oprot.WriteFieldBegin("timezone", thrift.STRING, 20); err != nil { +func (p *TMasterOpRequest) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetFoldConstantByBe() { + if err = oprot.WriteFieldBegin("foldConstantByBe", thrift.BOOL, 20); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Timezone); err != nil { + if err := oprot.WriteBool(*p.FoldConstantByBe); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24906,12 +22767,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetExecMemLimit() { - if err = oprot.WriteFieldBegin("execMemLimit", thrift.I64, 21); err != nil { +func (p *TMasterOpRequest) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetTraceCarrier() { + if err = 
oprot.WriteFieldBegin("trace_carrier", thrift.MAP, 21); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ExecMemLimit); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.TraceCarrier)); err != nil { + return err + } + for k, v := range p.TraceCarrier { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24925,12 +22797,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetIsTempPartition() { - if err = oprot.WriteFieldBegin("isTempPartition", thrift.BOOL, 22); err != nil { +func (p *TMasterOpRequest) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetClientNodeHost() { + if err = oprot.WriteFieldBegin("clientNodeHost", thrift.STRING, 22); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.IsTempPartition); err != nil { + if err := oprot.WriteString(*p.ClientNodeHost); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24944,12 +22816,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetStripOuterArray() { - if err = oprot.WriteFieldBegin("strip_outer_array", thrift.BOOL, 23); err != nil { +func (p *TMasterOpRequest) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetClientNodePort() { + if err = oprot.WriteFieldBegin("clientNodePort", thrift.I32, 23); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.StripOuterArray); err != nil { + if err := oprot.WriteI32(*p.ClientNodePort); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24963,12 +22835,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField24(oprot thrift.TProtocol) (err error) { - if p.IsSetJsonpaths() { - if err = oprot.WriteFieldBegin("jsonpaths", thrift.STRING, 24); err != nil { +func (p *TMasterOpRequest) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetSyncJournalOnly() { + if err = oprot.WriteFieldBegin("syncJournalOnly", thrift.BOOL, 24); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Jsonpaths); err != nil { + if err := oprot.WriteBool(*p.SyncJournalOnly); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -24982,12 +22854,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField25(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 25); err != nil { +func (p *TMasterOpRequest) writeField25(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultCatalog() { + if err = oprot.WriteFieldBegin("defaultCatalog", thrift.STRING, 25); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + if err := oprot.WriteString(*p.DefaultCatalog); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25001,12 +22873,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T 
write field 25 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField26(oprot thrift.TProtocol) (err error) { - if p.IsSetJsonRoot() { - if err = oprot.WriteFieldBegin("json_root", thrift.STRING, 26); err != nil { +func (p *TMasterOpRequest) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultDatabase() { + if err = oprot.WriteFieldBegin("defaultDatabase", thrift.STRING, 26); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.JsonRoot); err != nil { + if err := oprot.WriteString(*p.DefaultDatabase); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25020,12 +22892,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField27(oprot thrift.TProtocol) (err error) { - if p.IsSetMergeType() { - if err = oprot.WriteFieldBegin("merge_type", thrift.I32, 27); err != nil { +func (p *TMasterOpRequest) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetCancelQeury() { + if err = oprot.WriteFieldBegin("cancel_qeury", thrift.BOOL, 27); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.MergeType)); err != nil { + if err := oprot.WriteBool(*p.CancelQeury); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25039,12 +22911,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField28(oprot thrift.TProtocol) (err error) { - if p.IsSetDeleteCondition() { - if err = oprot.WriteFieldBegin("delete_condition", thrift.STRING, 28); err != nil { +func (p *TMasterOpRequest) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetUserVariables() { + if err = oprot.WriteFieldBegin("user_variables", thrift.MAP, 28); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.DeleteCondition); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(p.UserVariables)); err != nil { + return err + } + for k, v := range p.UserVariables { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25058,12 +22941,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField29(oprot thrift.TProtocol) (err error) { - if p.IsSetSequenceCol() { - if err = oprot.WriteFieldBegin("sequence_col", thrift.STRING, 29); err != nil { +func (p *TMasterOpRequest) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnLoadInfo() { + if err = oprot.WriteFieldBegin("txnLoadInfo", thrift.STRUCT, 29); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.SequenceCol); err != nil { + if err := p.TxnLoadInfo.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25077,12 +22960,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField30(oprot thrift.TProtocol) (err error) { - if p.IsSetNumAsString() { - if err = oprot.WriteFieldBegin("num_as_string", thrift.BOOL, 30); err != nil { +func (p *TMasterOpRequest) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitInfo() { + if err = oprot.WriteFieldBegin("groupCommitInfo", thrift.STRUCT, 30); err 
!= nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.NumAsString); err != nil { + if err := p.GroupCommitInfo.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25096,12 +22979,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField31(oprot thrift.TProtocol) (err error) { - if p.IsSetFuzzyParse() { - if err = oprot.WriteFieldBegin("fuzzy_parse", thrift.BOOL, 31); err != nil { +func (p *TMasterOpRequest) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudCluster() { + if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 1000); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.FuzzyParse); err != nil { + if err := oprot.WriteString(*p.CloudCluster); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25110,17 +22993,17 @@ func (p *TStreamLoadPutRequest) writeField31(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField32(oprot thrift.TProtocol) (err error) { - if p.IsSetLineDelimiter() { - if err = oprot.WriteFieldBegin("line_delimiter", thrift.STRING, 32); err != nil { +func (p *TMasterOpRequest) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetNoAuth() { + if err = oprot.WriteFieldBegin("noAuth", thrift.BOOL, 1001); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.LineDelimiter); err != nil { + if err := oprot.WriteBool(*p.NoAuth); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -25129,1282 +23012,888 @@ func (p *TStreamLoadPutRequest) writeField32(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField33(oprot thrift.TProtocol) (err error) { - if p.IsSetReadJsonByLine() { - if err = oprot.WriteFieldBegin("read_json_by_line", thrift.BOOL, 33); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.ReadJsonByLine); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TMasterOpRequest) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 33 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) -} + return fmt.Sprintf("TMasterOpRequest(%+v)", *p) -func (p *TStreamLoadPutRequest) writeField34(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 34); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = 
oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) } -func (p *TStreamLoadPutRequest) writeField35(oprot thrift.TProtocol) (err error) { - if p.IsSetSendBatchParallelism() { - if err = oprot.WriteFieldBegin("send_batch_parallelism", thrift.I32, 35); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.SendBatchParallelism); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TMasterOpRequest) DeepEqual(ano *TMasterOpRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField36(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxFilterRatio() { - if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 36); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField37(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadToSingleTablet() { - if err = oprot.WriteFieldBegin("load_to_single_tablet", thrift.BOOL, 37); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.LoadToSingleTablet); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 37 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 37 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField38(oprot thrift.TProtocol) (err error) { - if p.IsSetHeaderType() { - if err = oprot.WriteFieldBegin("header_type", thrift.STRING, 38); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.HeaderType); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 38 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 38 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField39(oprot thrift.TProtocol) (err error) { - if p.IsSetHiddenColumns() { - if err = oprot.WriteFieldBegin("hidden_columns", thrift.STRING, 39); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.HiddenColumns); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 39 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 39 end error: ", 
p), err) -} - -func (p *TStreamLoadPutRequest) writeField40(oprot thrift.TProtocol) (err error) { - if p.IsSetCompressType() { - if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 40); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 40 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 40 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField41(oprot thrift.TProtocol) (err error) { - if p.IsSetFileSize() { - if err = oprot.WriteFieldBegin("file_size", thrift.I64, 41); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.FileSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 41 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 41 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField42(oprot thrift.TProtocol) (err error) { - if p.IsSetTrimDoubleQuotes() { - if err = oprot.WriteFieldBegin("trim_double_quotes", thrift.BOOL, 42); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.TrimDoubleQuotes); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 42 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 42 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField43(oprot thrift.TProtocol) (err error) { - if p.IsSetSkipLines() { - if err = oprot.WriteFieldBegin("skip_lines", thrift.I32, 43); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.SkipLines); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 43 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField44(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableProfile() { - if err = oprot.WriteFieldBegin("enable_profile", thrift.BOOL, 44); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableProfile); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField45(oprot thrift.TProtocol) (err error) { - if p.IsSetPartialUpdate() { - if err = oprot.WriteFieldBegin("partial_update", thrift.BOOL, 45); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.PartialUpdate); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 
45 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 45 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField46(oprot thrift.TProtocol) (err error) { - if p.IsSetTableNames() { - if err = oprot.WriteFieldBegin("table_names", thrift.LIST, 46); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.TableNames)); err != nil { - return err - } - for _, v := range p.TableNames { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 46 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 46 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField47(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadSql() { - if err = oprot.WriteFieldBegin("load_sql", thrift.STRING, 47); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.LoadSql); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 47 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 47 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField48(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendId() { - if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 48); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BackendId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 48 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 48 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField49(oprot thrift.TProtocol) (err error) { - if p.IsSetVersion() { - if err = oprot.WriteFieldBegin("version", thrift.I32, 49); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.Version); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 49 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 49 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField50(oprot thrift.TProtocol) (err error) { - if p.IsSetLabel() { - if err = oprot.WriteFieldBegin("label", thrift.STRING, 50); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Label); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 50 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 50 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField51(oprot thrift.TProtocol) (err error) { - if p.IsSetEnclose() { - if err = oprot.WriteFieldBegin("enclose", thrift.BYTE, 51); err != nil { - goto WriteFieldBeginError - } - if err 
:= oprot.WriteByte(*p.Enclose); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 51 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 51 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField52(oprot thrift.TProtocol) (err error) { - if p.IsSetEscape() { - if err = oprot.WriteFieldBegin("escape", thrift.BYTE, 52); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteByte(*p.Escape); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 52 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 52 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) writeField53(oprot thrift.TProtocol) (err error) { - if p.IsSetMemtableOnSinkNode() { - if err = oprot.WriteFieldBegin("memtable_on_sink_node", thrift.BOOL, 53); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.MemtableOnSinkNode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 53 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 53 end error: ", p), err) -} - -func (p *TStreamLoadPutRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TStreamLoadPutRequest(%+v)", *p) -} - -func (p *TStreamLoadPutRequest) DeepEqual(ano *TStreamLoadPutRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { + if !p.Field1DeepEqual(ano.User) { return false } - if !p.Field1DeepEqual(ano.Cluster) { + if !p.Field2DeepEqual(ano.Db) { return false } - if !p.Field2DeepEqual(ano.User) { + if !p.Field3DeepEqual(ano.Sql) { return false } - if !p.Field3DeepEqual(ano.Passwd) { + if !p.Field4DeepEqual(ano.ResourceInfo) { return false } - if !p.Field4DeepEqual(ano.Db) { + if !p.Field5DeepEqual(ano.Cluster) { return false } - if !p.Field5DeepEqual(ano.Tbl) { + if !p.Field6DeepEqual(ano.ExecMemLimit) { return false } - if !p.Field6DeepEqual(ano.UserIp) { + if !p.Field7DeepEqual(ano.QueryTimeout) { return false } - if !p.Field7DeepEqual(ano.LoadId) { + if !p.Field8DeepEqual(ano.UserIp) { return false } - if !p.Field8DeepEqual(ano.TxnId) { + if !p.Field9DeepEqual(ano.TimeZone) { return false } - if !p.Field9DeepEqual(ano.FileType) { + if !p.Field10DeepEqual(ano.StmtId) { return false } - if !p.Field10DeepEqual(ano.FormatType) { + if !p.Field11DeepEqual(ano.SqlMode) { return false } - if !p.Field11DeepEqual(ano.Path) { + if !p.Field12DeepEqual(ano.LoadMemLimit) { return false } - if !p.Field12DeepEqual(ano.Columns) { + if !p.Field13DeepEqual(ano.EnableStrictMode) { return false } - if !p.Field13DeepEqual(ano.Where) { + if !p.Field14DeepEqual(ano.CurrentUserIdent) { return false } - if !p.Field14DeepEqual(ano.ColumnSeparator) { + if !p.Field15DeepEqual(ano.StmtIdx) { return false } - if !p.Field15DeepEqual(ano.Partitions) { + if !p.Field16DeepEqual(ano.QueryOptions) { return false } - if !p.Field16DeepEqual(ano.AuthCode) { + if !p.Field17DeepEqual(ano.QueryId) { return false } - if !p.Field17DeepEqual(ano.Negative) { + if 
!p.Field18DeepEqual(ano.InsertVisibleTimeoutMs) { return false } - if !p.Field18DeepEqual(ano.Timeout) { + if !p.Field19DeepEqual(ano.SessionVariables) { return false } - if !p.Field19DeepEqual(ano.StrictMode) { + if !p.Field20DeepEqual(ano.FoldConstantByBe) { return false } - if !p.Field20DeepEqual(ano.Timezone) { + if !p.Field21DeepEqual(ano.TraceCarrier) { return false } - if !p.Field21DeepEqual(ano.ExecMemLimit) { + if !p.Field22DeepEqual(ano.ClientNodeHost) { return false } - if !p.Field22DeepEqual(ano.IsTempPartition) { + if !p.Field23DeepEqual(ano.ClientNodePort) { return false } - if !p.Field23DeepEqual(ano.StripOuterArray) { + if !p.Field24DeepEqual(ano.SyncJournalOnly) { return false } - if !p.Field24DeepEqual(ano.Jsonpaths) { + if !p.Field25DeepEqual(ano.DefaultCatalog) { return false } - if !p.Field25DeepEqual(ano.ThriftRpcTimeoutMs) { + if !p.Field26DeepEqual(ano.DefaultDatabase) { return false } - if !p.Field26DeepEqual(ano.JsonRoot) { + if !p.Field27DeepEqual(ano.CancelQeury) { return false } - if !p.Field27DeepEqual(ano.MergeType) { + if !p.Field28DeepEqual(ano.UserVariables) { return false } - if !p.Field28DeepEqual(ano.DeleteCondition) { + if !p.Field29DeepEqual(ano.TxnLoadInfo) { return false } - if !p.Field29DeepEqual(ano.SequenceCol) { + if !p.Field30DeepEqual(ano.GroupCommitInfo) { return false } - if !p.Field30DeepEqual(ano.NumAsString) { + if !p.Field1000DeepEqual(ano.CloudCluster) { return false } - if !p.Field31DeepEqual(ano.FuzzyParse) { + if !p.Field1001DeepEqual(ano.NoAuth) { return false } - if !p.Field32DeepEqual(ano.LineDelimiter) { + return true +} + +func (p *TMasterOpRequest) Field1DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { return false } - if !p.Field33DeepEqual(ano.ReadJsonByLine) { + return true +} +func (p *TMasterOpRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Db, src) != 0 { return false } - if !p.Field34DeepEqual(ano.Token) { + return true +} +func (p *TMasterOpRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Sql, src) != 0 { return false } - if !p.Field35DeepEqual(ano.SendBatchParallelism) { + return true +} +func (p *TMasterOpRequest) Field4DeepEqual(src *types.TResourceInfo) bool { + + if !p.ResourceInfo.DeepEqual(src) { return false } - if !p.Field36DeepEqual(ano.MaxFilterRatio) { + return true +} +func (p *TMasterOpRequest) Field5DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { return false } - if !p.Field37DeepEqual(ano.LoadToSingleTablet) { + if strings.Compare(*p.Cluster, *src) != 0 { return false } - if !p.Field38DeepEqual(ano.HeaderType) { + return true +} +func (p *TMasterOpRequest) Field6DeepEqual(src *int64) bool { + + if p.ExecMemLimit == src { + return true + } else if p.ExecMemLimit == nil || src == nil { return false } - if !p.Field39DeepEqual(ano.HiddenColumns) { + if *p.ExecMemLimit != *src { return false } - if !p.Field40DeepEqual(ano.CompressType) { + return true +} +func (p *TMasterOpRequest) Field7DeepEqual(src *int32) bool { + + if p.QueryTimeout == src { + return true + } else if p.QueryTimeout == nil || src == nil { return false } - if !p.Field41DeepEqual(ano.FileSize) { + if *p.QueryTimeout != *src { return false } - if !p.Field42DeepEqual(ano.TrimDoubleQuotes) { + return true +} +func (p *TMasterOpRequest) Field8DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { return false } - if !p.Field43DeepEqual(ano.SkipLines) { + if 
strings.Compare(*p.UserIp, *src) != 0 { return false } - if !p.Field44DeepEqual(ano.EnableProfile) { + return true +} +func (p *TMasterOpRequest) Field9DeepEqual(src *string) bool { + + if p.TimeZone == src { + return true + } else if p.TimeZone == nil || src == nil { return false } - if !p.Field45DeepEqual(ano.PartialUpdate) { + if strings.Compare(*p.TimeZone, *src) != 0 { return false } - if !p.Field46DeepEqual(ano.TableNames) { + return true +} +func (p *TMasterOpRequest) Field10DeepEqual(src *int64) bool { + + if p.StmtId == src { + return true + } else if p.StmtId == nil || src == nil { return false } - if !p.Field47DeepEqual(ano.LoadSql) { + if *p.StmtId != *src { return false } - if !p.Field48DeepEqual(ano.BackendId) { + return true +} +func (p *TMasterOpRequest) Field11DeepEqual(src *int64) bool { + + if p.SqlMode == src { + return true + } else if p.SqlMode == nil || src == nil { return false } - if !p.Field49DeepEqual(ano.Version) { - return false - } - if !p.Field50DeepEqual(ano.Label) { - return false - } - if !p.Field51DeepEqual(ano.Enclose) { - return false - } - if !p.Field52DeepEqual(ano.Escape) { - return false - } - if !p.Field53DeepEqual(ano.MemtableOnSinkNode) { + if *p.SqlMode != *src { return false } return true } +func (p *TMasterOpRequest) Field12DeepEqual(src *int64) bool { -func (p *TStreamLoadPutRequest) Field1DeepEqual(src *string) bool { - - if p.Cluster == src { + if p.LoadMemLimit == src { return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { + } else if p.LoadMemLimit == nil || src == nil { return false } - return true -} -func (p *TStreamLoadPutRequest) Field2DeepEqual(src string) bool { - - if strings.Compare(p.User, src) != 0 { + if *p.LoadMemLimit != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field3DeepEqual(src string) bool { +func (p *TMasterOpRequest) Field13DeepEqual(src *bool) bool { - if strings.Compare(p.Passwd, src) != 0 { + if p.EnableStrictMode == src { + return true + } else if p.EnableStrictMode == nil || src == nil { return false } - return true -} -func (p *TStreamLoadPutRequest) Field4DeepEqual(src string) bool { - - if strings.Compare(p.Db, src) != 0 { + if *p.EnableStrictMode != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field5DeepEqual(src string) bool { +func (p *TMasterOpRequest) Field14DeepEqual(src *types.TUserIdentity) bool { - if strings.Compare(p.Tbl, src) != 0 { + if !p.CurrentUserIdent.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field6DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field15DeepEqual(src *int32) bool { - if p.UserIp == src { + if p.StmtIdx == src { return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { - return false - } - return true -} -func (p *TStreamLoadPutRequest) Field7DeepEqual(src *types.TUniqueId) bool { - - if !p.LoadId.DeepEqual(src) { + } else if p.StmtIdx == nil || src == nil { return false } - return true -} -func (p *TStreamLoadPutRequest) Field8DeepEqual(src int64) bool { - - if p.TxnId != src { + if *p.StmtIdx != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field9DeepEqual(src types.TFileType) bool { +func (p *TMasterOpRequest) Field16DeepEqual(src *palointernalservice.TQueryOptions) bool { - if p.FileType != src { + if !p.QueryOptions.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field10DeepEqual(src 
plannodes.TFileFormatType) bool { +func (p *TMasterOpRequest) Field17DeepEqual(src *types.TUniqueId) bool { - if p.FormatType != src { + if !p.QueryId.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field11DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field18DeepEqual(src *int64) bool { - if p.Path == src { + if p.InsertVisibleTimeoutMs == src { return true - } else if p.Path == nil || src == nil { + } else if p.InsertVisibleTimeoutMs == nil || src == nil { return false } - if strings.Compare(*p.Path, *src) != 0 { + if *p.InsertVisibleTimeoutMs != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field12DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field19DeepEqual(src map[string]string) bool { - if p.Columns == src { - return true - } else if p.Columns == nil || src == nil { + if len(p.SessionVariables) != len(src) { return false } - if strings.Compare(*p.Columns, *src) != 0 { - return false + for k, v := range p.SessionVariables { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } } return true } -func (p *TStreamLoadPutRequest) Field13DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field20DeepEqual(src *bool) bool { - if p.Where == src { + if p.FoldConstantByBe == src { return true - } else if p.Where == nil || src == nil { + } else if p.FoldConstantByBe == nil || src == nil { return false } - if strings.Compare(*p.Where, *src) != 0 { + if *p.FoldConstantByBe != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field14DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field21DeepEqual(src map[string]string) bool { - if p.ColumnSeparator == src { - return true - } else if p.ColumnSeparator == nil || src == nil { + if len(p.TraceCarrier) != len(src) { return false } - if strings.Compare(*p.ColumnSeparator, *src) != 0 { - return false + for k, v := range p.TraceCarrier { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } } return true } -func (p *TStreamLoadPutRequest) Field15DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field22DeepEqual(src *string) bool { - if p.Partitions == src { + if p.ClientNodeHost == src { return true - } else if p.Partitions == nil || src == nil { + } else if p.ClientNodeHost == nil || src == nil { return false } - if strings.Compare(*p.Partitions, *src) != 0 { + if strings.Compare(*p.ClientNodeHost, *src) != 0 { return false } return true } -func (p *TStreamLoadPutRequest) Field16DeepEqual(src *int64) bool { +func (p *TMasterOpRequest) Field23DeepEqual(src *int32) bool { - if p.AuthCode == src { + if p.ClientNodePort == src { return true - } else if p.AuthCode == nil || src == nil { + } else if p.ClientNodePort == nil || src == nil { return false } - if *p.AuthCode != *src { + if *p.ClientNodePort != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field17DeepEqual(src *bool) bool { +func (p *TMasterOpRequest) Field24DeepEqual(src *bool) bool { - if p.Negative == src { + if p.SyncJournalOnly == src { return true - } else if p.Negative == nil || src == nil { + } else if p.SyncJournalOnly == nil || src == nil { return false } - if *p.Negative != *src { + if *p.SyncJournalOnly != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field18DeepEqual(src *int32) bool { +func (p *TMasterOpRequest) Field25DeepEqual(src *string) bool { - if p.Timeout == src { + if p.DefaultCatalog == src { return true - } else if p.Timeout == nil || src == nil { + } else if 
p.DefaultCatalog == nil || src == nil { return false } - if *p.Timeout != *src { + if strings.Compare(*p.DefaultCatalog, *src) != 0 { return false } return true } -func (p *TStreamLoadPutRequest) Field19DeepEqual(src *bool) bool { +func (p *TMasterOpRequest) Field26DeepEqual(src *string) bool { - if p.StrictMode == src { + if p.DefaultDatabase == src { return true - } else if p.StrictMode == nil || src == nil { + } else if p.DefaultDatabase == nil || src == nil { return false } - if *p.StrictMode != *src { + if strings.Compare(*p.DefaultDatabase, *src) != 0 { return false } return true } -func (p *TStreamLoadPutRequest) Field20DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field27DeepEqual(src *bool) bool { - if p.Timezone == src { + if p.CancelQeury == src { return true - } else if p.Timezone == nil || src == nil { + } else if p.CancelQeury == nil || src == nil { return false } - if strings.Compare(*p.Timezone, *src) != 0 { + if *p.CancelQeury != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field21DeepEqual(src *int64) bool { +func (p *TMasterOpRequest) Field28DeepEqual(src map[string]*exprs.TExprNode) bool { - if p.ExecMemLimit == src { - return true - } else if p.ExecMemLimit == nil || src == nil { + if len(p.UserVariables) != len(src) { return false } - if *p.ExecMemLimit != *src { - return false + for k, v := range p.UserVariables { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TStreamLoadPutRequest) Field22DeepEqual(src *bool) bool { +func (p *TMasterOpRequest) Field29DeepEqual(src *TTxnLoadInfo) bool { - if p.IsTempPartition == src { - return true - } else if p.IsTempPartition == nil || src == nil { - return false - } - if *p.IsTempPartition != *src { + if !p.TxnLoadInfo.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field23DeepEqual(src *bool) bool { +func (p *TMasterOpRequest) Field30DeepEqual(src *TGroupCommitInfo) bool { - if p.StripOuterArray == src { - return true - } else if p.StripOuterArray == nil || src == nil { - return false - } - if *p.StripOuterArray != *src { + if !p.GroupCommitInfo.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field24DeepEqual(src *string) bool { +func (p *TMasterOpRequest) Field1000DeepEqual(src *string) bool { - if p.Jsonpaths == src { + if p.CloudCluster == src { return true - } else if p.Jsonpaths == nil || src == nil { + } else if p.CloudCluster == nil || src == nil { return false } - if strings.Compare(*p.Jsonpaths, *src) != 0 { + if strings.Compare(*p.CloudCluster, *src) != 0 { return false } return true } -func (p *TStreamLoadPutRequest) Field25DeepEqual(src *int64) bool { +func (p *TMasterOpRequest) Field1001DeepEqual(src *bool) bool { - if p.ThriftRpcTimeoutMs == src { + if p.NoAuth == src { return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { + } else if p.NoAuth == nil || src == nil { return false } - if *p.ThriftRpcTimeoutMs != *src { + if *p.NoAuth != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field26DeepEqual(src *string) bool { - if p.JsonRoot == src { - return true - } else if p.JsonRoot == nil || src == nil { - return false - } - if strings.Compare(*p.JsonRoot, *src) != 0 { - return false - } - return true +type TColumnDefinition struct { + ColumnName string `thrift:"columnName,1,required" frugal:"1,required,string" json:"columnName"` + ColumnType *types.TColumnType `thrift:"columnType,2,required" frugal:"2,required,types.TColumnType" json:"columnType"` 
+ AggType *types.TAggregationType `thrift:"aggType,3,optional" frugal:"3,optional,TAggregationType" json:"aggType,omitempty"` + DefaultValue *string `thrift:"defaultValue,4,optional" frugal:"4,optional,string" json:"defaultValue,omitempty"` } -func (p *TStreamLoadPutRequest) Field27DeepEqual(src *types.TMergeType) bool { - if p.MergeType == src { - return true - } else if p.MergeType == nil || src == nil { - return false - } - if *p.MergeType != *src { - return false - } - return true +func NewTColumnDefinition() *TColumnDefinition { + return &TColumnDefinition{} } -func (p *TStreamLoadPutRequest) Field28DeepEqual(src *string) bool { - if p.DeleteCondition == src { - return true - } else if p.DeleteCondition == nil || src == nil { - return false - } - if strings.Compare(*p.DeleteCondition, *src) != 0 { - return false - } - return true +func (p *TColumnDefinition) InitDefault() { } -func (p *TStreamLoadPutRequest) Field29DeepEqual(src *string) bool { - if p.SequenceCol == src { - return true - } else if p.SequenceCol == nil || src == nil { - return false - } - if strings.Compare(*p.SequenceCol, *src) != 0 { - return false - } - return true +func (p *TColumnDefinition) GetColumnName() (v string) { + return p.ColumnName } -func (p *TStreamLoadPutRequest) Field30DeepEqual(src *bool) bool { - if p.NumAsString == src { - return true - } else if p.NumAsString == nil || src == nil { - return false - } - if *p.NumAsString != *src { - return false +var TColumnDefinition_ColumnType_DEFAULT *types.TColumnType + +func (p *TColumnDefinition) GetColumnType() (v *types.TColumnType) { + if !p.IsSetColumnType() { + return TColumnDefinition_ColumnType_DEFAULT } - return true + return p.ColumnType } -func (p *TStreamLoadPutRequest) Field31DeepEqual(src *bool) bool { - if p.FuzzyParse == src { - return true - } else if p.FuzzyParse == nil || src == nil { - return false - } - if *p.FuzzyParse != *src { - return false +var TColumnDefinition_AggType_DEFAULT types.TAggregationType + +func (p *TColumnDefinition) GetAggType() (v types.TAggregationType) { + if !p.IsSetAggType() { + return TColumnDefinition_AggType_DEFAULT } - return true + return *p.AggType } -func (p *TStreamLoadPutRequest) Field32DeepEqual(src *string) bool { - if p.LineDelimiter == src { - return true - } else if p.LineDelimiter == nil || src == nil { - return false - } - if strings.Compare(*p.LineDelimiter, *src) != 0 { - return false +var TColumnDefinition_DefaultValue_DEFAULT string + +func (p *TColumnDefinition) GetDefaultValue() (v string) { + if !p.IsSetDefaultValue() { + return TColumnDefinition_DefaultValue_DEFAULT } - return true + return *p.DefaultValue +} +func (p *TColumnDefinition) SetColumnName(val string) { + p.ColumnName = val +} +func (p *TColumnDefinition) SetColumnType(val *types.TColumnType) { + p.ColumnType = val +} +func (p *TColumnDefinition) SetAggType(val *types.TAggregationType) { + p.AggType = val +} +func (p *TColumnDefinition) SetDefaultValue(val *string) { + p.DefaultValue = val } -func (p *TStreamLoadPutRequest) Field33DeepEqual(src *bool) bool { - if p.ReadJsonByLine == src { - return true - } else if p.ReadJsonByLine == nil || src == nil { - return false - } - if *p.ReadJsonByLine != *src { - return false - } - return true +var fieldIDToName_TColumnDefinition = map[int16]string{ + 1: "columnName", + 2: "columnType", + 3: "aggType", + 4: "defaultValue", } -func (p *TStreamLoadPutRequest) Field34DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false 
- } - if strings.Compare(*p.Token, *src) != 0 { - return false - } - return true +func (p *TColumnDefinition) IsSetColumnType() bool { + return p.ColumnType != nil } -func (p *TStreamLoadPutRequest) Field35DeepEqual(src *int32) bool { - if p.SendBatchParallelism == src { - return true - } else if p.SendBatchParallelism == nil || src == nil { - return false - } - if *p.SendBatchParallelism != *src { - return false - } - return true +func (p *TColumnDefinition) IsSetAggType() bool { + return p.AggType != nil } -func (p *TStreamLoadPutRequest) Field36DeepEqual(src *float64) bool { - if p.MaxFilterRatio == src { - return true - } else if p.MaxFilterRatio == nil || src == nil { - return false - } - if *p.MaxFilterRatio != *src { - return false - } - return true +func (p *TColumnDefinition) IsSetDefaultValue() bool { + return p.DefaultValue != nil } -func (p *TStreamLoadPutRequest) Field37DeepEqual(src *bool) bool { - if p.LoadToSingleTablet == src { - return true - } else if p.LoadToSingleTablet == nil || src == nil { - return false - } - if *p.LoadToSingleTablet != *src { - return false +func (p *TColumnDefinition) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetColumnName bool = false + var issetColumnType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return true -} -func (p *TStreamLoadPutRequest) Field38DeepEqual(src *string) bool { - if p.HeaderType == src { - return true - } else if p.HeaderType == nil || src == nil { - return false + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetColumnName = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetColumnType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if strings.Compare(*p.HeaderType, *src) != 0 { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true -} -func (p *TStreamLoadPutRequest) Field39DeepEqual(src *string) bool { - if p.HiddenColumns == src { - return true - } else if p.HiddenColumns == nil || src == nil { - return false + if !issetColumnName { + fieldId = 1 + goto RequiredFieldNotSetError } - if strings.Compare(*p.HiddenColumns, *src) != 0 { - return false + + if !issetColumnType { + fieldId = 2 + goto RequiredFieldNotSetError } - return true + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), 
err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDefinition[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TColumnDefinition[fieldId])) } -func (p *TStreamLoadPutRequest) Field40DeepEqual(src *plannodes.TFileCompressType) bool { - if p.CompressType == src { - return true - } else if p.CompressType == nil || src == nil { - return false +func (p *TColumnDefinition) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v } - if *p.CompressType != *src { - return false + p.ColumnName = _field + return nil +} +func (p *TColumnDefinition) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTColumnType() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.ColumnType = _field + return nil } -func (p *TStreamLoadPutRequest) Field41DeepEqual(src *int64) bool { +func (p *TColumnDefinition) ReadField3(iprot thrift.TProtocol) error { - if p.FileSize == src { - return true - } else if p.FileSize == nil || src == nil { - return false - } - if *p.FileSize != *src { - return false + var _field *types.TAggregationType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TAggregationType(v) + _field = &tmp } - return true + p.AggType = _field + return nil } -func (p *TStreamLoadPutRequest) Field42DeepEqual(src *bool) bool { +func (p *TColumnDefinition) ReadField4(iprot thrift.TProtocol) error { - if p.TrimDoubleQuotes == src { - return true - } else if p.TrimDoubleQuotes == nil || src == nil { - return false - } - if *p.TrimDoubleQuotes != *src { - return false + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return true + p.DefaultValue = _field + return nil } -func (p *TStreamLoadPutRequest) Field43DeepEqual(src *int32) bool { - if p.SkipLines == src { - return true - } else if p.SkipLines == nil || src == nil { - return false - } - if *p.SkipLines != *src { - return false +func (p *TColumnDefinition) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TColumnDefinition"); err != nil { + goto WriteStructBeginError } - return true + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return 
thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TStreamLoadPutRequest) Field44DeepEqual(src *bool) bool { - if p.EnableProfile == src { - return true - } else if p.EnableProfile == nil || src == nil { - return false +func (p *TColumnDefinition) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("columnName", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError } - if *p.EnableProfile != *src { - return false + if err := oprot.WriteString(p.ColumnName); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TStreamLoadPutRequest) Field45DeepEqual(src *bool) bool { - if p.PartialUpdate == src { - return true - } else if p.PartialUpdate == nil || src == nil { - return false +func (p *TColumnDefinition) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("columnType", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError } - if *p.PartialUpdate != *src { - return false + if err := p.ColumnType.Write(oprot); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TStreamLoadPutRequest) Field46DeepEqual(src []string) bool { - if len(p.TableNames) != len(src) { - return false - } - for i, v := range p.TableNames { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false +func (p *TColumnDefinition) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetAggType() { + if err = oprot.WriteFieldBegin("aggType", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.AggType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TStreamLoadPutRequest) Field47DeepEqual(src *string) bool { - if p.LoadSql == src { - return true - } else if p.LoadSql == nil || src == nil { - return false +func (p *TColumnDefinition) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultValue() { + if err = oprot.WriteFieldBegin("defaultValue", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DefaultValue); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if strings.Compare(*p.LoadSql, *src) != 0 { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TColumnDefinition) String() string { + if p == nil { + return "" } - return true + return 
fmt.Sprintf("TColumnDefinition(%+v)", *p) + } -func (p *TStreamLoadPutRequest) Field48DeepEqual(src *int64) bool { - if p.BackendId == src { +func (p *TColumnDefinition) DeepEqual(ano *TColumnDefinition) bool { + if p == ano { return true - } else if p.BackendId == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.BackendId != *src { + if !p.Field1DeepEqual(ano.ColumnName) { return false } - return true -} -func (p *TStreamLoadPutRequest) Field49DeepEqual(src *int32) bool { - - if p.Version == src { - return true - } else if p.Version == nil || src == nil { + if !p.Field2DeepEqual(ano.ColumnType) { return false } - if *p.Version != *src { + if !p.Field3DeepEqual(ano.AggType) { + return false + } + if !p.Field4DeepEqual(ano.DefaultValue) { return false } return true } -func (p *TStreamLoadPutRequest) Field50DeepEqual(src *string) bool { - if p.Label == src { - return true - } else if p.Label == nil || src == nil { - return false - } - if strings.Compare(*p.Label, *src) != 0 { +func (p *TColumnDefinition) Field1DeepEqual(src string) bool { + + if strings.Compare(p.ColumnName, src) != 0 { return false } return true } -func (p *TStreamLoadPutRequest) Field51DeepEqual(src *int8) bool { +func (p *TColumnDefinition) Field2DeepEqual(src *types.TColumnType) bool { - if p.Enclose == src { - return true - } else if p.Enclose == nil || src == nil { - return false - } - if *p.Enclose != *src { + if !p.ColumnType.DeepEqual(src) { return false } return true } -func (p *TStreamLoadPutRequest) Field52DeepEqual(src *int8) bool { +func (p *TColumnDefinition) Field3DeepEqual(src *types.TAggregationType) bool { - if p.Escape == src { + if p.AggType == src { return true - } else if p.Escape == nil || src == nil { + } else if p.AggType == nil || src == nil { return false } - if *p.Escape != *src { + if *p.AggType != *src { return false } return true } -func (p *TStreamLoadPutRequest) Field53DeepEqual(src *bool) bool { +func (p *TColumnDefinition) Field4DeepEqual(src *string) bool { - if p.MemtableOnSinkNode == src { + if p.DefaultValue == src { return true - } else if p.MemtableOnSinkNode == nil || src == nil { + } else if p.DefaultValue == nil || src == nil { return false } - if *p.MemtableOnSinkNode != *src { + if strings.Compare(*p.DefaultValue, *src) != 0 { return false } return true } -type TStreamLoadPutResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` - Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,2,optional" frugal:"2,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"` - PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,3,optional" frugal:"3,optional,palointernalservice.TPipelineFragmentParams" json:"pipeline_params,omitempty"` - BaseSchemaVersion *int64 `thrift:"base_schema_version,4,optional" frugal:"4,optional,i64" json:"base_schema_version,omitempty"` -} - -func NewTStreamLoadPutResult_() *TStreamLoadPutResult_ { - return &TStreamLoadPutResult_{} -} - -func (p *TStreamLoadPutResult_) InitDefault() { - *p = TStreamLoadPutResult_{} -} - -var TStreamLoadPutResult__Status_DEFAULT *status.TStatus - -func (p *TStreamLoadPutResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TStreamLoadPutResult__Status_DEFAULT - } - return p.Status -} - -var TStreamLoadPutResult__Params_DEFAULT *palointernalservice.TExecPlanFragmentParams - -func (p *TStreamLoadPutResult_) GetParams() (v 
*palointernalservice.TExecPlanFragmentParams) { - if !p.IsSetParams() { - return TStreamLoadPutResult__Params_DEFAULT - } - return p.Params -} - -var TStreamLoadPutResult__PipelineParams_DEFAULT *palointernalservice.TPipelineFragmentParams - -func (p *TStreamLoadPutResult_) GetPipelineParams() (v *palointernalservice.TPipelineFragmentParams) { - if !p.IsSetPipelineParams() { - return TStreamLoadPutResult__PipelineParams_DEFAULT - } - return p.PipelineParams -} - -var TStreamLoadPutResult__BaseSchemaVersion_DEFAULT int64 - -func (p *TStreamLoadPutResult_) GetBaseSchemaVersion() (v int64) { - if !p.IsSetBaseSchemaVersion() { - return TStreamLoadPutResult__BaseSchemaVersion_DEFAULT - } - return *p.BaseSchemaVersion -} -func (p *TStreamLoadPutResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TStreamLoadPutResult_) SetParams(val *palointernalservice.TExecPlanFragmentParams) { - p.Params = val -} -func (p *TStreamLoadPutResult_) SetPipelineParams(val *palointernalservice.TPipelineFragmentParams) { - p.PipelineParams = val -} -func (p *TStreamLoadPutResult_) SetBaseSchemaVersion(val *int64) { - p.BaseSchemaVersion = val +type TShowResultSetMetaData struct { + Columns []*TColumnDefinition `thrift:"columns,1,required" frugal:"1,required,list" json:"columns"` } -var fieldIDToName_TStreamLoadPutResult_ = map[int16]string{ - 1: "status", - 2: "params", - 3: "pipeline_params", - 4: "base_schema_version", +func NewTShowResultSetMetaData() *TShowResultSetMetaData { + return &TShowResultSetMetaData{} } -func (p *TStreamLoadPutResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TShowResultSetMetaData) InitDefault() { } -func (p *TStreamLoadPutResult_) IsSetParams() bool { - return p.Params != nil +func (p *TShowResultSetMetaData) GetColumns() (v []*TColumnDefinition) { + return p.Columns } - -func (p *TStreamLoadPutResult_) IsSetPipelineParams() bool { - return p.PipelineParams != nil +func (p *TShowResultSetMetaData) SetColumns(val []*TColumnDefinition) { + p.Columns = val } -func (p *TStreamLoadPutResult_) IsSetBaseSchemaVersion() bool { - return p.BaseSchemaVersion != nil +var fieldIDToName_TShowResultSetMetaData = map[int16]string{ + 1: "columns", } -func (p *TStreamLoadPutResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TShowResultSetMetaData) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false + var issetColumns bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -26421,52 +23910,19 @@ func (p *TStreamLoadPutResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetColumns = 
true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -26475,7 +23931,7 @@ func (p *TStreamLoadPutResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { + if !issetColumns { fieldId = 1 goto RequiredFieldNotSetError } @@ -26485,7 +23941,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSetMetaData[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -26494,45 +23950,36 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSetMetaData[fieldId])) } -func (p *TStreamLoadPutResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TShowResultSetMetaData) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } - return nil -} + _field := make([]*TColumnDefinition, 0, size) + values := make([]TColumnDefinition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TStreamLoadPutResult_) ReadField2(iprot thrift.TProtocol) error { - p.Params = palointernalservice.NewTExecPlanFragmentParams() - if err := p.Params.Read(iprot); err != nil { - return err - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TStreamLoadPutResult_) ReadField3(iprot thrift.TProtocol) error { - p.PipelineParams = palointernalservice.NewTPipelineFragmentParams() - if err := p.PipelineParams.Read(iprot); err != nil { - return err + _field = append(_field, _elem) } - return nil -} - -func (p *TStreamLoadPutResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.BaseSchemaVersion = &v } + p.Columns = _field return nil } -func (p *TStreamLoadPutResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TShowResultSetMetaData) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TStreamLoadPutResult"); err != nil { + if err = oprot.WriteStructBegin("TShowResultSetMetaData"); err != nil { goto WriteStructBeginError } if p != nil { @@ -26540,19 +23987,6 @@ func (p *TStreamLoadPutResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - } if 
err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -26571,215 +24005,111 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TStreamLoadPutResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TShowResultSetMetaData) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("columns", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TStreamLoadPutResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetParams() { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.Params.Write(oprot); err != nil { + for _, v := range p.Columns { + if err := v.Write(oprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TStreamLoadPutResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPipelineParams() { - if err = oprot.WriteFieldBegin("pipeline_params", thrift.STRUCT, 3); err != nil { - goto WriteFieldBeginError - } - if err := p.PipelineParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err := oprot.WriteListEnd(); err != nil { + return err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TStreamLoadPutResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetBaseSchemaVersion() { - if err = oprot.WriteFieldBegin("base_schema_version", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BaseSchemaVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TStreamLoadPutResult_) String() string { +func (p *TShowResultSetMetaData) String() string { if p == nil { return "" } - return fmt.Sprintf("TStreamLoadPutResult_(%+v)", *p) + return fmt.Sprintf("TShowResultSetMetaData(%+v)", *p) + } -func (p *TStreamLoadPutResult_) DeepEqual(ano *TStreamLoadPutResult_) bool { +func (p *TShowResultSetMetaData) DeepEqual(ano 
*TShowResultSetMetaData) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Params) { - return false - } - if !p.Field3DeepEqual(ano.PipelineParams) { - return false - } - if !p.Field4DeepEqual(ano.BaseSchemaVersion) { + if !p.Field1DeepEqual(ano.Columns) { return false } return true } -func (p *TStreamLoadPutResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TShowResultSetMetaData) Field1DeepEqual(src []*TColumnDefinition) bool { - if !p.Status.DeepEqual(src) { + if len(p.Columns) != len(src) { return false } + for i, v := range p.Columns { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -func (p *TStreamLoadPutResult_) Field2DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool { - if !p.Params.DeepEqual(src) { - return false - } - return true +type TShowResultSet struct { + MetaData *TShowResultSetMetaData `thrift:"metaData,1,required" frugal:"1,required,TShowResultSetMetaData" json:"metaData"` + ResultRows [][]string `thrift:"resultRows,2,required" frugal:"2,required,list>" json:"resultRows"` } -func (p *TStreamLoadPutResult_) Field3DeepEqual(src *palointernalservice.TPipelineFragmentParams) bool { - if !p.PipelineParams.DeepEqual(src) { - return false - } - return true +func NewTShowResultSet() *TShowResultSet { + return &TShowResultSet{} } -func (p *TStreamLoadPutResult_) Field4DeepEqual(src *int64) bool { - if p.BaseSchemaVersion == src { - return true - } else if p.BaseSchemaVersion == nil || src == nil { - return false - } - if *p.BaseSchemaVersion != *src { - return false +func (p *TShowResultSet) InitDefault() { +} + +var TShowResultSet_MetaData_DEFAULT *TShowResultSetMetaData + +func (p *TShowResultSet) GetMetaData() (v *TShowResultSetMetaData) { + if !p.IsSetMetaData() { + return TShowResultSet_MetaData_DEFAULT } - return true + return p.MetaData } -type TStreamLoadMultiTablePutResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` - Params []*palointernalservice.TExecPlanFragmentParams `thrift:"params,2,optional" frugal:"2,optional,list" json:"params,omitempty"` - PipelineParams []*palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,3,optional" frugal:"3,optional,list" json:"pipeline_params,omitempty"` +func (p *TShowResultSet) GetResultRows() (v [][]string) { + return p.ResultRows +} +func (p *TShowResultSet) SetMetaData(val *TShowResultSetMetaData) { + p.MetaData = val +} +func (p *TShowResultSet) SetResultRows(val [][]string) { + p.ResultRows = val } -func NewTStreamLoadMultiTablePutResult_() *TStreamLoadMultiTablePutResult_ { - return &TStreamLoadMultiTablePutResult_{} +var fieldIDToName_TShowResultSet = map[int16]string{ + 1: "metaData", + 2: "resultRows", } -func (p *TStreamLoadMultiTablePutResult_) InitDefault() { - *p = TStreamLoadMultiTablePutResult_{} +func (p *TShowResultSet) IsSetMetaData() bool { + return p.MetaData != nil } -var TStreamLoadMultiTablePutResult__Status_DEFAULT *status.TStatus - -func (p *TStreamLoadMultiTablePutResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TStreamLoadMultiTablePutResult__Status_DEFAULT - } - return p.Status -} - -var TStreamLoadMultiTablePutResult__Params_DEFAULT []*palointernalservice.TExecPlanFragmentParams - -func (p *TStreamLoadMultiTablePutResult_) GetParams() (v []*palointernalservice.TExecPlanFragmentParams) { - if !p.IsSetParams() { - return 
TStreamLoadMultiTablePutResult__Params_DEFAULT - } - return p.Params -} - -var TStreamLoadMultiTablePutResult__PipelineParams_DEFAULT []*palointernalservice.TPipelineFragmentParams - -func (p *TStreamLoadMultiTablePutResult_) GetPipelineParams() (v []*palointernalservice.TPipelineFragmentParams) { - if !p.IsSetPipelineParams() { - return TStreamLoadMultiTablePutResult__PipelineParams_DEFAULT - } - return p.PipelineParams -} -func (p *TStreamLoadMultiTablePutResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TStreamLoadMultiTablePutResult_) SetParams(val []*palointernalservice.TExecPlanFragmentParams) { - p.Params = val -} -func (p *TStreamLoadMultiTablePutResult_) SetPipelineParams(val []*palointernalservice.TPipelineFragmentParams) { - p.PipelineParams = val -} - -var fieldIDToName_TStreamLoadMultiTablePutResult_ = map[int16]string{ - 1: "status", - 2: "params", - 3: "pipeline_params", -} - -func (p *TStreamLoadMultiTablePutResult_) IsSetStatus() bool { - return p.Status != nil -} - -func (p *TStreamLoadMultiTablePutResult_) IsSetParams() bool { - return p.Params != nil -} - -func (p *TStreamLoadMultiTablePutResult_) IsSetPipelineParams() bool { - return p.PipelineParams != nil -} - -func (p *TStreamLoadMultiTablePutResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TShowResultSet) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false + var issetMetaData bool = false + var issetResultRows bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -26800,38 +24130,24 @@ func (p *TStreamLoadMultiTablePutResult_) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetMetaData = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetResultRows = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -26840,17 +24156,22 @@ func (p *TStreamLoadMultiTablePutResult_) Read(iprot thrift.TProtocol) (err erro goto ReadStructEndError } - if !issetStatus { + if !issetMetaData { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetResultRows { + fieldId = 2 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSet[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) @@ -26859,60 +24180,56 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSet[fieldId])) } -func (p *TStreamLoadMultiTablePutResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TShowResultSet) ReadField1(iprot thrift.TProtocol) error { + _field := NewTShowResultSetMetaData() + if err := _field.Read(iprot); err != nil { return err } + p.MetaData = _field return nil } - -func (p *TStreamLoadMultiTablePutResult_) ReadField2(iprot thrift.TProtocol) error { +func (p *TShowResultSet) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Params = make([]*palointernalservice.TExecPlanFragmentParams, 0, size) + _field := make([][]string, 0, size) for i := 0; i < size; i++ { - _elem := palointernalservice.NewTExecPlanFragmentParams() - if err := _elem.Read(iprot); err != nil { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { - p.Params = append(p.Params, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} + var _elem1 string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem1 = v + } -func (p *TStreamLoadMultiTablePutResult_) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.PipelineParams = make([]*palointernalservice.TPipelineFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := palointernalservice.NewTPipelineFragmentParams() - if err := _elem.Read(iprot); err != nil { + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { return err } - p.PipelineParams = append(p.PipelineParams, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ResultRows = _field return nil } -func (p *TStreamLoadMultiTablePutResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TShowResultSet) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TStreamLoadMultiTablePutResult"); err != nil { + if err = oprot.WriteStructBegin("TShowResultSet"); err != nil { goto WriteStructBeginError } if p != nil { @@ -26924,11 +24241,6 @@ func (p *TStreamLoadMultiTablePutResult_) Write(oprot thrift.TProtocol) (err err fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -26947,11 +24259,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TStreamLoadMultiTablePutResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TShowResultSet) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("metaData", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != 
nil { + if err := p.MetaData.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -26964,25 +24276,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TStreamLoadMultiTablePutResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetParams() { - if err = oprot.WriteFieldBegin("params", thrift.LIST, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Params)); err != nil { +func (p *TShowResultSet) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("resultRows", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.LIST, len(p.ResultRows)); err != nil { + return err + } + for _, v := range p.ResultRows { + if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { return err } - for _, v := range p.Params { - if err := v.Write(oprot); err != nil { + for _, v := range v { + if err := oprot.WriteString(v); err != nil { return err } } if err := oprot.WriteListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -26991,218 +24309,237 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TStreamLoadMultiTablePutResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPipelineParams() { - if err = oprot.WriteFieldBegin("pipeline_params", thrift.LIST, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PipelineParams)); err != nil { - return err - } - for _, v := range p.PipelineParams { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TStreamLoadMultiTablePutResult_) String() string { +func (p *TShowResultSet) String() string { if p == nil { return "" } - return fmt.Sprintf("TStreamLoadMultiTablePutResult_(%+v)", *p) + return fmt.Sprintf("TShowResultSet(%+v)", *p) + } -func (p *TStreamLoadMultiTablePutResult_) DeepEqual(ano *TStreamLoadMultiTablePutResult_) bool { +func (p *TShowResultSet) DeepEqual(ano *TShowResultSet) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.MetaData) { return false } - if !p.Field3DeepEqual(ano.PipelineParams) { + if !p.Field2DeepEqual(ano.ResultRows) { return false } return true } -func (p *TStreamLoadMultiTablePutResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TShowResultSet) Field1DeepEqual(src *TShowResultSetMetaData) bool { - if !p.Status.DeepEqual(src) { + if !p.MetaData.DeepEqual(src) { return false } return true } -func (p *TStreamLoadMultiTablePutResult_) Field2DeepEqual(src []*palointernalservice.TExecPlanFragmentParams) bool { +func (p *TShowResultSet) Field2DeepEqual(src [][]string) bool { - if 
len(p.Params) != len(src) { + if len(p.ResultRows) != len(src) { return false } - for i, v := range p.Params { + for i, v := range p.ResultRows { _src := src[i] - if !v.DeepEqual(_src) { + if len(v) != len(_src) { return false } + for i, v := range v { + _src1 := _src[i] + if strings.Compare(v, _src1) != 0 { + return false + } + } } return true } -func (p *TStreamLoadMultiTablePutResult_) Field3DeepEqual(src []*palointernalservice.TPipelineFragmentParams) bool { - if len(p.PipelineParams) != len(src) { - return false - } - for i, v := range p.PipelineParams { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +type TMasterOpResult_ struct { + MaxJournalId int64 `thrift:"maxJournalId,1,required" frugal:"1,required,i64" json:"maxJournalId"` + Packet []byte `thrift:"packet,2,required" frugal:"2,required,binary" json:"packet"` + ResultSet *TShowResultSet `thrift:"resultSet,3,optional" frugal:"3,optional,TShowResultSet" json:"resultSet,omitempty"` + QueryId *types.TUniqueId `thrift:"queryId,4,optional" frugal:"4,optional,types.TUniqueId" json:"queryId,omitempty"` + Status *string `thrift:"status,5,optional" frugal:"5,optional,string" json:"status,omitempty"` + StatusCode *int32 `thrift:"statusCode,6,optional" frugal:"6,optional,i32" json:"statusCode,omitempty"` + ErrMessage *string `thrift:"errMessage,7,optional" frugal:"7,optional,string" json:"errMessage,omitempty"` + QueryResultBufList [][]byte `thrift:"queryResultBufList,8,optional" frugal:"8,optional,list" json:"queryResultBufList,omitempty"` + TxnLoadInfo *TTxnLoadInfo `thrift:"txnLoadInfo,9,optional" frugal:"9,optional,TTxnLoadInfo" json:"txnLoadInfo,omitempty"` + GroupCommitLoadBeId *int64 `thrift:"groupCommitLoadBeId,10,optional" frugal:"10,optional,i64" json:"groupCommitLoadBeId,omitempty"` } -type TStreamLoadWithLoadStatusResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` - TotalRows *int64 `thrift:"total_rows,3,optional" frugal:"3,optional,i64" json:"total_rows,omitempty"` - LoadedRows *int64 `thrift:"loaded_rows,4,optional" frugal:"4,optional,i64" json:"loaded_rows,omitempty"` - FilteredRows *int64 `thrift:"filtered_rows,5,optional" frugal:"5,optional,i64" json:"filtered_rows,omitempty"` - UnselectedRows *int64 `thrift:"unselected_rows,6,optional" frugal:"6,optional,i64" json:"unselected_rows,omitempty"` +func NewTMasterOpResult_() *TMasterOpResult_ { + return &TMasterOpResult_{} } -func NewTStreamLoadWithLoadStatusResult_() *TStreamLoadWithLoadStatusResult_ { - return &TStreamLoadWithLoadStatusResult_{} +func (p *TMasterOpResult_) InitDefault() { } -func (p *TStreamLoadWithLoadStatusResult_) InitDefault() { - *p = TStreamLoadWithLoadStatusResult_{} +func (p *TMasterOpResult_) GetMaxJournalId() (v int64) { + return p.MaxJournalId } -var TStreamLoadWithLoadStatusResult__Status_DEFAULT *status.TStatus +func (p *TMasterOpResult_) GetPacket() (v []byte) { + return p.Packet +} -func (p *TStreamLoadWithLoadStatusResult_) GetStatus() (v *status.TStatus) { +var TMasterOpResult__ResultSet_DEFAULT *TShowResultSet + +func (p *TMasterOpResult_) GetResultSet() (v *TShowResultSet) { + if !p.IsSetResultSet() { + return TMasterOpResult__ResultSet_DEFAULT + } + return p.ResultSet +} + +var TMasterOpResult__QueryId_DEFAULT *types.TUniqueId + +func (p *TMasterOpResult_) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return 
TMasterOpResult__QueryId_DEFAULT + } + return p.QueryId +} + +var TMasterOpResult__Status_DEFAULT string + +func (p *TMasterOpResult_) GetStatus() (v string) { if !p.IsSetStatus() { - return TStreamLoadWithLoadStatusResult__Status_DEFAULT + return TMasterOpResult__Status_DEFAULT } - return p.Status + return *p.Status } -var TStreamLoadWithLoadStatusResult__TxnId_DEFAULT int64 +var TMasterOpResult__StatusCode_DEFAULT int32 -func (p *TStreamLoadWithLoadStatusResult_) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TStreamLoadWithLoadStatusResult__TxnId_DEFAULT +func (p *TMasterOpResult_) GetStatusCode() (v int32) { + if !p.IsSetStatusCode() { + return TMasterOpResult__StatusCode_DEFAULT } - return *p.TxnId + return *p.StatusCode } -var TStreamLoadWithLoadStatusResult__TotalRows_DEFAULT int64 +var TMasterOpResult__ErrMessage_DEFAULT string -func (p *TStreamLoadWithLoadStatusResult_) GetTotalRows() (v int64) { - if !p.IsSetTotalRows() { - return TStreamLoadWithLoadStatusResult__TotalRows_DEFAULT +func (p *TMasterOpResult_) GetErrMessage() (v string) { + if !p.IsSetErrMessage() { + return TMasterOpResult__ErrMessage_DEFAULT } - return *p.TotalRows + return *p.ErrMessage } -var TStreamLoadWithLoadStatusResult__LoadedRows_DEFAULT int64 +var TMasterOpResult__QueryResultBufList_DEFAULT [][]byte -func (p *TStreamLoadWithLoadStatusResult_) GetLoadedRows() (v int64) { - if !p.IsSetLoadedRows() { - return TStreamLoadWithLoadStatusResult__LoadedRows_DEFAULT +func (p *TMasterOpResult_) GetQueryResultBufList() (v [][]byte) { + if !p.IsSetQueryResultBufList() { + return TMasterOpResult__QueryResultBufList_DEFAULT } - return *p.LoadedRows + return p.QueryResultBufList } -var TStreamLoadWithLoadStatusResult__FilteredRows_DEFAULT int64 +var TMasterOpResult__TxnLoadInfo_DEFAULT *TTxnLoadInfo -func (p *TStreamLoadWithLoadStatusResult_) GetFilteredRows() (v int64) { - if !p.IsSetFilteredRows() { - return TStreamLoadWithLoadStatusResult__FilteredRows_DEFAULT +func (p *TMasterOpResult_) GetTxnLoadInfo() (v *TTxnLoadInfo) { + if !p.IsSetTxnLoadInfo() { + return TMasterOpResult__TxnLoadInfo_DEFAULT } - return *p.FilteredRows + return p.TxnLoadInfo } -var TStreamLoadWithLoadStatusResult__UnselectedRows_DEFAULT int64 +var TMasterOpResult__GroupCommitLoadBeId_DEFAULT int64 -func (p *TStreamLoadWithLoadStatusResult_) GetUnselectedRows() (v int64) { - if !p.IsSetUnselectedRows() { - return TStreamLoadWithLoadStatusResult__UnselectedRows_DEFAULT +func (p *TMasterOpResult_) GetGroupCommitLoadBeId() (v int64) { + if !p.IsSetGroupCommitLoadBeId() { + return TMasterOpResult__GroupCommitLoadBeId_DEFAULT } - return *p.UnselectedRows + return *p.GroupCommitLoadBeId } -func (p *TStreamLoadWithLoadStatusResult_) SetStatus(val *status.TStatus) { +func (p *TMasterOpResult_) SetMaxJournalId(val int64) { + p.MaxJournalId = val +} +func (p *TMasterOpResult_) SetPacket(val []byte) { + p.Packet = val +} +func (p *TMasterOpResult_) SetResultSet(val *TShowResultSet) { + p.ResultSet = val +} +func (p *TMasterOpResult_) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TMasterOpResult_) SetStatus(val *string) { p.Status = val } -func (p *TStreamLoadWithLoadStatusResult_) SetTxnId(val *int64) { - p.TxnId = val +func (p *TMasterOpResult_) SetStatusCode(val *int32) { + p.StatusCode = val } -func (p *TStreamLoadWithLoadStatusResult_) SetTotalRows(val *int64) { - p.TotalRows = val +func (p *TMasterOpResult_) SetErrMessage(val *string) { + p.ErrMessage = val } -func (p *TStreamLoadWithLoadStatusResult_) SetLoadedRows(val 
*int64) { - p.LoadedRows = val +func (p *TMasterOpResult_) SetQueryResultBufList(val [][]byte) { + p.QueryResultBufList = val } -func (p *TStreamLoadWithLoadStatusResult_) SetFilteredRows(val *int64) { - p.FilteredRows = val +func (p *TMasterOpResult_) SetTxnLoadInfo(val *TTxnLoadInfo) { + p.TxnLoadInfo = val } -func (p *TStreamLoadWithLoadStatusResult_) SetUnselectedRows(val *int64) { - p.UnselectedRows = val +func (p *TMasterOpResult_) SetGroupCommitLoadBeId(val *int64) { + p.GroupCommitLoadBeId = val } -var fieldIDToName_TStreamLoadWithLoadStatusResult_ = map[int16]string{ - 1: "status", - 2: "txn_id", - 3: "total_rows", - 4: "loaded_rows", - 5: "filtered_rows", - 6: "unselected_rows", +var fieldIDToName_TMasterOpResult_ = map[int16]string{ + 1: "maxJournalId", + 2: "packet", + 3: "resultSet", + 4: "queryId", + 5: "status", + 6: "statusCode", + 7: "errMessage", + 8: "queryResultBufList", + 9: "txnLoadInfo", + 10: "groupCommitLoadBeId", } -func (p *TStreamLoadWithLoadStatusResult_) IsSetStatus() bool { +func (p *TMasterOpResult_) IsSetResultSet() bool { + return p.ResultSet != nil +} + +func (p *TMasterOpResult_) IsSetQueryId() bool { + return p.QueryId != nil +} + +func (p *TMasterOpResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TStreamLoadWithLoadStatusResult_) IsSetTxnId() bool { - return p.TxnId != nil +func (p *TMasterOpResult_) IsSetStatusCode() bool { + return p.StatusCode != nil } -func (p *TStreamLoadWithLoadStatusResult_) IsSetTotalRows() bool { - return p.TotalRows != nil +func (p *TMasterOpResult_) IsSetErrMessage() bool { + return p.ErrMessage != nil } -func (p *TStreamLoadWithLoadStatusResult_) IsSetLoadedRows() bool { - return p.LoadedRows != nil +func (p *TMasterOpResult_) IsSetQueryResultBufList() bool { + return p.QueryResultBufList != nil } -func (p *TStreamLoadWithLoadStatusResult_) IsSetFilteredRows() bool { - return p.FilteredRows != nil +func (p *TMasterOpResult_) IsSetTxnLoadInfo() bool { + return p.TxnLoadInfo != nil } -func (p *TStreamLoadWithLoadStatusResult_) IsSetUnselectedRows() bool { - return p.UnselectedRows != nil +func (p *TMasterOpResult_) IsSetGroupCommitLoadBeId() bool { + return p.GroupCommitLoadBeId != nil } -func (p *TStreamLoadWithLoadStatusResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TMasterOpResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetMaxJournalId bool = false + var issetPacket bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -27219,71 +24556,92 @@ func (p *TStreamLoadWithLoadStatusResult_) Read(iprot thrift.TProtocol) (err err switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetMaxJournalId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPacket = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.LIST { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -27292,13 +24650,22 @@ func (p *TStreamLoadWithLoadStatusResult_) Read(iprot thrift.TProtocol) (err err goto ReadStructEndError } + if !issetMaxJournalId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetPacket { + fieldId = 2 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadWithLoadStatusResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -27306,64 +24673,127 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpResult_[fieldId])) } -func (p *TStreamLoadWithLoadStatusResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TMasterOpResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err 
:= iprot.ReadI64(); err != nil { return err + } else { + _field = v } + p.MaxJournalId = _field return nil } +func (p *TMasterOpResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TStreamLoadWithLoadStatusResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { return err } else { - p.TxnId = &v + _field = []byte(v) + } + p.Packet = _field + return nil +} +func (p *TMasterOpResult_) ReadField3(iprot thrift.TProtocol) error { + _field := NewTShowResultSet() + if err := _field.Read(iprot); err != nil { + return err + } + p.ResultSet = _field + return nil +} +func (p *TMasterOpResult_) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err } + p.QueryId = _field return nil } +func (p *TMasterOpResult_) ReadField5(iprot thrift.TProtocol) error { -func (p *TStreamLoadWithLoadStatusResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.TotalRows = &v + _field = &v } + p.Status = _field return nil } +func (p *TMasterOpResult_) ReadField6(iprot thrift.TProtocol) error { -func (p *TStreamLoadWithLoadStatusResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.LoadedRows = &v + _field = &v } + p.StatusCode = _field return nil } +func (p *TMasterOpResult_) ReadField7(iprot thrift.TProtocol) error { -func (p *TStreamLoadWithLoadStatusResult_) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.FilteredRows = &v + _field = &v } + p.ErrMessage = _field return nil } +func (p *TMasterOpResult_) ReadField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([][]byte, 0, size) + for i := 0; i < size; i++ { -func (p *TStreamLoadWithLoadStatusResult_) ReadField6(iprot thrift.TProtocol) error { + var _elem []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _elem = []byte(v) + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.QueryResultBufList = _field + return nil +} +func (p *TMasterOpResult_) ReadField9(iprot thrift.TProtocol) error { + _field := NewTTxnLoadInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.TxnLoadInfo = _field + return nil +} +func (p *TMasterOpResult_) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.UnselectedRows = &v + _field = &v } + p.GroupCommitLoadBeId = _field return nil } -func (p *TStreamLoadWithLoadStatusResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TMasterOpResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TStreamLoadWithLoadStatusResult"); err != nil { + if err = oprot.WriteStructBegin("TMasterOpResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -27391,7 +24821,22 @@ func (p *TStreamLoadWithLoadStatusResult_) Write(oprot thrift.TProtocol) (err er fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + 
goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -27410,36 +24855,32 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil +func (p *TMasterOpResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("maxJournalId", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MaxJournalId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil WriteFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TMasterOpResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("packet", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.Packet)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -27448,12 +24889,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTotalRows() { - if err = oprot.WriteFieldBegin("total_rows", thrift.I64, 3); err != nil { +func (p *TMasterOpResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetResultSet() { + if err = oprot.WriteFieldBegin("resultSet", thrift.STRUCT, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TotalRows); err != nil { + if err := p.ResultSet.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -27467,12 +24908,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadedRows() { - if err = oprot.WriteFieldBegin("loaded_rows", thrift.I64, 4); err != nil { +func (p *TMasterOpResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("queryId", thrift.STRUCT, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LoadedRows); err != nil { + if err := p.QueryId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != 
nil { @@ -27486,12 +24927,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetFilteredRows() { - if err = oprot.WriteFieldBegin("filtered_rows", thrift.I64, 5); err != nil { +func (p *TMasterOpResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRING, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.FilteredRows); err != nil { + if err := oprot.WriteString(*p.Status); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -27505,12 +24946,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUnselectedRows() { - if err = oprot.WriteFieldBegin("unselected_rows", thrift.I64, 6); err != nil { +func (p *TMasterOpResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetStatusCode() { + if err = oprot.WriteFieldBegin("statusCode", thrift.I32, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.UnselectedRows); err != nil { + if err := oprot.WriteI32(*p.StatusCode); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -27524,162 +24965,299 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) String() string { +func (p *TMasterOpResult_) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetErrMessage() { + if err = oprot.WriteFieldBegin("errMessage", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ErrMessage); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TMasterOpResult_) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryResultBufList() { + if err = oprot.WriteFieldBegin("queryResultBufList", thrift.LIST, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.QueryResultBufList)); err != nil { + return err + } + for _, v := range p.QueryResultBufList { + if err := oprot.WriteBinary([]byte(v)); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TMasterOpResult_) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnLoadInfo() { + if err = oprot.WriteFieldBegin("txnLoadInfo", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnLoadInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMasterOpResult_) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitLoadBeId() { + if err = oprot.WriteFieldBegin("groupCommitLoadBeId", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.GroupCommitLoadBeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMasterOpResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TStreamLoadWithLoadStatusResult_(%+v)", *p) + return fmt.Sprintf("TMasterOpResult_(%+v)", *p) + } -func (p *TStreamLoadWithLoadStatusResult_) DeepEqual(ano *TStreamLoadWithLoadStatusResult_) bool { +func (p *TMasterOpResult_) DeepEqual(ano *TMasterOpResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.MaxJournalId) { return false } - if !p.Field2DeepEqual(ano.TxnId) { + if !p.Field2DeepEqual(ano.Packet) { return false } - if !p.Field3DeepEqual(ano.TotalRows) { + if !p.Field3DeepEqual(ano.ResultSet) { return false } - if !p.Field4DeepEqual(ano.LoadedRows) { + if !p.Field4DeepEqual(ano.QueryId) { return false } - if !p.Field5DeepEqual(ano.FilteredRows) { + if !p.Field5DeepEqual(ano.Status) { return false } - if !p.Field6DeepEqual(ano.UnselectedRows) { + if !p.Field6DeepEqual(ano.StatusCode) { + return false + } + if !p.Field7DeepEqual(ano.ErrMessage) { + return false + } + if !p.Field8DeepEqual(ano.QueryResultBufList) { + return false + } + if !p.Field9DeepEqual(ano.TxnLoadInfo) { + return false + } + if !p.Field10DeepEqual(ano.GroupCommitLoadBeId) { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TMasterOpResult_) Field1DeepEqual(src int64) bool { - if !p.Status.DeepEqual(src) { + if p.MaxJournalId != src { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field2DeepEqual(src *int64) bool { +func (p *TMasterOpResult_) Field2DeepEqual(src []byte) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { + if bytes.Compare(p.Packet, src) != 0 { return false } - if *p.TxnId != *src { + return true +} +func (p *TMasterOpResult_) Field3DeepEqual(src *TShowResultSet) bool { + + if !p.ResultSet.DeepEqual(src) { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field3DeepEqual(src *int64) bool { +func (p *TMasterOpResult_) Field4DeepEqual(src *types.TUniqueId) bool { - if p.TotalRows == src { + if !p.QueryId.DeepEqual(src) { + return false + } + return true +} +func (p *TMasterOpResult_) Field5DeepEqual(src *string) bool { + + if p.Status == src { return true - } else if p.TotalRows == nil || src == nil { + } else if p.Status == nil || src == nil { return false } - if *p.TotalRows != *src { + if strings.Compare(*p.Status, *src) != 0 { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field4DeepEqual(src *int64) bool { +func (p *TMasterOpResult_) Field6DeepEqual(src *int32) bool { - if p.LoadedRows == src { + if p.StatusCode == src { return true - } else if p.LoadedRows == nil || src == nil { + } else if 
p.StatusCode == nil || src == nil { return false } - if *p.LoadedRows != *src { + if *p.StatusCode != *src { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field5DeepEqual(src *int64) bool { +func (p *TMasterOpResult_) Field7DeepEqual(src *string) bool { - if p.FilteredRows == src { + if p.ErrMessage == src { return true - } else if p.FilteredRows == nil || src == nil { + } else if p.ErrMessage == nil || src == nil { return false } - if *p.FilteredRows != *src { + if strings.Compare(*p.ErrMessage, *src) != 0 { return false } return true } -func (p *TStreamLoadWithLoadStatusResult_) Field6DeepEqual(src *int64) bool { +func (p *TMasterOpResult_) Field8DeepEqual(src [][]byte) bool { - if p.UnselectedRows == src { + if len(p.QueryResultBufList) != len(src) { + return false + } + for i, v := range p.QueryResultBufList { + _src := src[i] + if bytes.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TMasterOpResult_) Field9DeepEqual(src *TTxnLoadInfo) bool { + + if !p.TxnLoadInfo.DeepEqual(src) { + return false + } + return true +} +func (p *TMasterOpResult_) Field10DeepEqual(src *int64) bool { + + if p.GroupCommitLoadBeId == src { return true - } else if p.UnselectedRows == nil || src == nil { + } else if p.GroupCommitLoadBeId == nil || src == nil { return false } - if *p.UnselectedRows != *src { + if *p.GroupCommitLoadBeId != *src { return false } return true } -type TCheckWalRequest struct { - WalId *int64 `thrift:"wal_id,1,optional" frugal:"1,optional,i64" json:"wal_id,omitempty"` - DbId *int64 `thrift:"db_id,2,optional" frugal:"2,optional,i64" json:"db_id,omitempty"` +type TUpdateExportTaskStatusRequest struct { + ProtocolVersion FrontendServiceVersion `thrift:"protocolVersion,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocolVersion"` + TaskId *types.TUniqueId `thrift:"taskId,2,required" frugal:"2,required,types.TUniqueId" json:"taskId"` + TaskStatus *palointernalservice.TExportStatusResult_ `thrift:"taskStatus,3,required" frugal:"3,required,palointernalservice.TExportStatusResult_" json:"taskStatus"` +} + +func NewTUpdateExportTaskStatusRequest() *TUpdateExportTaskStatusRequest { + return &TUpdateExportTaskStatusRequest{} } -func NewTCheckWalRequest() *TCheckWalRequest { - return &TCheckWalRequest{} +func (p *TUpdateExportTaskStatusRequest) InitDefault() { } -func (p *TCheckWalRequest) InitDefault() { - *p = TCheckWalRequest{} +func (p *TUpdateExportTaskStatusRequest) GetProtocolVersion() (v FrontendServiceVersion) { + return p.ProtocolVersion } -var TCheckWalRequest_WalId_DEFAULT int64 +var TUpdateExportTaskStatusRequest_TaskId_DEFAULT *types.TUniqueId -func (p *TCheckWalRequest) GetWalId() (v int64) { - if !p.IsSetWalId() { - return TCheckWalRequest_WalId_DEFAULT +func (p *TUpdateExportTaskStatusRequest) GetTaskId() (v *types.TUniqueId) { + if !p.IsSetTaskId() { + return TUpdateExportTaskStatusRequest_TaskId_DEFAULT } - return *p.WalId + return p.TaskId } -var TCheckWalRequest_DbId_DEFAULT int64 +var TUpdateExportTaskStatusRequest_TaskStatus_DEFAULT *palointernalservice.TExportStatusResult_ -func (p *TCheckWalRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TCheckWalRequest_DbId_DEFAULT +func (p *TUpdateExportTaskStatusRequest) GetTaskStatus() (v *palointernalservice.TExportStatusResult_) { + if !p.IsSetTaskStatus() { + return TUpdateExportTaskStatusRequest_TaskStatus_DEFAULT } - return *p.DbId + return p.TaskStatus +} +func (p *TUpdateExportTaskStatusRequest) SetProtocolVersion(val FrontendServiceVersion) { 
+ p.ProtocolVersion = val } -func (p *TCheckWalRequest) SetWalId(val *int64) { - p.WalId = val +func (p *TUpdateExportTaskStatusRequest) SetTaskId(val *types.TUniqueId) { + p.TaskId = val } -func (p *TCheckWalRequest) SetDbId(val *int64) { - p.DbId = val +func (p *TUpdateExportTaskStatusRequest) SetTaskStatus(val *palointernalservice.TExportStatusResult_) { + p.TaskStatus = val } -var fieldIDToName_TCheckWalRequest = map[int16]string{ - 1: "wal_id", - 2: "db_id", +var fieldIDToName_TUpdateExportTaskStatusRequest = map[int16]string{ + 1: "protocolVersion", + 2: "taskId", + 3: "taskStatus", } -func (p *TCheckWalRequest) IsSetWalId() bool { - return p.WalId != nil +func (p *TUpdateExportTaskStatusRequest) IsSetTaskId() bool { + return p.TaskId != nil } -func (p *TCheckWalRequest) IsSetDbId() bool { - return p.DbId != nil +func (p *TUpdateExportTaskStatusRequest) IsSetTaskStatus() bool { + return p.TaskStatus != nil } -func (p *TCheckWalRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TUpdateExportTaskStatusRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetProtocolVersion bool = false + var issetTaskId bool = false + var issetTaskStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -27696,31 +25274,37 @@ func (p *TCheckWalRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetTaskId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + issetTaskStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -27729,13 +25313,27 @@ func (p *TCheckWalRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetProtocolVersion { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTaskId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskStatus { + fieldId = 3 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWalRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateExportTaskStatusRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -27743,29 +25341,41 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end 
error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TUpdateExportTaskStatusRequest[fieldId])) } -func (p *TCheckWalRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TUpdateExportTaskStatusRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field FrontendServiceVersion + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.WalId = &v + _field = FrontendServiceVersion(v) } + p.ProtocolVersion = _field return nil } - -func (p *TCheckWalRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TUpdateExportTaskStatusRequest) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.TaskId = _field + return nil +} +func (p *TUpdateExportTaskStatusRequest) ReadField3(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTExportStatusResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.DbId = &v } + p.TaskStatus = _field return nil } -func (p *TCheckWalRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TUpdateExportTaskStatusRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCheckWalRequest"); err != nil { + if err = oprot.WriteStructBegin("TUpdateExportTaskStatusRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -27777,7 +25387,10 @@ func (p *TCheckWalRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -27796,17 +25409,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCheckWalRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetWalId() { - if err = oprot.WriteFieldBegin("wal_id", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.WalId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TUpdateExportTaskStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocolVersion", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -27815,17 +25426,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCheckWalRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TUpdateExportTaskStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("taskId", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskId.Write(oprot); err 
!= nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -27834,330 +25443,318 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TCheckWalRequest) String() string { +func (p *TUpdateExportTaskStatusRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("taskStatus", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskStatus.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TUpdateExportTaskStatusRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TCheckWalRequest(%+v)", *p) + return fmt.Sprintf("TUpdateExportTaskStatusRequest(%+v)", *p) + } -func (p *TCheckWalRequest) DeepEqual(ano *TCheckWalRequest) bool { +func (p *TUpdateExportTaskStatusRequest) DeepEqual(ano *TUpdateExportTaskStatusRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.WalId) { + if !p.Field1DeepEqual(ano.ProtocolVersion) { return false } - if !p.Field2DeepEqual(ano.DbId) { + if !p.Field2DeepEqual(ano.TaskId) { + return false + } + if !p.Field3DeepEqual(ano.TaskStatus) { return false } return true } -func (p *TCheckWalRequest) Field1DeepEqual(src *int64) bool { +func (p *TUpdateExportTaskStatusRequest) Field1DeepEqual(src FrontendServiceVersion) bool { - if p.WalId == src { - return true - } else if p.WalId == nil || src == nil { - return false - } - if *p.WalId != *src { + if p.ProtocolVersion != src { return false } return true } -func (p *TCheckWalRequest) Field2DeepEqual(src *int64) bool { +func (p *TUpdateExportTaskStatusRequest) Field2DeepEqual(src *types.TUniqueId) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { + if !p.TaskId.DeepEqual(src) { return false } - if *p.DbId != *src { + return true +} +func (p *TUpdateExportTaskStatusRequest) Field3DeepEqual(src *palointernalservice.TExportStatusResult_) bool { + + if !p.TaskStatus.DeepEqual(src) { return false } return true } -type TCheckWalResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - NeedRecovery *bool `thrift:"need_recovery,2,optional" frugal:"2,optional,bool" json:"need_recovery,omitempty"` +type TLoadTxnBeginRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` + Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` + UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + Label string `thrift:"label,7,required" frugal:"7,required,string" json:"label"` + Timestamp *int64 `thrift:"timestamp,8,optional" frugal:"8,optional,i64" json:"timestamp,omitempty"` + AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` + Timeout *int64 
`thrift:"timeout,10,optional" frugal:"10,optional,i64" json:"timeout,omitempty"` + RequestId *types.TUniqueId `thrift:"request_id,11,optional" frugal:"11,optional,types.TUniqueId" json:"request_id,omitempty"` + Token *string `thrift:"token,12,optional" frugal:"12,optional,string" json:"token,omitempty"` + AuthCodeUuid *string `thrift:"auth_code_uuid,13,optional" frugal:"13,optional,string" json:"auth_code_uuid,omitempty"` + TableId *int64 `thrift:"table_id,14,optional" frugal:"14,optional,i64" json:"table_id,omitempty"` + BackendId *int64 `thrift:"backend_id,15,optional" frugal:"15,optional,i64" json:"backend_id,omitempty"` } -func NewTCheckWalResult_() *TCheckWalResult_ { - return &TCheckWalResult_{} +func NewTLoadTxnBeginRequest() *TLoadTxnBeginRequest { + return &TLoadTxnBeginRequest{} } -func (p *TCheckWalResult_) InitDefault() { - *p = TCheckWalResult_{} +func (p *TLoadTxnBeginRequest) InitDefault() { } -var TCheckWalResult__Status_DEFAULT *status.TStatus +var TLoadTxnBeginRequest_Cluster_DEFAULT string -func (p *TCheckWalResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TCheckWalResult__Status_DEFAULT +func (p *TLoadTxnBeginRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TLoadTxnBeginRequest_Cluster_DEFAULT } - return p.Status + return *p.Cluster } -var TCheckWalResult__NeedRecovery_DEFAULT bool - -func (p *TCheckWalResult_) GetNeedRecovery() (v bool) { - if !p.IsSetNeedRecovery() { - return TCheckWalResult__NeedRecovery_DEFAULT - } - return *p.NeedRecovery -} -func (p *TCheckWalResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TCheckWalResult_) SetNeedRecovery(val *bool) { - p.NeedRecovery = val +func (p *TLoadTxnBeginRequest) GetUser() (v string) { + return p.User } -var fieldIDToName_TCheckWalResult_ = map[int16]string{ - 1: "status", - 2: "need_recovery", +func (p *TLoadTxnBeginRequest) GetPasswd() (v string) { + return p.Passwd } -func (p *TCheckWalResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TLoadTxnBeginRequest) GetDb() (v string) { + return p.Db } -func (p *TCheckWalResult_) IsSetNeedRecovery() bool { - return p.NeedRecovery != nil +func (p *TLoadTxnBeginRequest) GetTbl() (v string) { + return p.Tbl } -func (p *TCheckWalResult_) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 +var TLoadTxnBeginRequest_UserIp_DEFAULT string - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TLoadTxnBeginRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TLoadTxnBeginRequest_UserIp_DEFAULT } + return *p.UserIp +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +func (p *TLoadTxnBeginRequest) GetLabel() (v string) { + return p.Label +} - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +var TLoadTxnBeginRequest_Timestamp_DEFAULT int64 - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = 
iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError +func (p *TLoadTxnBeginRequest) GetTimestamp() (v int64) { + if !p.IsSetTimestamp() { + return TLoadTxnBeginRequest_Timestamp_DEFAULT } + return *p.Timestamp +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWalResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +var TLoadTxnBeginRequest_AuthCode_DEFAULT int64 -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func (p *TLoadTxnBeginRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TLoadTxnBeginRequest_AuthCode_DEFAULT + } + return *p.AuthCode } -func (p *TCheckWalResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err +var TLoadTxnBeginRequest_Timeout_DEFAULT int64 + +func (p *TLoadTxnBeginRequest) GetTimeout() (v int64) { + if !p.IsSetTimeout() { + return TLoadTxnBeginRequest_Timeout_DEFAULT } - return nil + return *p.Timeout } -func (p *TCheckWalResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.NeedRecovery = &v +var TLoadTxnBeginRequest_RequestId_DEFAULT *types.TUniqueId + +func (p *TLoadTxnBeginRequest) GetRequestId() (v *types.TUniqueId) { + if !p.IsSetRequestId() { + return TLoadTxnBeginRequest_RequestId_DEFAULT } - return nil + return p.RequestId } -func (p *TCheckWalResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TCheckWalResult"); err != nil { - goto WriteStructBeginError +var TLoadTxnBeginRequest_Token_DEFAULT string + +func (p *TLoadTxnBeginRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TLoadTxnBeginRequest_Token_DEFAULT } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } + return *p.Token +} +var TLoadTxnBeginRequest_AuthCodeUuid_DEFAULT string + +func (p *TLoadTxnBeginRequest) GetAuthCodeUuid() (v string) { + if !p.IsSetAuthCodeUuid() { + return TLoadTxnBeginRequest_AuthCodeUuid_DEFAULT } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return *p.AuthCodeUuid } -func (p *TCheckWalResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := 
p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TLoadTxnBeginRequest_TableId_DEFAULT int64 + +func (p *TLoadTxnBeginRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TLoadTxnBeginRequest_TableId_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return *p.TableId } -func (p *TCheckWalResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetNeedRecovery() { - if err = oprot.WriteFieldBegin("need_recovery", thrift.BOOL, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.NeedRecovery); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TLoadTxnBeginRequest_BackendId_DEFAULT int64 + +func (p *TLoadTxnBeginRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TLoadTxnBeginRequest_BackendId_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return *p.BackendId +} +func (p *TLoadTxnBeginRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TLoadTxnBeginRequest) SetUser(val string) { + p.User = val +} +func (p *TLoadTxnBeginRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TLoadTxnBeginRequest) SetDb(val string) { + p.Db = val +} +func (p *TLoadTxnBeginRequest) SetTbl(val string) { + p.Tbl = val +} +func (p *TLoadTxnBeginRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TLoadTxnBeginRequest) SetLabel(val string) { + p.Label = val +} +func (p *TLoadTxnBeginRequest) SetTimestamp(val *int64) { + p.Timestamp = val +} +func (p *TLoadTxnBeginRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TLoadTxnBeginRequest) SetTimeout(val *int64) { + p.Timeout = val +} +func (p *TLoadTxnBeginRequest) SetRequestId(val *types.TUniqueId) { + p.RequestId = val +} +func (p *TLoadTxnBeginRequest) SetToken(val *string) { + p.Token = val +} +func (p *TLoadTxnBeginRequest) SetAuthCodeUuid(val *string) { + p.AuthCodeUuid = val +} +func (p *TLoadTxnBeginRequest) SetTableId(val *int64) { + p.TableId = val +} +func (p *TLoadTxnBeginRequest) SetBackendId(val *int64) { + p.BackendId = val } -func (p *TCheckWalResult_) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TCheckWalResult_(%+v)", *p) +var fieldIDToName_TLoadTxnBeginRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "tbl", + 6: "user_ip", + 7: "label", + 8: "timestamp", + 9: "auth_code", + 10: "timeout", + 11: "request_id", + 12: "token", + 13: "auth_code_uuid", + 14: "table_id", + 15: "backend_id", } -func (p *TCheckWalResult_) DeepEqual(ano *TCheckWalResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.NeedRecovery) { - return false - } - return true +func (p *TLoadTxnBeginRequest) IsSetCluster() bool { + return p.Cluster != nil } -func (p *TCheckWalResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TLoadTxnBeginRequest) IsSetUserIp() bool { + return p.UserIp != nil +} - if !p.Status.DeepEqual(src) { - return false - } - return 
true +func (p *TLoadTxnBeginRequest) IsSetTimestamp() bool { + return p.Timestamp != nil } -func (p *TCheckWalResult_) Field2DeepEqual(src *bool) bool { - if p.NeedRecovery == src { - return true - } else if p.NeedRecovery == nil || src == nil { - return false - } - if *p.NeedRecovery != *src { - return false - } - return true +func (p *TLoadTxnBeginRequest) IsSetAuthCode() bool { + return p.AuthCode != nil } -type TKafkaRLTaskProgress struct { - PartitionCmtOffset map[int32]int64 `thrift:"partitionCmtOffset,1,required" frugal:"1,required,map" json:"partitionCmtOffset"` +func (p *TLoadTxnBeginRequest) IsSetTimeout() bool { + return p.Timeout != nil } -func NewTKafkaRLTaskProgress() *TKafkaRLTaskProgress { - return &TKafkaRLTaskProgress{} +func (p *TLoadTxnBeginRequest) IsSetRequestId() bool { + return p.RequestId != nil } -func (p *TKafkaRLTaskProgress) InitDefault() { - *p = TKafkaRLTaskProgress{} +func (p *TLoadTxnBeginRequest) IsSetToken() bool { + return p.Token != nil } -func (p *TKafkaRLTaskProgress) GetPartitionCmtOffset() (v map[int32]int64) { - return p.PartitionCmtOffset +func (p *TLoadTxnBeginRequest) IsSetAuthCodeUuid() bool { + return p.AuthCodeUuid != nil } -func (p *TKafkaRLTaskProgress) SetPartitionCmtOffset(val map[int32]int64) { - p.PartitionCmtOffset = val + +func (p *TLoadTxnBeginRequest) IsSetTableId() bool { + return p.TableId != nil } -var fieldIDToName_TKafkaRLTaskProgress = map[int16]string{ - 1: "partitionCmtOffset", +func (p *TLoadTxnBeginRequest) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TKafkaRLTaskProgress) Read(iprot thrift.TProtocol) (err error) { +func (p *TLoadTxnBeginRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetPartitionCmtOffset bool = false + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetLabel bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -28174,22 +25771,135 @@ func (p *TKafkaRLTaskProgress) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetPartitionCmtOffset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetDb = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetTbl = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + 
if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetLabel = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRING { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I64 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -28198,8 +25908,28 @@ func (p *TKafkaRLTaskProgress) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetPartitionCmtOffset { - fieldId = 1 + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetLabel { + fieldId = 7 goto RequiredFieldNotSetError } return nil @@ -28208,7 +25938,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TKafkaRLTaskProgress[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -28217,605 +25947,175 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TKafkaRLTaskProgress[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginRequest[fieldId])) } -func (p 
*TKafkaRLTaskProgress) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.PartitionCmtOffset = make(map[int32]int64, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - var _val int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _val = v - } +func (p *TLoadTxnBeginRequest) ReadField1(iprot thrift.TProtocol) error { - p.PartitionCmtOffset[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Cluster = _field return nil } +func (p *TLoadTxnBeginRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TKafkaRLTaskProgress) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TKafkaRLTaskProgress"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v } + p.User = _field return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } +func (p *TLoadTxnBeginRequest) ReadField3(iprot thrift.TProtocol) error { -func (p *TKafkaRLTaskProgress) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("partitionCmtOffset", thrift.MAP, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I64, len(p.PartitionCmtOffset)); err != nil { + var _field string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = v } - for k, v := range p.PartitionCmtOffset { - - if err := oprot.WriteI32(k); err != nil { - return err - } + p.Passwd = _field + return nil +} +func (p *TLoadTxnBeginRequest) ReadField4(iprot thrift.TProtocol) error { - if err := oprot.WriteI64(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + var _field string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.Db = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } +func (p *TLoadTxnBeginRequest) ReadField5(iprot thrift.TProtocol) error { -func (p *TKafkaRLTaskProgress) String() string { - if p == nil { - return "" + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v } - return fmt.Sprintf("TKafkaRLTaskProgress(%+v)", *p) + p.Tbl = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField6(iprot thrift.TProtocol) error { -func (p *TKafkaRLTaskProgress) DeepEqual(ano *TKafkaRLTaskProgress) bool { - if p 
== ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.PartitionCmtOffset) { - return false + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return true + p.UserIp = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField7(iprot thrift.TProtocol) error { -func (p *TKafkaRLTaskProgress) Field1DeepEqual(src map[int32]int64) bool { - - if len(p.PartitionCmtOffset) != len(src) { - return false - } - for k, v := range p.PartitionCmtOffset { - _src := src[k] - if v != _src { - return false - } + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v } - return true + p.Label = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField8(iprot thrift.TProtocol) error { -type TRLTaskTxnCommitAttachment struct { - LoadSourceType types.TLoadSourceType `thrift:"loadSourceType,1,required" frugal:"1,required,TLoadSourceType" json:"loadSourceType"` - Id *types.TUniqueId `thrift:"id,2,required" frugal:"2,required,types.TUniqueId" json:"id"` - JobId int64 `thrift:"jobId,3,required" frugal:"3,required,i64" json:"jobId"` - LoadedRows *int64 `thrift:"loadedRows,4,optional" frugal:"4,optional,i64" json:"loadedRows,omitempty"` - FilteredRows *int64 `thrift:"filteredRows,5,optional" frugal:"5,optional,i64" json:"filteredRows,omitempty"` - UnselectedRows *int64 `thrift:"unselectedRows,6,optional" frugal:"6,optional,i64" json:"unselectedRows,omitempty"` - ReceivedBytes *int64 `thrift:"receivedBytes,7,optional" frugal:"7,optional,i64" json:"receivedBytes,omitempty"` - LoadedBytes *int64 `thrift:"loadedBytes,8,optional" frugal:"8,optional,i64" json:"loadedBytes,omitempty"` - LoadCostMs *int64 `thrift:"loadCostMs,9,optional" frugal:"9,optional,i64" json:"loadCostMs,omitempty"` - KafkaRLTaskProgress *TKafkaRLTaskProgress `thrift:"kafkaRLTaskProgress,10,optional" frugal:"10,optional,TKafkaRLTaskProgress" json:"kafkaRLTaskProgress,omitempty"` - ErrorLogUrl *string `thrift:"errorLogUrl,11,optional" frugal:"11,optional,string" json:"errorLogUrl,omitempty"` + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Timestamp = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField9(iprot thrift.TProtocol) error { -func NewTRLTaskTxnCommitAttachment() *TRLTaskTxnCommitAttachment { - return &TRLTaskTxnCommitAttachment{} + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField10(iprot thrift.TProtocol) error { -func (p *TRLTaskTxnCommitAttachment) InitDefault() { - *p = TRLTaskTxnCommitAttachment{} + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Timeout = _field + return nil } - -func (p *TRLTaskTxnCommitAttachment) GetLoadSourceType() (v types.TLoadSourceType) { - return p.LoadSourceType +func (p *TLoadTxnBeginRequest) ReadField11(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.RequestId = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField12(iprot thrift.TProtocol) error { -var TRLTaskTxnCommitAttachment_Id_DEFAULT *types.TUniqueId - -func (p *TRLTaskTxnCommitAttachment) GetId() (v *types.TUniqueId) { - if !p.IsSetId() { - return TRLTaskTxnCommitAttachment_Id_DEFAULT + var _field *string + if v, err := 
iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return p.Id + p.Token = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField13(iprot thrift.TProtocol) error { -func (p *TRLTaskTxnCommitAttachment) GetJobId() (v int64) { - return p.JobId + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AuthCodeUuid = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField14(iprot thrift.TProtocol) error { -var TRLTaskTxnCommitAttachment_LoadedRows_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetLoadedRows() (v int64) { - if !p.IsSetLoadedRows() { - return TRLTaskTxnCommitAttachment_LoadedRows_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return *p.LoadedRows + p.TableId = _field + return nil } +func (p *TLoadTxnBeginRequest) ReadField15(iprot thrift.TProtocol) error { -var TRLTaskTxnCommitAttachment_FilteredRows_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetFilteredRows() (v int64) { - if !p.IsSetFilteredRows() { - return TRLTaskTxnCommitAttachment_FilteredRows_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return *p.FilteredRows + p.BackendId = _field + return nil } -var TRLTaskTxnCommitAttachment_UnselectedRows_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetUnselectedRows() (v int64) { - if !p.IsSetUnselectedRows() { - return TRLTaskTxnCommitAttachment_UnselectedRows_DEFAULT - } - return *p.UnselectedRows -} - -var TRLTaskTxnCommitAttachment_ReceivedBytes_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetReceivedBytes() (v int64) { - if !p.IsSetReceivedBytes() { - return TRLTaskTxnCommitAttachment_ReceivedBytes_DEFAULT - } - return *p.ReceivedBytes -} - -var TRLTaskTxnCommitAttachment_LoadedBytes_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetLoadedBytes() (v int64) { - if !p.IsSetLoadedBytes() { - return TRLTaskTxnCommitAttachment_LoadedBytes_DEFAULT - } - return *p.LoadedBytes -} - -var TRLTaskTxnCommitAttachment_LoadCostMs_DEFAULT int64 - -func (p *TRLTaskTxnCommitAttachment) GetLoadCostMs() (v int64) { - if !p.IsSetLoadCostMs() { - return TRLTaskTxnCommitAttachment_LoadCostMs_DEFAULT - } - return *p.LoadCostMs -} - -var TRLTaskTxnCommitAttachment_KafkaRLTaskProgress_DEFAULT *TKafkaRLTaskProgress - -func (p *TRLTaskTxnCommitAttachment) GetKafkaRLTaskProgress() (v *TKafkaRLTaskProgress) { - if !p.IsSetKafkaRLTaskProgress() { - return TRLTaskTxnCommitAttachment_KafkaRLTaskProgress_DEFAULT - } - return p.KafkaRLTaskProgress -} - -var TRLTaskTxnCommitAttachment_ErrorLogUrl_DEFAULT string - -func (p *TRLTaskTxnCommitAttachment) GetErrorLogUrl() (v string) { - if !p.IsSetErrorLogUrl() { - return TRLTaskTxnCommitAttachment_ErrorLogUrl_DEFAULT - } - return *p.ErrorLogUrl -} -func (p *TRLTaskTxnCommitAttachment) SetLoadSourceType(val types.TLoadSourceType) { - p.LoadSourceType = val -} -func (p *TRLTaskTxnCommitAttachment) SetId(val *types.TUniqueId) { - p.Id = val -} -func (p *TRLTaskTxnCommitAttachment) SetJobId(val int64) { - p.JobId = val -} -func (p *TRLTaskTxnCommitAttachment) SetLoadedRows(val *int64) { - p.LoadedRows = val -} -func (p *TRLTaskTxnCommitAttachment) SetFilteredRows(val *int64) { - p.FilteredRows = val -} -func (p *TRLTaskTxnCommitAttachment) SetUnselectedRows(val *int64) { - p.UnselectedRows = val -} -func (p *TRLTaskTxnCommitAttachment) SetReceivedBytes(val *int64) { - p.ReceivedBytes = val -} 
-func (p *TRLTaskTxnCommitAttachment) SetLoadedBytes(val *int64) { - p.LoadedBytes = val -} -func (p *TRLTaskTxnCommitAttachment) SetLoadCostMs(val *int64) { - p.LoadCostMs = val -} -func (p *TRLTaskTxnCommitAttachment) SetKafkaRLTaskProgress(val *TKafkaRLTaskProgress) { - p.KafkaRLTaskProgress = val -} -func (p *TRLTaskTxnCommitAttachment) SetErrorLogUrl(val *string) { - p.ErrorLogUrl = val -} - -var fieldIDToName_TRLTaskTxnCommitAttachment = map[int16]string{ - 1: "loadSourceType", - 2: "id", - 3: "jobId", - 4: "loadedRows", - 5: "filteredRows", - 6: "unselectedRows", - 7: "receivedBytes", - 8: "loadedBytes", - 9: "loadCostMs", - 10: "kafkaRLTaskProgress", - 11: "errorLogUrl", -} - -func (p *TRLTaskTxnCommitAttachment) IsSetId() bool { - return p.Id != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetLoadedRows() bool { - return p.LoadedRows != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetFilteredRows() bool { - return p.FilteredRows != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetUnselectedRows() bool { - return p.UnselectedRows != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetReceivedBytes() bool { - return p.ReceivedBytes != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetLoadedBytes() bool { - return p.LoadedBytes != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetLoadCostMs() bool { - return p.LoadCostMs != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetKafkaRLTaskProgress() bool { - return p.KafkaRLTaskProgress != nil -} - -func (p *TRLTaskTxnCommitAttachment) IsSetErrorLogUrl() bool { - return p.ErrorLogUrl != nil -} - -func (p *TRLTaskTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetLoadSourceType bool = false - var issetId bool = false - var issetJobId bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetLoadSourceType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { 
- goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - if !issetLoadSourceType { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetJobId { - fieldId = 3 - goto RequiredFieldNotSetError - } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRLTaskTxnCommitAttachment[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRLTaskTxnCommitAttachment[fieldId])) -} - -func (p *TRLTaskTxnCommitAttachment) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.LoadSourceType = types.TLoadSourceType(v) - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField2(iprot thrift.TProtocol) error { - p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.JobId = v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadedRows = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.FilteredRows = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.UnselectedRows = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } 
else { - p.ReceivedBytes = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadedBytes = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadCostMs = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField10(iprot thrift.TProtocol) error { - p.KafkaRLTaskProgress = NewTKafkaRLTaskProgress() - if err := p.KafkaRLTaskProgress.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.ErrorLogUrl = &v - } - return nil -} - -func (p *TRLTaskTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { +func (p *TLoadTxnBeginRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRLTaskTxnCommitAttachment"); err != nil { + if err = oprot.WriteStructBegin("TLoadTxnBeginRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -28863,7 +26163,22 @@ func (p *TRLTaskTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -28882,15 +26197,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("loadSourceType", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.LoadSourceType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TLoadTxnBeginRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -28899,11 +26216,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 2); err != nil { +func (p *TLoadTxnBeginRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := p.Id.Write(oprot); err != nil { + if err := oprot.WriteString(p.User); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -28916,11 +26233,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField3(oprot thrift.TProtocol) (err error) { - if err = 
oprot.WriteFieldBegin("jobId", thrift.I64, 3); err != nil { +func (p *TLoadTxnBeginRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.JobId); err != nil { + if err := oprot.WriteString(p.Passwd); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -28933,17 +26250,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadedRows() { - if err = oprot.WriteFieldBegin("loadedRows", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LoadedRows); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TLoadTxnBeginRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -28952,17 +26267,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetFilteredRows() { - if err = oprot.WriteFieldBegin("filteredRows", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.FilteredRows); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TLoadTxnBeginRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -28971,12 +26284,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUnselectedRows() { - if err = oprot.WriteFieldBegin("unselectedRows", thrift.I64, 6); err != nil { +func (p *TLoadTxnBeginRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.UnselectedRows); err != nil { + if err := oprot.WriteString(*p.UserIp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -28990,17 +26303,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetReceivedBytes() { - if err = oprot.WriteFieldBegin("receivedBytes", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.ReceivedBytes); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TLoadTxnBeginRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err 
:= oprot.WriteString(p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -29009,12 +26320,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadedBytes() { - if err = oprot.WriteFieldBegin("loadedBytes", thrift.I64, 8); err != nil { +func (p *TLoadTxnBeginRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err = oprot.WriteFieldBegin("timestamp", thrift.I64, 8); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LoadedBytes); err != nil { + if err := oprot.WriteI64(*p.Timestamp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29028,12 +26339,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadCostMs() { - if err = oprot.WriteFieldBegin("loadCostMs", thrift.I64, 9); err != nil { - goto WriteFieldBeginError +func (p *TLoadTxnBeginRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { + goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LoadCostMs); err != nil { + if err := oprot.WriteI64(*p.AuthCode); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29047,12 +26358,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetKafkaRLTaskProgress() { - if err = oprot.WriteFieldBegin("kafkaRLTaskProgress", thrift.STRUCT, 10); err != nil { +func (p *TLoadTxnBeginRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeout() { + if err = oprot.WriteFieldBegin("timeout", thrift.I64, 10); err != nil { goto WriteFieldBeginError } - if err := p.KafkaRLTaskProgress.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.Timeout); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29066,12 +26377,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetErrorLogUrl() { - if err = oprot.WriteFieldBegin("errorLogUrl", thrift.STRING, 11); err != nil { +func (p *TLoadTxnBeginRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetRequestId() { + if err = oprot.WriteFieldBegin("request_id", thrift.STRUCT, 11); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.ErrorLogUrl); err != nil { + if err := p.RequestId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29085,214 +26396,385 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TRLTaskTxnCommitAttachment) String() string { +func (p *TLoadTxnBeginRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + 
goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TLoadTxnBeginRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCodeUuid() { + if err = oprot.WriteFieldBegin("auth_code_uuid", thrift.STRING, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AuthCodeUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TLoadTxnBeginRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TLoadTxnBeginRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TLoadTxnBeginRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TRLTaskTxnCommitAttachment(%+v)", *p) + return fmt.Sprintf("TLoadTxnBeginRequest(%+v)", *p) + } -func (p *TRLTaskTxnCommitAttachment) DeepEqual(ano *TRLTaskTxnCommitAttachment) bool { +func (p *TLoadTxnBeginRequest) DeepEqual(ano *TLoadTxnBeginRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.LoadSourceType) { + if !p.Field1DeepEqual(ano.Cluster) { return false } - if !p.Field2DeepEqual(ano.Id) { + if !p.Field2DeepEqual(ano.User) { return false } - if !p.Field3DeepEqual(ano.JobId) { + if !p.Field3DeepEqual(ano.Passwd) { return false } - if !p.Field4DeepEqual(ano.LoadedRows) { + if !p.Field4DeepEqual(ano.Db) { return false } - if !p.Field5DeepEqual(ano.FilteredRows) { + if !p.Field5DeepEqual(ano.Tbl) { return false } - if !p.Field6DeepEqual(ano.UnselectedRows) { + if !p.Field6DeepEqual(ano.UserIp) { return false } - if !p.Field7DeepEqual(ano.ReceivedBytes) { + if !p.Field7DeepEqual(ano.Label) { return false } - if !p.Field8DeepEqual(ano.LoadedBytes) { + if !p.Field8DeepEqual(ano.Timestamp) { return false } - if !p.Field9DeepEqual(ano.LoadCostMs) { + if !p.Field9DeepEqual(ano.AuthCode) { return false } - if !p.Field10DeepEqual(ano.KafkaRLTaskProgress) { + if !p.Field10DeepEqual(ano.Timeout) { return false } - if !p.Field11DeepEqual(ano.ErrorLogUrl) { + if !p.Field11DeepEqual(ano.RequestId) { 
+ return false + } + if !p.Field12DeepEqual(ano.Token) { + return false + } + if !p.Field13DeepEqual(ano.AuthCodeUuid) { + return false + } + if !p.Field14DeepEqual(ano.TableId) { + return false + } + if !p.Field15DeepEqual(ano.BackendId) { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field1DeepEqual(src types.TLoadSourceType) bool { +func (p *TLoadTxnBeginRequest) Field1DeepEqual(src *string) bool { - if p.LoadSourceType != src { + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field2DeepEqual(src *types.TUniqueId) bool { +func (p *TLoadTxnBeginRequest) Field2DeepEqual(src string) bool { - if !p.Id.DeepEqual(src) { + if strings.Compare(p.User, src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field3DeepEqual(src int64) bool { +func (p *TLoadTxnBeginRequest) Field3DeepEqual(src string) bool { - if p.JobId != src { + if strings.Compare(p.Passwd, src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field4DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field4DeepEqual(src string) bool { - if p.LoadedRows == src { + if strings.Compare(p.Db, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnBeginRequest) Field5DeepEqual(src string) bool { + + if strings.Compare(p.Tbl, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnBeginRequest) Field6DeepEqual(src *string) bool { + + if p.UserIp == src { return true - } else if p.LoadedRows == nil || src == nil { + } else if p.UserIp == nil || src == nil { return false } - if *p.LoadedRows != *src { + if strings.Compare(*p.UserIp, *src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field5DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field7DeepEqual(src string) bool { - if p.FilteredRows == src { + if strings.Compare(p.Label, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnBeginRequest) Field8DeepEqual(src *int64) bool { + + if p.Timestamp == src { return true - } else if p.FilteredRows == nil || src == nil { + } else if p.Timestamp == nil || src == nil { return false } - if *p.FilteredRows != *src { + if *p.Timestamp != *src { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field6DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field9DeepEqual(src *int64) bool { - if p.UnselectedRows == src { + if p.AuthCode == src { return true - } else if p.UnselectedRows == nil || src == nil { + } else if p.AuthCode == nil || src == nil { return false } - if *p.UnselectedRows != *src { + if *p.AuthCode != *src { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field7DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field10DeepEqual(src *int64) bool { - if p.ReceivedBytes == src { + if p.Timeout == src { return true - } else if p.ReceivedBytes == nil || src == nil { + } else if p.Timeout == nil || src == nil { return false } - if *p.ReceivedBytes != *src { + if *p.Timeout != *src { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field8DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field11DeepEqual(src *types.TUniqueId) bool { - if p.LoadedBytes == src { + if !p.RequestId.DeepEqual(src) { + return false + } + return true +} +func (p *TLoadTxnBeginRequest) Field12DeepEqual(src *string) bool { + + if p.Token == src { return true - } else if 
p.LoadedBytes == nil || src == nil { + } else if p.Token == nil || src == nil { return false } - if *p.LoadedBytes != *src { + if strings.Compare(*p.Token, *src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field9DeepEqual(src *int64) bool { +func (p *TLoadTxnBeginRequest) Field13DeepEqual(src *string) bool { - if p.LoadCostMs == src { + if p.AuthCodeUuid == src { return true - } else if p.LoadCostMs == nil || src == nil { + } else if p.AuthCodeUuid == nil || src == nil { return false } - if *p.LoadCostMs != *src { + if strings.Compare(*p.AuthCodeUuid, *src) != 0 { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field10DeepEqual(src *TKafkaRLTaskProgress) bool { +func (p *TLoadTxnBeginRequest) Field14DeepEqual(src *int64) bool { - if !p.KafkaRLTaskProgress.DeepEqual(src) { + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { return false } return true } -func (p *TRLTaskTxnCommitAttachment) Field11DeepEqual(src *string) bool { +func (p *TLoadTxnBeginRequest) Field15DeepEqual(src *int64) bool { - if p.ErrorLogUrl == src { + if p.BackendId == src { return true - } else if p.ErrorLogUrl == nil || src == nil { + } else if p.BackendId == nil || src == nil { return false } - if strings.Compare(*p.ErrorLogUrl, *src) != 0 { + if *p.BackendId != *src { return false } return true } -type TTxnCommitAttachment struct { - LoadType types.TLoadType `thrift:"loadType,1,required" frugal:"1,required,TLoadType" json:"loadType"` - RlTaskTxnCommitAttachment *TRLTaskTxnCommitAttachment `thrift:"rlTaskTxnCommitAttachment,2,optional" frugal:"2,optional,TRLTaskTxnCommitAttachment" json:"rlTaskTxnCommitAttachment,omitempty"` +type TLoadTxnBeginResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + TxnId *int64 `thrift:"txnId,2,optional" frugal:"2,optional,i64" json:"txnId,omitempty"` + JobStatus *string `thrift:"job_status,3,optional" frugal:"3,optional,string" json:"job_status,omitempty"` + DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` } -func NewTTxnCommitAttachment() *TTxnCommitAttachment { - return &TTxnCommitAttachment{} +func NewTLoadTxnBeginResult_() *TLoadTxnBeginResult_ { + return &TLoadTxnBeginResult_{} } -func (p *TTxnCommitAttachment) InitDefault() { - *p = TTxnCommitAttachment{} +func (p *TLoadTxnBeginResult_) InitDefault() { } -func (p *TTxnCommitAttachment) GetLoadType() (v types.TLoadType) { - return p.LoadType +var TLoadTxnBeginResult__Status_DEFAULT *status.TStatus + +func (p *TLoadTxnBeginResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TLoadTxnBeginResult__Status_DEFAULT + } + return p.Status } -var TTxnCommitAttachment_RlTaskTxnCommitAttachment_DEFAULT *TRLTaskTxnCommitAttachment +var TLoadTxnBeginResult__TxnId_DEFAULT int64 -func (p *TTxnCommitAttachment) GetRlTaskTxnCommitAttachment() (v *TRLTaskTxnCommitAttachment) { - if !p.IsSetRlTaskTxnCommitAttachment() { - return TTxnCommitAttachment_RlTaskTxnCommitAttachment_DEFAULT +func (p *TLoadTxnBeginResult_) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TLoadTxnBeginResult__TxnId_DEFAULT } - return p.RlTaskTxnCommitAttachment + return *p.TxnId } -func (p *TTxnCommitAttachment) SetLoadType(val types.TLoadType) { - p.LoadType = val + +var TLoadTxnBeginResult__JobStatus_DEFAULT string + +func (p *TLoadTxnBeginResult_) GetJobStatus() (v string) { + if !p.IsSetJobStatus() { + return 
TLoadTxnBeginResult__JobStatus_DEFAULT + } + return *p.JobStatus } -func (p *TTxnCommitAttachment) SetRlTaskTxnCommitAttachment(val *TRLTaskTxnCommitAttachment) { - p.RlTaskTxnCommitAttachment = val + +var TLoadTxnBeginResult__DbId_DEFAULT int64 + +func (p *TLoadTxnBeginResult_) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TLoadTxnBeginResult__DbId_DEFAULT + } + return *p.DbId +} +func (p *TLoadTxnBeginResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TLoadTxnBeginResult_) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TLoadTxnBeginResult_) SetJobStatus(val *string) { + p.JobStatus = val +} +func (p *TLoadTxnBeginResult_) SetDbId(val *int64) { + p.DbId = val } -var fieldIDToName_TTxnCommitAttachment = map[int16]string{ - 1: "loadType", - 2: "rlTaskTxnCommitAttachment", +var fieldIDToName_TLoadTxnBeginResult_ = map[int16]string{ + 1: "status", + 2: "txnId", + 3: "job_status", + 4: "db_id", } -func (p *TTxnCommitAttachment) IsSetRlTaskTxnCommitAttachment() bool { - return p.RlTaskTxnCommitAttachment != nil +func (p *TLoadTxnBeginResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { +func (p *TLoadTxnBeginResult_) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TLoadTxnBeginResult_) IsSetJobStatus() bool { + return p.JobStatus != nil +} + +func (p *TLoadTxnBeginResult_) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TLoadTxnBeginResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetLoadType bool = false + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -29309,32 +26791,43 @@ func (p *TTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetLoadType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -29343,7 +26836,7 @@ func (p *TTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetLoadType { + if !issetStatus { fieldId = 1 goto RequiredFieldNotSetError } @@ -29353,7 +26846,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnCommitAttachment[fieldId]), 
err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -29362,29 +26855,54 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTxnCommitAttachment[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginResult_[fieldId])) } -func (p *TTxnCommitAttachment) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TLoadTxnBeginResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TLoadTxnBeginResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.LoadType = types.TLoadType(v) + _field = &v } + p.TxnId = _field return nil } +func (p *TLoadTxnBeginResult_) ReadField3(iprot thrift.TProtocol) error { -func (p *TTxnCommitAttachment) ReadField2(iprot thrift.TProtocol) error { - p.RlTaskTxnCommitAttachment = NewTRLTaskTxnCommitAttachment() - if err := p.RlTaskTxnCommitAttachment.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.JobStatus = _field return nil } +func (p *TLoadTxnBeginResult_) ReadField4(iprot thrift.TProtocol) error { -func (p *TTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} + +func (p *TLoadTxnBeginResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTxnCommitAttachment"); err != nil { + if err = oprot.WriteStructBegin("TLoadTxnBeginResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -29396,7 +26914,14 @@ func (p *TTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -29415,11 +26940,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTxnCommitAttachment) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("loadType", thrift.I32, 1); err != nil { +func (p *TLoadTxnBeginResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(p.LoadType)); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29432,12 +26957,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTxnCommitAttachment) writeField2(oprot thrift.TProtocol) (err error) { - if 
p.IsSetRlTaskTxnCommitAttachment() { - if err = oprot.WriteFieldBegin("rlTaskTxnCommitAttachment", thrift.STRUCT, 2); err != nil { +func (p *TLoadTxnBeginResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := p.RlTaskTxnCommitAttachment.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.TxnId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -29451,301 +26976,372 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTxnCommitAttachment) String() string { +func (p *TLoadTxnBeginResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetJobStatus() { + if err = oprot.WriteFieldBegin("job_status", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.JobStatus); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TLoadTxnBeginResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TLoadTxnBeginResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TTxnCommitAttachment(%+v)", *p) + return fmt.Sprintf("TLoadTxnBeginResult_(%+v)", *p) + } -func (p *TTxnCommitAttachment) DeepEqual(ano *TTxnCommitAttachment) bool { +func (p *TLoadTxnBeginResult_) DeepEqual(ano *TLoadTxnBeginResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.LoadType) { + if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field2DeepEqual(ano.RlTaskTxnCommitAttachment) { + if !p.Field2DeepEqual(ano.TxnId) { + return false + } + if !p.Field3DeepEqual(ano.JobStatus) { + return false + } + if !p.Field4DeepEqual(ano.DbId) { return false } return true } -func (p *TTxnCommitAttachment) Field1DeepEqual(src types.TLoadType) bool { +func (p *TLoadTxnBeginResult_) Field1DeepEqual(src *status.TStatus) bool { - if p.LoadType != src { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TTxnCommitAttachment) Field2DeepEqual(src *TRLTaskTxnCommitAttachment) bool { +func (p *TLoadTxnBeginResult_) Field2DeepEqual(src *int64) bool { - if !p.RlTaskTxnCommitAttachment.DeepEqual(src) { + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { return false } return true } +func (p *TLoadTxnBeginResult_) Field3DeepEqual(src *string) bool { -type TLoadTxnCommitRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" 
frugal:"3,required,string" json:"passwd"` - Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` - Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - TxnId int64 `thrift:"txnId,7,required" frugal:"7,required,i64" json:"txnId"` - Sync bool `thrift:"sync,8,required" frugal:"8,required,bool" json:"sync"` - CommitInfos []*types.TTabletCommitInfo `thrift:"commitInfos,9,optional" frugal:"9,optional,list" json:"commitInfos,omitempty"` - AuthCode *int64 `thrift:"auth_code,10,optional" frugal:"10,optional,i64" json:"auth_code,omitempty"` - TxnCommitAttachment *TTxnCommitAttachment `thrift:"txnCommitAttachment,11,optional" frugal:"11,optional,TTxnCommitAttachment" json:"txnCommitAttachment,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,12,optional" frugal:"12,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` - Token *string `thrift:"token,13,optional" frugal:"13,optional,string" json:"token,omitempty"` - DbId *int64 `thrift:"db_id,14,optional" frugal:"14,optional,i64" json:"db_id,omitempty"` - Tbls []string `thrift:"tbls,15,optional" frugal:"15,optional,list" json:"tbls,omitempty"` - TableId *int64 `thrift:"table_id,16,optional" frugal:"16,optional,i64" json:"table_id,omitempty"` + if p.JobStatus == src { + return true + } else if p.JobStatus == nil || src == nil { + return false + } + if strings.Compare(*p.JobStatus, *src) != 0 { + return false + } + return true } +func (p *TLoadTxnBeginResult_) Field4DeepEqual(src *int64) bool { -func NewTLoadTxnCommitRequest() *TLoadTxnCommitRequest { - return &TLoadTxnCommitRequest{} + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true } -func (p *TLoadTxnCommitRequest) InitDefault() { - *p = TLoadTxnCommitRequest{} +type TBeginTxnRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + TableIds []int64 `thrift:"table_ids,5,optional" frugal:"5,optional,list" json:"table_ids,omitempty"` + UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + Label *string `thrift:"label,7,optional" frugal:"7,optional,string" json:"label,omitempty"` + AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` + Timeout *int64 `thrift:"timeout,9,optional" frugal:"9,optional,i64" json:"timeout,omitempty"` + RequestId *types.TUniqueId `thrift:"request_id,10,optional" frugal:"10,optional,types.TUniqueId" json:"request_id,omitempty"` + Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + BackendId *int64 `thrift:"backend_id,12,optional" frugal:"12,optional,i64" json:"backend_id,omitempty"` + SubTxnNum int64 `thrift:"sub_txn_num,13,optional" frugal:"13,optional,i64" json:"sub_txn_num,omitempty"` } -var TLoadTxnCommitRequest_Cluster_DEFAULT string +func NewTBeginTxnRequest() *TBeginTxnRequest { + return &TBeginTxnRequest{ -func (p *TLoadTxnCommitRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TLoadTxnCommitRequest_Cluster_DEFAULT + SubTxnNum: 0, } 
- return *p.Cluster } -func (p *TLoadTxnCommitRequest) GetUser() (v string) { - return p.User +func (p *TBeginTxnRequest) InitDefault() { + p.SubTxnNum = 0 } -func (p *TLoadTxnCommitRequest) GetPasswd() (v string) { - return p.Passwd +var TBeginTxnRequest_Cluster_DEFAULT string + +func (p *TBeginTxnRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TBeginTxnRequest_Cluster_DEFAULT + } + return *p.Cluster } -func (p *TLoadTxnCommitRequest) GetDb() (v string) { - return p.Db +var TBeginTxnRequest_User_DEFAULT string + +func (p *TBeginTxnRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TBeginTxnRequest_User_DEFAULT + } + return *p.User } -func (p *TLoadTxnCommitRequest) GetTbl() (v string) { - return p.Tbl +var TBeginTxnRequest_Passwd_DEFAULT string + +func (p *TBeginTxnRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TBeginTxnRequest_Passwd_DEFAULT + } + return *p.Passwd } -var TLoadTxnCommitRequest_UserIp_DEFAULT string +var TBeginTxnRequest_Db_DEFAULT string -func (p *TLoadTxnCommitRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TLoadTxnCommitRequest_UserIp_DEFAULT +func (p *TBeginTxnRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TBeginTxnRequest_Db_DEFAULT } - return *p.UserIp + return *p.Db } -func (p *TLoadTxnCommitRequest) GetTxnId() (v int64) { - return p.TxnId +var TBeginTxnRequest_TableIds_DEFAULT []int64 + +func (p *TBeginTxnRequest) GetTableIds() (v []int64) { + if !p.IsSetTableIds() { + return TBeginTxnRequest_TableIds_DEFAULT + } + return p.TableIds } -func (p *TLoadTxnCommitRequest) GetSync() (v bool) { - return p.Sync +var TBeginTxnRequest_UserIp_DEFAULT string + +func (p *TBeginTxnRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TBeginTxnRequest_UserIp_DEFAULT + } + return *p.UserIp } -var TLoadTxnCommitRequest_CommitInfos_DEFAULT []*types.TTabletCommitInfo +var TBeginTxnRequest_Label_DEFAULT string -func (p *TLoadTxnCommitRequest) GetCommitInfos() (v []*types.TTabletCommitInfo) { - if !p.IsSetCommitInfos() { - return TLoadTxnCommitRequest_CommitInfos_DEFAULT +func (p *TBeginTxnRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TBeginTxnRequest_Label_DEFAULT } - return p.CommitInfos + return *p.Label } -var TLoadTxnCommitRequest_AuthCode_DEFAULT int64 +var TBeginTxnRequest_AuthCode_DEFAULT int64 -func (p *TLoadTxnCommitRequest) GetAuthCode() (v int64) { +func (p *TBeginTxnRequest) GetAuthCode() (v int64) { if !p.IsSetAuthCode() { - return TLoadTxnCommitRequest_AuthCode_DEFAULT + return TBeginTxnRequest_AuthCode_DEFAULT } return *p.AuthCode } -var TLoadTxnCommitRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment +var TBeginTxnRequest_Timeout_DEFAULT int64 -func (p *TLoadTxnCommitRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { - if !p.IsSetTxnCommitAttachment() { - return TLoadTxnCommitRequest_TxnCommitAttachment_DEFAULT +func (p *TBeginTxnRequest) GetTimeout() (v int64) { + if !p.IsSetTimeout() { + return TBeginTxnRequest_Timeout_DEFAULT } - return p.TxnCommitAttachment + return *p.Timeout } -var TLoadTxnCommitRequest_ThriftRpcTimeoutMs_DEFAULT int64 +var TBeginTxnRequest_RequestId_DEFAULT *types.TUniqueId -func (p *TLoadTxnCommitRequest) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TLoadTxnCommitRequest_ThriftRpcTimeoutMs_DEFAULT +func (p *TBeginTxnRequest) GetRequestId() (v *types.TUniqueId) { + if !p.IsSetRequestId() { + return TBeginTxnRequest_RequestId_DEFAULT } - return *p.ThriftRpcTimeoutMs + return 
p.RequestId } -var TLoadTxnCommitRequest_Token_DEFAULT string +var TBeginTxnRequest_Token_DEFAULT string -func (p *TLoadTxnCommitRequest) GetToken() (v string) { +func (p *TBeginTxnRequest) GetToken() (v string) { if !p.IsSetToken() { - return TLoadTxnCommitRequest_Token_DEFAULT + return TBeginTxnRequest_Token_DEFAULT } return *p.Token } -var TLoadTxnCommitRequest_DbId_DEFAULT int64 - -func (p *TLoadTxnCommitRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TLoadTxnCommitRequest_DbId_DEFAULT - } - return *p.DbId -} - -var TLoadTxnCommitRequest_Tbls_DEFAULT []string +var TBeginTxnRequest_BackendId_DEFAULT int64 -func (p *TLoadTxnCommitRequest) GetTbls() (v []string) { - if !p.IsSetTbls() { - return TLoadTxnCommitRequest_Tbls_DEFAULT +func (p *TBeginTxnRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TBeginTxnRequest_BackendId_DEFAULT } - return p.Tbls + return *p.BackendId } -var TLoadTxnCommitRequest_TableId_DEFAULT int64 +var TBeginTxnRequest_SubTxnNum_DEFAULT int64 = 0 -func (p *TLoadTxnCommitRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TLoadTxnCommitRequest_TableId_DEFAULT +func (p *TBeginTxnRequest) GetSubTxnNum() (v int64) { + if !p.IsSetSubTxnNum() { + return TBeginTxnRequest_SubTxnNum_DEFAULT } - return *p.TableId + return p.SubTxnNum } -func (p *TLoadTxnCommitRequest) SetCluster(val *string) { +func (p *TBeginTxnRequest) SetCluster(val *string) { p.Cluster = val } -func (p *TLoadTxnCommitRequest) SetUser(val string) { +func (p *TBeginTxnRequest) SetUser(val *string) { p.User = val } -func (p *TLoadTxnCommitRequest) SetPasswd(val string) { +func (p *TBeginTxnRequest) SetPasswd(val *string) { p.Passwd = val } -func (p *TLoadTxnCommitRequest) SetDb(val string) { +func (p *TBeginTxnRequest) SetDb(val *string) { p.Db = val } -func (p *TLoadTxnCommitRequest) SetTbl(val string) { - p.Tbl = val +func (p *TBeginTxnRequest) SetTableIds(val []int64) { + p.TableIds = val } -func (p *TLoadTxnCommitRequest) SetUserIp(val *string) { +func (p *TBeginTxnRequest) SetUserIp(val *string) { p.UserIp = val } -func (p *TLoadTxnCommitRequest) SetTxnId(val int64) { - p.TxnId = val -} -func (p *TLoadTxnCommitRequest) SetSync(val bool) { - p.Sync = val -} -func (p *TLoadTxnCommitRequest) SetCommitInfos(val []*types.TTabletCommitInfo) { - p.CommitInfos = val +func (p *TBeginTxnRequest) SetLabel(val *string) { + p.Label = val } -func (p *TLoadTxnCommitRequest) SetAuthCode(val *int64) { +func (p *TBeginTxnRequest) SetAuthCode(val *int64) { p.AuthCode = val } -func (p *TLoadTxnCommitRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { - p.TxnCommitAttachment = val +func (p *TBeginTxnRequest) SetTimeout(val *int64) { + p.Timeout = val } -func (p *TLoadTxnCommitRequest) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val +func (p *TBeginTxnRequest) SetRequestId(val *types.TUniqueId) { + p.RequestId = val } -func (p *TLoadTxnCommitRequest) SetToken(val *string) { +func (p *TBeginTxnRequest) SetToken(val *string) { p.Token = val } -func (p *TLoadTxnCommitRequest) SetDbId(val *int64) { - p.DbId = val -} -func (p *TLoadTxnCommitRequest) SetTbls(val []string) { - p.Tbls = val +func (p *TBeginTxnRequest) SetBackendId(val *int64) { + p.BackendId = val } -func (p *TLoadTxnCommitRequest) SetTableId(val *int64) { - p.TableId = val +func (p *TBeginTxnRequest) SetSubTxnNum(val int64) { + p.SubTxnNum = val } -var fieldIDToName_TLoadTxnCommitRequest = map[int16]string{ +var fieldIDToName_TBeginTxnRequest = map[int16]string{ 1: "cluster", 2: "user", 
3: "passwd", 4: "db", - 5: "tbl", + 5: "table_ids", 6: "user_ip", - 7: "txnId", - 8: "sync", - 9: "commitInfos", - 10: "auth_code", - 11: "txnCommitAttachment", - 12: "thrift_rpc_timeout_ms", - 13: "token", - 14: "db_id", - 15: "tbls", - 16: "table_id", + 7: "label", + 8: "auth_code", + 9: "timeout", + 10: "request_id", + 11: "token", + 12: "backend_id", + 13: "sub_txn_num", } -func (p *TLoadTxnCommitRequest) IsSetCluster() bool { +func (p *TBeginTxnRequest) IsSetCluster() bool { return p.Cluster != nil } -func (p *TLoadTxnCommitRequest) IsSetUserIp() bool { +func (p *TBeginTxnRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TBeginTxnRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TBeginTxnRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TBeginTxnRequest) IsSetTableIds() bool { + return p.TableIds != nil +} + +func (p *TBeginTxnRequest) IsSetUserIp() bool { return p.UserIp != nil } -func (p *TLoadTxnCommitRequest) IsSetCommitInfos() bool { - return p.CommitInfos != nil +func (p *TBeginTxnRequest) IsSetLabel() bool { + return p.Label != nil } -func (p *TLoadTxnCommitRequest) IsSetAuthCode() bool { +func (p *TBeginTxnRequest) IsSetAuthCode() bool { return p.AuthCode != nil } -func (p *TLoadTxnCommitRequest) IsSetTxnCommitAttachment() bool { - return p.TxnCommitAttachment != nil +func (p *TBeginTxnRequest) IsSetTimeout() bool { + return p.Timeout != nil } -func (p *TLoadTxnCommitRequest) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil +func (p *TBeginTxnRequest) IsSetRequestId() bool { + return p.RequestId != nil } -func (p *TLoadTxnCommitRequest) IsSetToken() bool { +func (p *TBeginTxnRequest) IsSetToken() bool { return p.Token != nil } -func (p *TLoadTxnCommitRequest) IsSetDbId() bool { - return p.DbId != nil -} - -func (p *TLoadTxnCommitRequest) IsSetTbls() bool { - return p.Tbls != nil +func (p *TBeginTxnRequest) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TLoadTxnCommitRequest) IsSetTableId() bool { - return p.TableId != nil +func (p *TBeginTxnRequest) IsSetSubTxnNum() bool { + return p.SubTxnNum != TBeginTxnRequest_SubTxnNum_DEFAULT } -func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetTxnId bool = false - var issetSync bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -29766,173 +27362,110 @@ func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId 
== thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetTbl = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - issetSync = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: - if fieldTypeId == thrift.STRING { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.I64 { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.LIST { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: if fieldTypeId == thrift.I64 { - if err = p.ReadField16(iprot); err != nil { + if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil 
{ - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -29941,42 +27474,13 @@ func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetSync { - fieldId = 8 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -29984,180 +27488,164 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitRequest[fieldId])) -} - -func (p *TLoadTxnCommitRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil } -func (p *TLoadTxnCommitRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} +func (p *TBeginTxnRequest) ReadField1(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = &v } + p.Cluster = _field return nil } +func (p *TBeginTxnRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField4(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = &v } + p.User = _field return nil } +func (p *TBeginTxnRequest) ReadField3(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField5(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Tbl = v + _field = &v } + p.Passwd = _field return nil } +func (p *TBeginTxnRequest) ReadField4(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField6(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v - } - return nil -} - -func (p *TLoadTxnCommitRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = v - } - return nil -} - 
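(Illustration only, not emitted by the thrift generator and not part of this diff.) The regenerated Read loop above drops the old isset* bookkeeping because every TBeginTxnRequest field is optional: each field id is checked against its expected wire type, read on a match, and skipped otherwise. A minimal round-trip through the generated Write/Read pair might look like the sketch below. It assumes the snippet is compiled in the same package as the generated file (so no repo-specific import path has to be guessed) and that `thrift` is the context-free apache v0.13-style TProtocol API this generated code is written against; all literal values and the function name are invented for the example.

// Sketch: round-trip a TBeginTxnRequest through an in-memory transport.
// Assumes this sits alongside the generated code; "thrift" is the
// apache v0.13-style library the generated calls (no context args) imply.
func exampleBeginTxnRoundTrip() error {
	db, label := "ccr_test_db", "ccr_begin_txn_label" // hypothetical values
	timeout := int64(30)

	req := &TBeginTxnRequest{
		Db:       &db,
		Label:    &label,
		TableIds: []int64{10001, 10002},
		Timeout:  &timeout,
	}

	buf := thrift.NewTMemoryBuffer()
	oprot := thrift.NewTBinaryProtocolTransport(buf)
	// Write emits the struct header, each set optional field, and a field-stop marker.
	if err := req.Write(oprot); err != nil {
		return err
	}

	decoded := &TBeginTxnRequest{}
	iprot := thrift.NewTBinaryProtocolTransport(buf)
	// Read skips unknown or type-mismatched fields instead of failing on them.
	if err := decoded.Read(iprot); err != nil {
		return err
	}
	if !req.DeepEqual(decoded) {
		return fmt.Errorf("round-trip mismatch: %s vs %s", req, decoded)
	}
	return nil
}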
-func (p *TLoadTxnCommitRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.Sync = v + _field = &v } + p.Db = _field return nil } - -func (p *TLoadTxnCommitRequest) ReadField9(iprot thrift.TProtocol) error { +func (p *TBeginTxnRequest) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if err := _elem.Read(iprot); err != nil { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _elem = v } - p.CommitInfos = append(p.CommitInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TableIds = _field return nil } +func (p *TBeginTxnRequest) ReadField6(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.AuthCode = &v + _field = &v } + p.UserIp = _field return nil } +func (p *TBeginTxnRequest) ReadField7(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField11(iprot thrift.TProtocol) error { - p.TxnCommitAttachment = NewTTxnCommitAttachment() - if err := p.TxnCommitAttachment.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Label = _field return nil } +func (p *TBeginTxnRequest) ReadField8(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField12(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThriftRpcTimeoutMs = &v + _field = &v } + p.AuthCode = _field return nil } +func (p *TBeginTxnRequest) ReadField9(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Token = &v + _field = &v } + p.Timeout = _field return nil } - -func (p *TLoadTxnCommitRequest) ReadField14(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TBeginTxnRequest) ReadField10(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err - } else { - p.DbId = &v } + p.RequestId = _field return nil } +func (p *TBeginTxnRequest) ReadField11(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitRequest) ReadField15(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } - p.Tbls = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + p.Token = _field + return nil +} +func (p *TBeginTxnRequest) ReadField12(iprot thrift.TProtocol) error { - p.Tbls = append(p.Tbls, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.BackendId = _field return nil } +func (p *TBeginTxnRequest) ReadField13(iprot thrift.TProtocol) error { -func (p 
*TLoadTxnCommitRequest) ReadField16(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = &v + _field = v } + p.SubTxnNum = _field return nil } -func (p *TLoadTxnCommitRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnCommitRequest"); err != nil { + if err = oprot.WriteStructBegin("TBeginTxnRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -30213,19 +27701,6 @@ func (p *TLoadTxnCommitRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 13 goto WriteFieldError } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -30244,7 +27719,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetCluster() { if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { goto WriteFieldBeginError @@ -30263,15 +27738,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TBeginTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -30280,32 +27757,36 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +func (p *TBeginTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField4(oprot thrift.TProtocol) (err error) { - 
if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TBeginTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -30314,15 +27795,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Tbl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TBeginTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTableIds() { + if err = oprot.WriteFieldBegin("table_ids", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TableIds)); err != nil { + return err + } + for _, v := range p.TableIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -30331,7 +27822,7 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField6(oprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { if p.IsSetUserIp() { if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { goto WriteFieldBeginError @@ -30350,54 +27841,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("txnId", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TLoadTxnCommitRequest) writeField8(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("sync", thrift.BOOL, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.Sync); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TLoadTxnCommitRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetCommitInfos() { - if err = oprot.WriteFieldBegin("commitInfos", thrift.LIST, 9); err != nil { +func (p *TBeginTxnRequest) writeField7(oprot 
thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 7); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { - return err - } - for _, v := range p.CommitInfos { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.Label); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -30406,14 +27855,14 @@ func (p *TLoadTxnCommitRequest) writeField9(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField10(oprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) writeField8(oprot thrift.TProtocol) (err error) { if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 10); err != nil { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { goto WriteFieldBeginError } if err := oprot.WriteI64(*p.AuthCode); err != nil { @@ -30425,17 +27874,17 @@ func (p *TLoadTxnCommitRequest) writeField10(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnCommitAttachment() { - if err = oprot.WriteFieldBegin("txnCommitAttachment", thrift.STRUCT, 11); err != nil { +func (p *TBeginTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeout() { + if err = oprot.WriteFieldBegin("timeout", thrift.I64, 9); err != nil { goto WriteFieldBeginError } - if err := p.TxnCommitAttachment.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.Timeout); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -30444,17 +27893,17 @@ func (p *TLoadTxnCommitRequest) writeField11(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 12); err != nil { +func (p *TBeginTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetRequestId() { + if err = oprot.WriteFieldBegin("request_id", thrift.STRUCT, 10); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + if err := p.RequestId.Write(oprot); err != nil { return err } if 
err = oprot.WriteFieldEnd(); err != nil { @@ -30463,14 +27912,14 @@ func (p *TLoadTxnCommitRequest) writeField12(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField13(oprot thrift.TProtocol) (err error) { +func (p *TBeginTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 13); err != nil { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { goto WriteFieldBeginError } if err := oprot.WriteString(*p.Token); err != nil { @@ -30482,44 +27931,17 @@ func (p *TLoadTxnCommitRequest) writeField13(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) -} - -func (p *TLoadTxnCommitRequest) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 14); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetTbls() { - if err = oprot.WriteFieldBegin("tbls", thrift.LIST, 15); err != nil { +func (p *TBeginTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 12); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Tbls)); err != nil { - return err - } - for _, v := range p.Tbls { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteI64(*p.BackendId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -30528,17 +27950,17 @@ func (p *TLoadTxnCommitRequest) writeField15(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 16); err != nil { +func (p *TBeginTxnRequest) writeField13(oprot thrift.TProtocol) (err error) { + if 
p.IsSetSubTxnNum() { + if err = oprot.WriteFieldBegin("sub_txn_num", thrift.I64, 13); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TableId); err != nil { + if err := oprot.WriteI64(p.SubTxnNum); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -30547,19 +27969,20 @@ func (p *TLoadTxnCommitRequest) writeField16(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -func (p *TLoadTxnCommitRequest) String() string { +func (p *TBeginTxnRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxnCommitRequest(%+v)", *p) + return fmt.Sprintf("TBeginTxnRequest(%+v)", *p) + } -func (p *TLoadTxnCommitRequest) DeepEqual(ano *TLoadTxnCommitRequest) bool { +func (p *TBeginTxnRequest) DeepEqual(ano *TBeginTxnRequest) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -30577,46 +28000,37 @@ func (p *TLoadTxnCommitRequest) DeepEqual(ano *TLoadTxnCommitRequest) bool { if !p.Field4DeepEqual(ano.Db) { return false } - if !p.Field5DeepEqual(ano.Tbl) { + if !p.Field5DeepEqual(ano.TableIds) { return false } if !p.Field6DeepEqual(ano.UserIp) { return false } - if !p.Field7DeepEqual(ano.TxnId) { - return false - } - if !p.Field8DeepEqual(ano.Sync) { - return false - } - if !p.Field9DeepEqual(ano.CommitInfos) { - return false - } - if !p.Field10DeepEqual(ano.AuthCode) { + if !p.Field7DeepEqual(ano.Label) { return false } - if !p.Field11DeepEqual(ano.TxnCommitAttachment) { + if !p.Field8DeepEqual(ano.AuthCode) { return false } - if !p.Field12DeepEqual(ano.ThriftRpcTimeoutMs) { + if !p.Field9DeepEqual(ano.Timeout) { return false } - if !p.Field13DeepEqual(ano.Token) { + if !p.Field10DeepEqual(ano.RequestId) { return false } - if !p.Field14DeepEqual(ano.DbId) { + if !p.Field11DeepEqual(ano.Token) { return false } - if !p.Field15DeepEqual(ano.Tbls) { + if !p.Field12DeepEqual(ano.BackendId) { return false } - if !p.Field16DeepEqual(ano.TableId) { + if !p.Field13DeepEqual(ano.SubTxnNum) { return false } return true } -func (p *TLoadTxnCommitRequest) Field1DeepEqual(src *string) bool { +func (p *TBeginTxnRequest) Field1DeepEqual(src *string) bool { if p.Cluster == src { return true @@ -30628,74 +28042,80 @@ func (p *TLoadTxnCommitRequest) Field1DeepEqual(src *string) bool { } return true } -func (p *TLoadTxnCommitRequest) Field2DeepEqual(src string) bool { +func (p *TBeginTxnRequest) Field2DeepEqual(src *string) bool { - if strings.Compare(p.User, src) != 0 { + if p.User == src { + return true + } else if p.User == nil || src == nil { return false } - return true -} -func (p *TLoadTxnCommitRequest) Field3DeepEqual(src string) bool { - - if strings.Compare(p.Passwd, src) != 0 { + if strings.Compare(*p.User, *src) != 0 { return false } return true } -func (p *TLoadTxnCommitRequest) Field4DeepEqual(src string) bool { +func (p *TBeginTxnRequest) Field3DeepEqual(src *string) bool { - if strings.Compare(p.Db, src) != 0 { + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { return false } - return true -} -func (p *TLoadTxnCommitRequest) Field5DeepEqual(src string) bool { - - if strings.Compare(p.Tbl, src) != 0 { + if 
strings.Compare(*p.Passwd, *src) != 0 { return false } return true } -func (p *TLoadTxnCommitRequest) Field6DeepEqual(src *string) bool { +func (p *TBeginTxnRequest) Field4DeepEqual(src *string) bool { - if p.UserIp == src { + if p.Db == src { return true - } else if p.UserIp == nil || src == nil { + } else if p.Db == nil || src == nil { return false } - if strings.Compare(*p.UserIp, *src) != 0 { + if strings.Compare(*p.Db, *src) != 0 { return false } return true } -func (p *TLoadTxnCommitRequest) Field7DeepEqual(src int64) bool { +func (p *TBeginTxnRequest) Field5DeepEqual(src []int64) bool { - if p.TxnId != src { + if len(p.TableIds) != len(src) { return false } + for i, v := range p.TableIds { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TLoadTxnCommitRequest) Field8DeepEqual(src bool) bool { +func (p *TBeginTxnRequest) Field6DeepEqual(src *string) bool { - if p.Sync != src { + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { return false } return true } -func (p *TLoadTxnCommitRequest) Field9DeepEqual(src []*types.TTabletCommitInfo) bool { +func (p *TBeginTxnRequest) Field7DeepEqual(src *string) bool { - if len(p.CommitInfos) != len(src) { + if p.Label == src { + return true + } else if p.Label == nil || src == nil { return false } - for i, v := range p.CommitInfos { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if strings.Compare(*p.Label, *src) != 0 { + return false } return true } -func (p *TLoadTxnCommitRequest) Field10DeepEqual(src *int64) bool { +func (p *TBeginTxnRequest) Field8DeepEqual(src *int64) bool { if p.AuthCode == src { return true @@ -30707,26 +28127,26 @@ func (p *TLoadTxnCommitRequest) Field10DeepEqual(src *int64) bool { } return true } -func (p *TLoadTxnCommitRequest) Field11DeepEqual(src *TTxnCommitAttachment) bool { +func (p *TBeginTxnRequest) Field9DeepEqual(src *int64) bool { - if !p.TxnCommitAttachment.DeepEqual(src) { + if p.Timeout == src { + return true + } else if p.Timeout == nil || src == nil { + return false + } + if *p.Timeout != *src { return false } return true } -func (p *TLoadTxnCommitRequest) Field12DeepEqual(src *int64) bool { +func (p *TBeginTxnRequest) Field10DeepEqual(src *types.TUniqueId) bool { - if p.ThriftRpcTimeoutMs == src { - return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { - return false - } - if *p.ThriftRpcTimeoutMs != *src { + if !p.RequestId.DeepEqual(src) { return false } return true } -func (p *TLoadTxnCommitRequest) Field13DeepEqual(src *string) bool { +func (p *TBeginTxnRequest) Field11DeepEqual(src *string) bool { if p.Token == src { return true @@ -30738,81 +28158,151 @@ func (p *TLoadTxnCommitRequest) Field13DeepEqual(src *string) bool { } return true } -func (p *TLoadTxnCommitRequest) Field14DeepEqual(src *int64) bool { +func (p *TBeginTxnRequest) Field12DeepEqual(src *int64) bool { - if p.DbId == src { + if p.BackendId == src { return true - } else if p.DbId == nil || src == nil { + } else if p.BackendId == nil || src == nil { return false } - if *p.DbId != *src { + if *p.BackendId != *src { return false } return true } -func (p *TLoadTxnCommitRequest) Field15DeepEqual(src []string) bool { +func (p *TBeginTxnRequest) Field13DeepEqual(src int64) bool { - if len(p.Tbls) != len(src) { + if p.SubTxnNum != src { return false } - for i, v := range p.Tbls { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } return true } -func (p 
*TLoadTxnCommitRequest) Field16DeepEqual(src *int64) bool { - if p.TableId == src { - return true - } else if p.TableId == nil || src == nil { - return false +type TBeginTxnResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` + JobStatus *string `thrift:"job_status,3,optional" frugal:"3,optional,string" json:"job_status,omitempty"` + DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,5,optional" frugal:"5,optional,types.TNetworkAddress" json:"master_address,omitempty"` + SubTxnIds []int64 `thrift:"sub_txn_ids,6,optional" frugal:"6,optional,list" json:"sub_txn_ids,omitempty"` +} + +func NewTBeginTxnResult_() *TBeginTxnResult_ { + return &TBeginTxnResult_{} +} + +func (p *TBeginTxnResult_) InitDefault() { +} + +var TBeginTxnResult__Status_DEFAULT *status.TStatus + +func (p *TBeginTxnResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TBeginTxnResult__Status_DEFAULT } - if *p.TableId != *src { - return false + return p.Status +} + +var TBeginTxnResult__TxnId_DEFAULT int64 + +func (p *TBeginTxnResult_) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TBeginTxnResult__TxnId_DEFAULT } - return true + return *p.TxnId } -type TLoadTxnCommitResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +var TBeginTxnResult__JobStatus_DEFAULT string + +func (p *TBeginTxnResult_) GetJobStatus() (v string) { + if !p.IsSetJobStatus() { + return TBeginTxnResult__JobStatus_DEFAULT + } + return *p.JobStatus } -func NewTLoadTxnCommitResult_() *TLoadTxnCommitResult_ { - return &TLoadTxnCommitResult_{} +var TBeginTxnResult__DbId_DEFAULT int64 + +func (p *TBeginTxnResult_) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TBeginTxnResult__DbId_DEFAULT + } + return *p.DbId } -func (p *TLoadTxnCommitResult_) InitDefault() { - *p = TLoadTxnCommitResult_{} +var TBeginTxnResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TBeginTxnResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TBeginTxnResult__MasterAddress_DEFAULT + } + return p.MasterAddress } -var TLoadTxnCommitResult__Status_DEFAULT *status.TStatus +var TBeginTxnResult__SubTxnIds_DEFAULT []int64 -func (p *TLoadTxnCommitResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TLoadTxnCommitResult__Status_DEFAULT +func (p *TBeginTxnResult_) GetSubTxnIds() (v []int64) { + if !p.IsSetSubTxnIds() { + return TBeginTxnResult__SubTxnIds_DEFAULT } - return p.Status + return p.SubTxnIds } -func (p *TLoadTxnCommitResult_) SetStatus(val *status.TStatus) { +func (p *TBeginTxnResult_) SetStatus(val *status.TStatus) { p.Status = val } +func (p *TBeginTxnResult_) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TBeginTxnResult_) SetJobStatus(val *string) { + p.JobStatus = val +} +func (p *TBeginTxnResult_) SetDbId(val *int64) { + p.DbId = val +} +func (p *TBeginTxnResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} +func (p *TBeginTxnResult_) SetSubTxnIds(val []int64) { + p.SubTxnIds = val +} -var fieldIDToName_TLoadTxnCommitResult_ = map[int16]string{ +var fieldIDToName_TBeginTxnResult_ = map[int16]string{ 1: "status", + 2: "txn_id", + 3: "job_status", + 4: "db_id", + 5: "master_address", + 6: "sub_txn_ids", } 
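(Illustration only, not part of the generated diff.) Where the old TLoadTxnCommitResult_ declared status as a required field and enforced it in Read, every field of TBeginTxnResult_ above is optional, so the generated Get* accessors quietly return typed defaults when a field was never sent. A caller-side sketch of the resulting defensive pattern follows; it assumes it compiles alongside the generated package, and the function name and error messages are invented for the example.

// Sketch: consuming a TBeginTxnResult_ on the caller side. GetTxnId and
// friends return a zero default when the optional field is absent, so
// IsSet* should be checked before trusting the value.
func exampleHandleBeginTxnResult(res *TBeginTxnResult_) (int64, error) {
	// status is optional in the regenerated struct, unlike the old
	// TLoadTxnCommitResult_, so the nil check is now the caller's job.
	if !res.IsSetStatus() {
		return 0, fmt.Errorf("begin txn reply carried no status: %s", res)
	}
	if !res.IsSetTxnId() {
		// GetTxnId() would silently return 0 here, so surface the problem instead.
		return 0, fmt.Errorf("begin txn reply carried no txn_id: %s", res)
	}
	txnID := res.GetTxnId()
	// An absent sub_txn_ids list is normal; GetSubTxnIds() simply returns nil.
	for _, subTxn := range res.GetSubTxnIds() {
		_ = subTxn // e.g. record for later per-sub-transaction handling (hypothetical use)
	}
	return txnID, nil
}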
-func (p *TLoadTxnCommitResult_) IsSetStatus() bool { +func (p *TBeginTxnResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TLoadTxnCommitResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TBeginTxnResult_) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TBeginTxnResult_) IsSetJobStatus() bool { + return p.JobStatus != nil +} + +func (p *TBeginTxnResult_) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TBeginTxnResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TBeginTxnResult_) IsSetSubTxnIds() bool { + return p.SubTxnIds != nil +} + +func (p *TBeginTxnResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -30833,18 +28323,54 @@ func (p *TLoadTxnCommitResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -30853,17 +28379,13 @@ func (p *TLoadTxnCommitResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -30871,56 +28393,140 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", 
fieldIDToName_TLoadTxnCommitResult_[fieldId])) } -func (p *TLoadTxnCommitResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TBeginTxnResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } +func (p *TBeginTxnResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TLoadTxnCommitResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnCommitResult"); err != nil { - goto WriteStructBeginError + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } + p.TxnId = _field + return nil +} +func (p *TBeginTxnResult_) ReadField3(iprot thrift.TProtocol) error { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } + p.JobStatus = _field return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +} +func (p *TBeginTxnResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TBeginTxnResult_) ReadField5(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} +func (p *TBeginTxnResult_) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SubTxnIds = _field + return nil +} + +func (p *TBeginTxnResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBeginTxnResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return 
thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnCommitResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TBeginTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -30929,14 +28535,118 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnCommitResult_) String() string { +func (p *TBeginTxnResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TBeginTxnResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetJobStatus() { + if err = oprot.WriteFieldBegin("job_status", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.JobStatus); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TBeginTxnResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TBeginTxnResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TBeginTxnResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnIds() { + if err = 
oprot.WriteFieldBegin("sub_txn_ids", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.SubTxnIds)); err != nil { + return err + } + for _, v := range p.SubTxnIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TBeginTxnResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxnCommitResult_(%+v)", *p) + return fmt.Sprintf("TBeginTxnResult_(%+v)", *p) + } -func (p *TLoadTxnCommitResult_) DeepEqual(ano *TLoadTxnCommitResult_) bool { +func (p *TBeginTxnResult_) DeepEqual(ano *TBeginTxnResult_) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -30945,589 +28655,34110 @@ func (p *TLoadTxnCommitResult_) DeepEqual(ano *TLoadTxnCommitResult_) bool { if !p.Field1DeepEqual(ano.Status) { return false } + if !p.Field2DeepEqual(ano.TxnId) { + return false + } + if !p.Field3DeepEqual(ano.JobStatus) { + return false + } + if !p.Field4DeepEqual(ano.DbId) { + return false + } + if !p.Field5DeepEqual(ano.MasterAddress) { + return false + } + if !p.Field6DeepEqual(ano.SubTxnIds) { + return false + } return true } -func (p *TLoadTxnCommitResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TBeginTxnResult_) Field1DeepEqual(src *status.TStatus) bool { if !p.Status.DeepEqual(src) { return false } return true } +func (p *TBeginTxnResult_) Field2DeepEqual(src *int64) bool { -type TCommitTxnRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" json:"user_ip,omitempty"` - TxnId *int64 `thrift:"txn_id,6,optional" frugal:"6,optional,i64" json:"txn_id,omitempty"` - CommitInfos []*types.TTabletCommitInfo `thrift:"commit_infos,7,optional" frugal:"7,optional,list" json:"commit_infos,omitempty"` - AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` - TxnCommitAttachment *TTxnCommitAttachment `thrift:"txn_commit_attachment,9,optional" frugal:"9,optional,TTxnCommitAttachment" json:"txn_commit_attachment,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,10,optional" frugal:"10,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` - Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` - DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true } +func (p *TBeginTxnResult_) Field3DeepEqual(src *string) bool { -func NewTCommitTxnRequest() *TCommitTxnRequest { - return &TCommitTxnRequest{} + if p.JobStatus == src { + return true + } else if p.JobStatus == nil || src == nil { + return false + } + if strings.Compare(*p.JobStatus, 
*src) != 0 { + return false + } + return true } +func (p *TBeginTxnResult_) Field4DeepEqual(src *int64) bool { -func (p *TCommitTxnRequest) InitDefault() { - *p = TCommitTxnRequest{} + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true } +func (p *TBeginTxnResult_) Field5DeepEqual(src *types.TNetworkAddress) bool { -var TCommitTxnRequest_Cluster_DEFAULT string + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} +func (p *TBeginTxnResult_) Field6DeepEqual(src []int64) bool { -func (p *TCommitTxnRequest) GetCluster() (v string) { + if len(p.SubTxnIds) != len(src) { + return false + } + for i, v := range p.SubTxnIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TStreamLoadPutRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` + Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` + UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + LoadId *types.TUniqueId `thrift:"loadId,7,required" frugal:"7,required,types.TUniqueId" json:"loadId"` + TxnId int64 `thrift:"txnId,8,required" frugal:"8,required,i64" json:"txnId"` + FileType types.TFileType `thrift:"fileType,9,required" frugal:"9,required,TFileType" json:"fileType"` + FormatType plannodes.TFileFormatType `thrift:"formatType,10,required" frugal:"10,required,TFileFormatType" json:"formatType"` + Path *string `thrift:"path,11,optional" frugal:"11,optional,string" json:"path,omitempty"` + Columns *string `thrift:"columns,12,optional" frugal:"12,optional,string" json:"columns,omitempty"` + Where *string `thrift:"where,13,optional" frugal:"13,optional,string" json:"where,omitempty"` + ColumnSeparator *string `thrift:"columnSeparator,14,optional" frugal:"14,optional,string" json:"columnSeparator,omitempty"` + Partitions *string `thrift:"partitions,15,optional" frugal:"15,optional,string" json:"partitions,omitempty"` + AuthCode *int64 `thrift:"auth_code,16,optional" frugal:"16,optional,i64" json:"auth_code,omitempty"` + Negative *bool `thrift:"negative,17,optional" frugal:"17,optional,bool" json:"negative,omitempty"` + Timeout *int32 `thrift:"timeout,18,optional" frugal:"18,optional,i32" json:"timeout,omitempty"` + StrictMode *bool `thrift:"strictMode,19,optional" frugal:"19,optional,bool" json:"strictMode,omitempty"` + Timezone *string `thrift:"timezone,20,optional" frugal:"20,optional,string" json:"timezone,omitempty"` + ExecMemLimit *int64 `thrift:"execMemLimit,21,optional" frugal:"21,optional,i64" json:"execMemLimit,omitempty"` + IsTempPartition *bool `thrift:"isTempPartition,22,optional" frugal:"22,optional,bool" json:"isTempPartition,omitempty"` + StripOuterArray *bool `thrift:"strip_outer_array,23,optional" frugal:"23,optional,bool" json:"strip_outer_array,omitempty"` + Jsonpaths *string `thrift:"jsonpaths,24,optional" frugal:"24,optional,string" json:"jsonpaths,omitempty"` + ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,25,optional" frugal:"25,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` + JsonRoot *string `thrift:"json_root,26,optional" frugal:"26,optional,string" json:"json_root,omitempty"` + 
MergeType *types.TMergeType `thrift:"merge_type,27,optional" frugal:"27,optional,TMergeType" json:"merge_type,omitempty"` + DeleteCondition *string `thrift:"delete_condition,28,optional" frugal:"28,optional,string" json:"delete_condition,omitempty"` + SequenceCol *string `thrift:"sequence_col,29,optional" frugal:"29,optional,string" json:"sequence_col,omitempty"` + NumAsString *bool `thrift:"num_as_string,30,optional" frugal:"30,optional,bool" json:"num_as_string,omitempty"` + FuzzyParse *bool `thrift:"fuzzy_parse,31,optional" frugal:"31,optional,bool" json:"fuzzy_parse,omitempty"` + LineDelimiter *string `thrift:"line_delimiter,32,optional" frugal:"32,optional,string" json:"line_delimiter,omitempty"` + ReadJsonByLine *bool `thrift:"read_json_by_line,33,optional" frugal:"33,optional,bool" json:"read_json_by_line,omitempty"` + Token *string `thrift:"token,34,optional" frugal:"34,optional,string" json:"token,omitempty"` + SendBatchParallelism *int32 `thrift:"send_batch_parallelism,35,optional" frugal:"35,optional,i32" json:"send_batch_parallelism,omitempty"` + MaxFilterRatio *float64 `thrift:"max_filter_ratio,36,optional" frugal:"36,optional,double" json:"max_filter_ratio,omitempty"` + LoadToSingleTablet *bool `thrift:"load_to_single_tablet,37,optional" frugal:"37,optional,bool" json:"load_to_single_tablet,omitempty"` + HeaderType *string `thrift:"header_type,38,optional" frugal:"38,optional,string" json:"header_type,omitempty"` + HiddenColumns *string `thrift:"hidden_columns,39,optional" frugal:"39,optional,string" json:"hidden_columns,omitempty"` + CompressType *plannodes.TFileCompressType `thrift:"compress_type,40,optional" frugal:"40,optional,TFileCompressType" json:"compress_type,omitempty"` + FileSize *int64 `thrift:"file_size,41,optional" frugal:"41,optional,i64" json:"file_size,omitempty"` + TrimDoubleQuotes *bool `thrift:"trim_double_quotes,42,optional" frugal:"42,optional,bool" json:"trim_double_quotes,omitempty"` + SkipLines *int32 `thrift:"skip_lines,43,optional" frugal:"43,optional,i32" json:"skip_lines,omitempty"` + EnableProfile *bool `thrift:"enable_profile,44,optional" frugal:"44,optional,bool" json:"enable_profile,omitempty"` + PartialUpdate *bool `thrift:"partial_update,45,optional" frugal:"45,optional,bool" json:"partial_update,omitempty"` + TableNames []string `thrift:"table_names,46,optional" frugal:"46,optional,list" json:"table_names,omitempty"` + LoadSql *string `thrift:"load_sql,47,optional" frugal:"47,optional,string" json:"load_sql,omitempty"` + BackendId *int64 `thrift:"backend_id,48,optional" frugal:"48,optional,i64" json:"backend_id,omitempty"` + Version *int32 `thrift:"version,49,optional" frugal:"49,optional,i32" json:"version,omitempty"` + Label *string `thrift:"label,50,optional" frugal:"50,optional,string" json:"label,omitempty"` + Enclose *int8 `thrift:"enclose,51,optional" frugal:"51,optional,i8" json:"enclose,omitempty"` + Escape *int8 `thrift:"escape,52,optional" frugal:"52,optional,i8" json:"escape,omitempty"` + MemtableOnSinkNode *bool `thrift:"memtable_on_sink_node,53,optional" frugal:"53,optional,bool" json:"memtable_on_sink_node,omitempty"` + GroupCommit *bool `thrift:"group_commit,54,optional" frugal:"54,optional,bool" json:"group_commit,omitempty"` + StreamPerNode *int32 `thrift:"stream_per_node,55,optional" frugal:"55,optional,i32" json:"stream_per_node,omitempty"` + GroupCommitMode *string `thrift:"group_commit_mode,56,optional" frugal:"56,optional,string" json:"group_commit_mode,omitempty"` + UniqueKeyUpdateMode *types.TUniqueKeyUpdateMode 
`thrift:"unique_key_update_mode,57,optional" frugal:"57,optional,TUniqueKeyUpdateMode" json:"unique_key_update_mode,omitempty"` + CloudCluster *string `thrift:"cloud_cluster,1000,optional" frugal:"1000,optional,string" json:"cloud_cluster,omitempty"` + TableId *int64 `thrift:"table_id,1001,optional" frugal:"1001,optional,i64" json:"table_id,omitempty"` +} + +func NewTStreamLoadPutRequest() *TStreamLoadPutRequest { + return &TStreamLoadPutRequest{} +} + +func (p *TStreamLoadPutRequest) InitDefault() { +} + +var TStreamLoadPutRequest_Cluster_DEFAULT string + +func (p *TStreamLoadPutRequest) GetCluster() (v string) { if !p.IsSetCluster() { - return TCommitTxnRequest_Cluster_DEFAULT + return TStreamLoadPutRequest_Cluster_DEFAULT } return *p.Cluster } -var TCommitTxnRequest_User_DEFAULT string +func (p *TStreamLoadPutRequest) GetUser() (v string) { + return p.User +} -func (p *TCommitTxnRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TCommitTxnRequest_User_DEFAULT +func (p *TStreamLoadPutRequest) GetPasswd() (v string) { + return p.Passwd +} + +func (p *TStreamLoadPutRequest) GetDb() (v string) { + return p.Db +} + +func (p *TStreamLoadPutRequest) GetTbl() (v string) { + return p.Tbl +} + +var TStreamLoadPutRequest_UserIp_DEFAULT string + +func (p *TStreamLoadPutRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TStreamLoadPutRequest_UserIp_DEFAULT } - return *p.User + return *p.UserIp } -var TCommitTxnRequest_Passwd_DEFAULT string +var TStreamLoadPutRequest_LoadId_DEFAULT *types.TUniqueId -func (p *TCommitTxnRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TCommitTxnRequest_Passwd_DEFAULT +func (p *TStreamLoadPutRequest) GetLoadId() (v *types.TUniqueId) { + if !p.IsSetLoadId() { + return TStreamLoadPutRequest_LoadId_DEFAULT } - return *p.Passwd + return p.LoadId } -var TCommitTxnRequest_Db_DEFAULT string +func (p *TStreamLoadPutRequest) GetTxnId() (v int64) { + return p.TxnId +} -func (p *TCommitTxnRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TCommitTxnRequest_Db_DEFAULT +func (p *TStreamLoadPutRequest) GetFileType() (v types.TFileType) { + return p.FileType +} + +func (p *TStreamLoadPutRequest) GetFormatType() (v plannodes.TFileFormatType) { + return p.FormatType +} + +var TStreamLoadPutRequest_Path_DEFAULT string + +func (p *TStreamLoadPutRequest) GetPath() (v string) { + if !p.IsSetPath() { + return TStreamLoadPutRequest_Path_DEFAULT } - return *p.Db + return *p.Path } -var TCommitTxnRequest_UserIp_DEFAULT string +var TStreamLoadPutRequest_Columns_DEFAULT string -func (p *TCommitTxnRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TCommitTxnRequest_UserIp_DEFAULT +func (p *TStreamLoadPutRequest) GetColumns() (v string) { + if !p.IsSetColumns() { + return TStreamLoadPutRequest_Columns_DEFAULT } - return *p.UserIp + return *p.Columns } -var TCommitTxnRequest_TxnId_DEFAULT int64 +var TStreamLoadPutRequest_Where_DEFAULT string -func (p *TCommitTxnRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TCommitTxnRequest_TxnId_DEFAULT +func (p *TStreamLoadPutRequest) GetWhere() (v string) { + if !p.IsSetWhere() { + return TStreamLoadPutRequest_Where_DEFAULT } - return *p.TxnId + return *p.Where } -var TCommitTxnRequest_CommitInfos_DEFAULT []*types.TTabletCommitInfo +var TStreamLoadPutRequest_ColumnSeparator_DEFAULT string -func (p *TCommitTxnRequest) GetCommitInfos() (v []*types.TTabletCommitInfo) { - if !p.IsSetCommitInfos() { - return TCommitTxnRequest_CommitInfos_DEFAULT +func (p *TStreamLoadPutRequest) 
GetColumnSeparator() (v string) { + if !p.IsSetColumnSeparator() { + return TStreamLoadPutRequest_ColumnSeparator_DEFAULT } - return p.CommitInfos + return *p.ColumnSeparator } -var TCommitTxnRequest_AuthCode_DEFAULT int64 +var TStreamLoadPutRequest_Partitions_DEFAULT string -func (p *TCommitTxnRequest) GetAuthCode() (v int64) { +func (p *TStreamLoadPutRequest) GetPartitions() (v string) { + if !p.IsSetPartitions() { + return TStreamLoadPutRequest_Partitions_DEFAULT + } + return *p.Partitions +} + +var TStreamLoadPutRequest_AuthCode_DEFAULT int64 + +func (p *TStreamLoadPutRequest) GetAuthCode() (v int64) { if !p.IsSetAuthCode() { - return TCommitTxnRequest_AuthCode_DEFAULT + return TStreamLoadPutRequest_AuthCode_DEFAULT } return *p.AuthCode } -var TCommitTxnRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment +var TStreamLoadPutRequest_Negative_DEFAULT bool -func (p *TCommitTxnRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { - if !p.IsSetTxnCommitAttachment() { - return TCommitTxnRequest_TxnCommitAttachment_DEFAULT +func (p *TStreamLoadPutRequest) GetNegative() (v bool) { + if !p.IsSetNegative() { + return TStreamLoadPutRequest_Negative_DEFAULT } - return p.TxnCommitAttachment + return *p.Negative } -var TCommitTxnRequest_ThriftRpcTimeoutMs_DEFAULT int64 +var TStreamLoadPutRequest_Timeout_DEFAULT int32 -func (p *TCommitTxnRequest) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TCommitTxnRequest_ThriftRpcTimeoutMs_DEFAULT +func (p *TStreamLoadPutRequest) GetTimeout() (v int32) { + if !p.IsSetTimeout() { + return TStreamLoadPutRequest_Timeout_DEFAULT } - return *p.ThriftRpcTimeoutMs + return *p.Timeout } -var TCommitTxnRequest_Token_DEFAULT string +var TStreamLoadPutRequest_StrictMode_DEFAULT bool -func (p *TCommitTxnRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TCommitTxnRequest_Token_DEFAULT +func (p *TStreamLoadPutRequest) GetStrictMode() (v bool) { + if !p.IsSetStrictMode() { + return TStreamLoadPutRequest_StrictMode_DEFAULT } - return *p.Token + return *p.StrictMode } -var TCommitTxnRequest_DbId_DEFAULT int64 +var TStreamLoadPutRequest_Timezone_DEFAULT string -func (p *TCommitTxnRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TCommitTxnRequest_DbId_DEFAULT +func (p *TStreamLoadPutRequest) GetTimezone() (v string) { + if !p.IsSetTimezone() { + return TStreamLoadPutRequest_Timezone_DEFAULT } - return *p.DbId -} -func (p *TCommitTxnRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TCommitTxnRequest) SetUser(val *string) { - p.User = val + return *p.Timezone } -func (p *TCommitTxnRequest) SetPasswd(val *string) { - p.Passwd = val + +var TStreamLoadPutRequest_ExecMemLimit_DEFAULT int64 + +func (p *TStreamLoadPutRequest) GetExecMemLimit() (v int64) { + if !p.IsSetExecMemLimit() { + return TStreamLoadPutRequest_ExecMemLimit_DEFAULT + } + return *p.ExecMemLimit } -func (p *TCommitTxnRequest) SetDb(val *string) { - p.Db = val + +var TStreamLoadPutRequest_IsTempPartition_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetIsTempPartition() (v bool) { + if !p.IsSetIsTempPartition() { + return TStreamLoadPutRequest_IsTempPartition_DEFAULT + } + return *p.IsTempPartition } -func (p *TCommitTxnRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TCommitTxnRequest) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TCommitTxnRequest) SetCommitInfos(val []*types.TTabletCommitInfo) { - p.CommitInfos = val -} -func (p *TCommitTxnRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p 
*TCommitTxnRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { - p.TxnCommitAttachment = val -} -func (p *TCommitTxnRequest) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val + +var TStreamLoadPutRequest_StripOuterArray_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetStripOuterArray() (v bool) { + if !p.IsSetStripOuterArray() { + return TStreamLoadPutRequest_StripOuterArray_DEFAULT + } + return *p.StripOuterArray } -func (p *TCommitTxnRequest) SetToken(val *string) { - p.Token = val + +var TStreamLoadPutRequest_Jsonpaths_DEFAULT string + +func (p *TStreamLoadPutRequest) GetJsonpaths() (v string) { + if !p.IsSetJsonpaths() { + return TStreamLoadPutRequest_Jsonpaths_DEFAULT + } + return *p.Jsonpaths } -func (p *TCommitTxnRequest) SetDbId(val *int64) { - p.DbId = val + +var TStreamLoadPutRequest_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TStreamLoadPutRequest) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TStreamLoadPutRequest_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs } -var fieldIDToName_TCommitTxnRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "user_ip", - 6: "txn_id", - 7: "commit_infos", - 8: "auth_code", - 9: "txn_commit_attachment", - 10: "thrift_rpc_timeout_ms", - 11: "token", - 12: "db_id", +var TStreamLoadPutRequest_JsonRoot_DEFAULT string + +func (p *TStreamLoadPutRequest) GetJsonRoot() (v string) { + if !p.IsSetJsonRoot() { + return TStreamLoadPutRequest_JsonRoot_DEFAULT + } + return *p.JsonRoot } -func (p *TCommitTxnRequest) IsSetCluster() bool { - return p.Cluster != nil +var TStreamLoadPutRequest_MergeType_DEFAULT types.TMergeType + +func (p *TStreamLoadPutRequest) GetMergeType() (v types.TMergeType) { + if !p.IsSetMergeType() { + return TStreamLoadPutRequest_MergeType_DEFAULT + } + return *p.MergeType } -func (p *TCommitTxnRequest) IsSetUser() bool { - return p.User != nil +var TStreamLoadPutRequest_DeleteCondition_DEFAULT string + +func (p *TStreamLoadPutRequest) GetDeleteCondition() (v string) { + if !p.IsSetDeleteCondition() { + return TStreamLoadPutRequest_DeleteCondition_DEFAULT + } + return *p.DeleteCondition } -func (p *TCommitTxnRequest) IsSetPasswd() bool { - return p.Passwd != nil +var TStreamLoadPutRequest_SequenceCol_DEFAULT string + +func (p *TStreamLoadPutRequest) GetSequenceCol() (v string) { + if !p.IsSetSequenceCol() { + return TStreamLoadPutRequest_SequenceCol_DEFAULT + } + return *p.SequenceCol } -func (p *TCommitTxnRequest) IsSetDb() bool { - return p.Db != nil +var TStreamLoadPutRequest_NumAsString_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetNumAsString() (v bool) { + if !p.IsSetNumAsString() { + return TStreamLoadPutRequest_NumAsString_DEFAULT + } + return *p.NumAsString } -func (p *TCommitTxnRequest) IsSetUserIp() bool { - return p.UserIp != nil +var TStreamLoadPutRequest_FuzzyParse_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetFuzzyParse() (v bool) { + if !p.IsSetFuzzyParse() { + return TStreamLoadPutRequest_FuzzyParse_DEFAULT + } + return *p.FuzzyParse } -func (p *TCommitTxnRequest) IsSetTxnId() bool { - return p.TxnId != nil +var TStreamLoadPutRequest_LineDelimiter_DEFAULT string + +func (p *TStreamLoadPutRequest) GetLineDelimiter() (v string) { + if !p.IsSetLineDelimiter() { + return TStreamLoadPutRequest_LineDelimiter_DEFAULT + } + return *p.LineDelimiter } -func (p *TCommitTxnRequest) IsSetCommitInfos() bool { - return p.CommitInfos != nil +var TStreamLoadPutRequest_ReadJsonByLine_DEFAULT bool + +func (p 
*TStreamLoadPutRequest) GetReadJsonByLine() (v bool) { + if !p.IsSetReadJsonByLine() { + return TStreamLoadPutRequest_ReadJsonByLine_DEFAULT + } + return *p.ReadJsonByLine } -func (p *TCommitTxnRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +var TStreamLoadPutRequest_Token_DEFAULT string + +func (p *TStreamLoadPutRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TStreamLoadPutRequest_Token_DEFAULT + } + return *p.Token } -func (p *TCommitTxnRequest) IsSetTxnCommitAttachment() bool { - return p.TxnCommitAttachment != nil +var TStreamLoadPutRequest_SendBatchParallelism_DEFAULT int32 + +func (p *TStreamLoadPutRequest) GetSendBatchParallelism() (v int32) { + if !p.IsSetSendBatchParallelism() { + return TStreamLoadPutRequest_SendBatchParallelism_DEFAULT + } + return *p.SendBatchParallelism } -func (p *TCommitTxnRequest) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil +var TStreamLoadPutRequest_MaxFilterRatio_DEFAULT float64 + +func (p *TStreamLoadPutRequest) GetMaxFilterRatio() (v float64) { + if !p.IsSetMaxFilterRatio() { + return TStreamLoadPutRequest_MaxFilterRatio_DEFAULT + } + return *p.MaxFilterRatio } -func (p *TCommitTxnRequest) IsSetToken() bool { - return p.Token != nil +var TStreamLoadPutRequest_LoadToSingleTablet_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetLoadToSingleTablet() (v bool) { + if !p.IsSetLoadToSingleTablet() { + return TStreamLoadPutRequest_LoadToSingleTablet_DEFAULT + } + return *p.LoadToSingleTablet } -func (p *TCommitTxnRequest) IsSetDbId() bool { - return p.DbId != nil +var TStreamLoadPutRequest_HeaderType_DEFAULT string + +func (p *TStreamLoadPutRequest) GetHeaderType() (v string) { + if !p.IsSetHeaderType() { + return TStreamLoadPutRequest_HeaderType_DEFAULT + } + return *p.HeaderType } -func (p *TCommitTxnRequest) Read(iprot thrift.TProtocol) (err error) { +var TStreamLoadPutRequest_HiddenColumns_DEFAULT string - var fieldTypeId thrift.TType - var fieldId int16 +func (p *TStreamLoadPutRequest) GetHiddenColumns() (v string) { + if !p.IsSetHiddenColumns() { + return TStreamLoadPutRequest_HiddenColumns_DEFAULT + } + return *p.HiddenColumns +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +var TStreamLoadPutRequest_CompressType_DEFAULT plannodes.TFileCompressType + +func (p *TStreamLoadPutRequest) GetCompressType() (v plannodes.TFileCompressType) { + if !p.IsSetCompressType() { + return TStreamLoadPutRequest_CompressType_DEFAULT } + return *p.CompressType +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +var TStreamLoadPutRequest_FileSize_DEFAULT int64 - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.LIST { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.I64 { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +func (p *TStreamLoadPutRequest) GetFileSize() (v int64) { + if !p.IsSetFileSize() { + return TStreamLoadPutRequest_FileSize_DEFAULT + } + return *p.FileSize +} - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } +var TStreamLoadPutRequest_TrimDoubleQuotes_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetTrimDoubleQuotes() (v bool) { + if !p.IsSetTrimDoubleQuotes() { + return TStreamLoadPutRequest_TrimDoubleQuotes_DEFAULT } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + return *p.TrimDoubleQuotes +} + +var TStreamLoadPutRequest_SkipLines_DEFAULT int32 + +func (p *TStreamLoadPutRequest) GetSkipLines() (v int32) { + if !p.IsSetSkipLines() { + return TStreamLoadPutRequest_SkipLines_DEFAULT } + return *p.SkipLines +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +var TStreamLoadPutRequest_EnableProfile_DEFAULT bool -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func (p *TStreamLoadPutRequest) GetEnableProfile() (v bool) { + if !p.IsSetEnableProfile() { + return TStreamLoadPutRequest_EnableProfile_DEFAULT + } + return *p.EnableProfile } -func (p *TCommitTxnRequest) ReadField1(iprot thrift.TProtocol) error { - if 
v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v +var TStreamLoadPutRequest_PartialUpdate_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetPartialUpdate() (v bool) { + if !p.IsSetPartialUpdate() { + return TStreamLoadPutRequest_PartialUpdate_DEFAULT } - return nil + return *p.PartialUpdate } -func (p *TCommitTxnRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = &v +var TStreamLoadPutRequest_TableNames_DEFAULT []string + +func (p *TStreamLoadPutRequest) GetTableNames() (v []string) { + if !p.IsSetTableNames() { + return TStreamLoadPutRequest_TableNames_DEFAULT } - return nil + return p.TableNames } -func (p *TCommitTxnRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = &v +var TStreamLoadPutRequest_LoadSql_DEFAULT string + +func (p *TStreamLoadPutRequest) GetLoadSql() (v string) { + if !p.IsSetLoadSql() { + return TStreamLoadPutRequest_LoadSql_DEFAULT } - return nil + return *p.LoadSql } -func (p *TCommitTxnRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v +var TStreamLoadPutRequest_BackendId_DEFAULT int64 + +func (p *TStreamLoadPutRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TStreamLoadPutRequest_BackendId_DEFAULT } - return nil + return *p.BackendId } -func (p *TCommitTxnRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v +var TStreamLoadPutRequest_Version_DEFAULT int32 + +func (p *TStreamLoadPutRequest) GetVersion() (v int32) { + if !p.IsSetVersion() { + return TStreamLoadPutRequest_Version_DEFAULT } - return nil + return *p.Version } -func (p *TCommitTxnRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v +var TStreamLoadPutRequest_Label_DEFAULT string + +func (p *TStreamLoadPutRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TStreamLoadPutRequest_Label_DEFAULT } - return nil + return *p.Label } -func (p *TCommitTxnRequest) ReadField7(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +var TStreamLoadPutRequest_Enclose_DEFAULT int8 + +func (p *TStreamLoadPutRequest) GetEnclose() (v int8) { + if !p.IsSetEnclose() { + return TStreamLoadPutRequest_Enclose_DEFAULT } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if err := _elem.Read(iprot); err != nil { - return err - } + return *p.Enclose +} + +var TStreamLoadPutRequest_Escape_DEFAULT int8 - p.CommitInfos = append(p.CommitInfos, _elem) +func (p *TStreamLoadPutRequest) GetEscape() (v int8) { + if !p.IsSetEscape() { + return TStreamLoadPutRequest_Escape_DEFAULT } - if err := iprot.ReadListEnd(); err != nil { - return err + return *p.Escape +} + +var TStreamLoadPutRequest_MemtableOnSinkNode_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetMemtableOnSinkNode() (v bool) { + if !p.IsSetMemtableOnSinkNode() { + return TStreamLoadPutRequest_MemtableOnSinkNode_DEFAULT } - return nil + return *p.MemtableOnSinkNode } -func (p *TCommitTxnRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.AuthCode = &v +var 
TStreamLoadPutRequest_GroupCommit_DEFAULT bool + +func (p *TStreamLoadPutRequest) GetGroupCommit() (v bool) { + if !p.IsSetGroupCommit() { + return TStreamLoadPutRequest_GroupCommit_DEFAULT } - return nil + return *p.GroupCommit } -func (p *TCommitTxnRequest) ReadField9(iprot thrift.TProtocol) error { - p.TxnCommitAttachment = NewTTxnCommitAttachment() - if err := p.TxnCommitAttachment.Read(iprot); err != nil { - return err +var TStreamLoadPutRequest_StreamPerNode_DEFAULT int32 + +func (p *TStreamLoadPutRequest) GetStreamPerNode() (v int32) { + if !p.IsSetStreamPerNode() { + return TStreamLoadPutRequest_StreamPerNode_DEFAULT } - return nil + return *p.StreamPerNode } -func (p *TCommitTxnRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ThriftRpcTimeoutMs = &v +var TStreamLoadPutRequest_GroupCommitMode_DEFAULT string + +func (p *TStreamLoadPutRequest) GetGroupCommitMode() (v string) { + if !p.IsSetGroupCommitMode() { + return TStreamLoadPutRequest_GroupCommitMode_DEFAULT } - return nil + return *p.GroupCommitMode } -func (p *TCommitTxnRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v +var TStreamLoadPutRequest_UniqueKeyUpdateMode_DEFAULT types.TUniqueKeyUpdateMode + +func (p *TStreamLoadPutRequest) GetUniqueKeyUpdateMode() (v types.TUniqueKeyUpdateMode) { + if !p.IsSetUniqueKeyUpdateMode() { + return TStreamLoadPutRequest_UniqueKeyUpdateMode_DEFAULT } - return nil + return *p.UniqueKeyUpdateMode } -func (p *TCommitTxnRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DbId = &v +var TStreamLoadPutRequest_CloudCluster_DEFAULT string + +func (p *TStreamLoadPutRequest) GetCloudCluster() (v string) { + if !p.IsSetCloudCluster() { + return TStreamLoadPutRequest_CloudCluster_DEFAULT } - return nil + return *p.CloudCluster } -func (p *TCommitTxnRequest) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TCommitTxnRequest"); err != nil { - goto WriteStructBeginError +var TStreamLoadPutRequest_TableId_DEFAULT int64 + +func (p *TStreamLoadPutRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TStreamLoadPutRequest_TableId_DEFAULT } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { + return *p.TableId +} +func (p *TStreamLoadPutRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TStreamLoadPutRequest) SetUser(val string) { + p.User = val +} +func (p *TStreamLoadPutRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TStreamLoadPutRequest) SetDb(val string) { + p.Db = val +} +func (p *TStreamLoadPutRequest) SetTbl(val string) { + p.Tbl = val +} +func (p *TStreamLoadPutRequest) 
SetUserIp(val *string) { + p.UserIp = val +} +func (p *TStreamLoadPutRequest) SetLoadId(val *types.TUniqueId) { + p.LoadId = val +} +func (p *TStreamLoadPutRequest) SetTxnId(val int64) { + p.TxnId = val +} +func (p *TStreamLoadPutRequest) SetFileType(val types.TFileType) { + p.FileType = val +} +func (p *TStreamLoadPutRequest) SetFormatType(val plannodes.TFileFormatType) { + p.FormatType = val +} +func (p *TStreamLoadPutRequest) SetPath(val *string) { + p.Path = val +} +func (p *TStreamLoadPutRequest) SetColumns(val *string) { + p.Columns = val +} +func (p *TStreamLoadPutRequest) SetWhere(val *string) { + p.Where = val +} +func (p *TStreamLoadPutRequest) SetColumnSeparator(val *string) { + p.ColumnSeparator = val +} +func (p *TStreamLoadPutRequest) SetPartitions(val *string) { + p.Partitions = val +} +func (p *TStreamLoadPutRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TStreamLoadPutRequest) SetNegative(val *bool) { + p.Negative = val +} +func (p *TStreamLoadPutRequest) SetTimeout(val *int32) { + p.Timeout = val +} +func (p *TStreamLoadPutRequest) SetStrictMode(val *bool) { + p.StrictMode = val +} +func (p *TStreamLoadPutRequest) SetTimezone(val *string) { + p.Timezone = val +} +func (p *TStreamLoadPutRequest) SetExecMemLimit(val *int64) { + p.ExecMemLimit = val +} +func (p *TStreamLoadPutRequest) SetIsTempPartition(val *bool) { + p.IsTempPartition = val +} +func (p *TStreamLoadPutRequest) SetStripOuterArray(val *bool) { + p.StripOuterArray = val +} +func (p *TStreamLoadPutRequest) SetJsonpaths(val *string) { + p.Jsonpaths = val +} +func (p *TStreamLoadPutRequest) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} +func (p *TStreamLoadPutRequest) SetJsonRoot(val *string) { + p.JsonRoot = val +} +func (p *TStreamLoadPutRequest) SetMergeType(val *types.TMergeType) { + p.MergeType = val +} +func (p *TStreamLoadPutRequest) SetDeleteCondition(val *string) { + p.DeleteCondition = val +} +func (p *TStreamLoadPutRequest) SetSequenceCol(val *string) { + p.SequenceCol = val +} +func (p *TStreamLoadPutRequest) SetNumAsString(val *bool) { + p.NumAsString = val +} +func (p *TStreamLoadPutRequest) SetFuzzyParse(val *bool) { + p.FuzzyParse = val +} +func (p *TStreamLoadPutRequest) SetLineDelimiter(val *string) { + p.LineDelimiter = val +} +func (p *TStreamLoadPutRequest) SetReadJsonByLine(val *bool) { + p.ReadJsonByLine = val +} +func (p *TStreamLoadPutRequest) SetToken(val *string) { + p.Token = val +} +func (p *TStreamLoadPutRequest) SetSendBatchParallelism(val *int32) { + p.SendBatchParallelism = val +} +func (p *TStreamLoadPutRequest) SetMaxFilterRatio(val *float64) { + p.MaxFilterRatio = val +} +func (p *TStreamLoadPutRequest) SetLoadToSingleTablet(val *bool) { + p.LoadToSingleTablet = val +} +func (p *TStreamLoadPutRequest) SetHeaderType(val *string) { + p.HeaderType = val +} +func (p *TStreamLoadPutRequest) SetHiddenColumns(val *string) { + p.HiddenColumns = val +} +func (p *TStreamLoadPutRequest) SetCompressType(val *plannodes.TFileCompressType) { + p.CompressType = val +} +func (p *TStreamLoadPutRequest) SetFileSize(val *int64) { + p.FileSize = val +} +func (p *TStreamLoadPutRequest) SetTrimDoubleQuotes(val *bool) { + p.TrimDoubleQuotes = val +} +func (p *TStreamLoadPutRequest) SetSkipLines(val *int32) { + p.SkipLines = val +} +func (p *TStreamLoadPutRequest) SetEnableProfile(val *bool) { + p.EnableProfile = val +} +func (p *TStreamLoadPutRequest) SetPartialUpdate(val *bool) { + p.PartialUpdate = val +} +func (p *TStreamLoadPutRequest) SetTableNames(val 
[]string) { + p.TableNames = val +} +func (p *TStreamLoadPutRequest) SetLoadSql(val *string) { + p.LoadSql = val +} +func (p *TStreamLoadPutRequest) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TStreamLoadPutRequest) SetVersion(val *int32) { + p.Version = val +} +func (p *TStreamLoadPutRequest) SetLabel(val *string) { + p.Label = val +} +func (p *TStreamLoadPutRequest) SetEnclose(val *int8) { + p.Enclose = val +} +func (p *TStreamLoadPutRequest) SetEscape(val *int8) { + p.Escape = val +} +func (p *TStreamLoadPutRequest) SetMemtableOnSinkNode(val *bool) { + p.MemtableOnSinkNode = val +} +func (p *TStreamLoadPutRequest) SetGroupCommit(val *bool) { + p.GroupCommit = val +} +func (p *TStreamLoadPutRequest) SetStreamPerNode(val *int32) { + p.StreamPerNode = val +} +func (p *TStreamLoadPutRequest) SetGroupCommitMode(val *string) { + p.GroupCommitMode = val +} +func (p *TStreamLoadPutRequest) SetUniqueKeyUpdateMode(val *types.TUniqueKeyUpdateMode) { + p.UniqueKeyUpdateMode = val +} +func (p *TStreamLoadPutRequest) SetCloudCluster(val *string) { + p.CloudCluster = val +} +func (p *TStreamLoadPutRequest) SetTableId(val *int64) { + p.TableId = val +} + +var fieldIDToName_TStreamLoadPutRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "tbl", + 6: "user_ip", + 7: "loadId", + 8: "txnId", + 9: "fileType", + 10: "formatType", + 11: "path", + 12: "columns", + 13: "where", + 14: "columnSeparator", + 15: "partitions", + 16: "auth_code", + 17: "negative", + 18: "timeout", + 19: "strictMode", + 20: "timezone", + 21: "execMemLimit", + 22: "isTempPartition", + 23: "strip_outer_array", + 24: "jsonpaths", + 25: "thrift_rpc_timeout_ms", + 26: "json_root", + 27: "merge_type", + 28: "delete_condition", + 29: "sequence_col", + 30: "num_as_string", + 31: "fuzzy_parse", + 32: "line_delimiter", + 33: "read_json_by_line", + 34: "token", + 35: "send_batch_parallelism", + 36: "max_filter_ratio", + 37: "load_to_single_tablet", + 38: "header_type", + 39: "hidden_columns", + 40: "compress_type", + 41: "file_size", + 42: "trim_double_quotes", + 43: "skip_lines", + 44: "enable_profile", + 45: "partial_update", + 46: "table_names", + 47: "load_sql", + 48: "backend_id", + 49: "version", + 50: "label", + 51: "enclose", + 52: "escape", + 53: "memtable_on_sink_node", + 54: "group_commit", + 55: "stream_per_node", + 56: "group_commit_mode", + 57: "unique_key_update_mode", + 1000: "cloud_cluster", + 1001: "table_id", +} + +func (p *TStreamLoadPutRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TStreamLoadPutRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TStreamLoadPutRequest) IsSetLoadId() bool { + return p.LoadId != nil +} + +func (p *TStreamLoadPutRequest) IsSetPath() bool { + return p.Path != nil +} + +func (p *TStreamLoadPutRequest) IsSetColumns() bool { + return p.Columns != nil +} + +func (p *TStreamLoadPutRequest) IsSetWhere() bool { + return p.Where != nil +} + +func (p *TStreamLoadPutRequest) IsSetColumnSeparator() bool { + return p.ColumnSeparator != nil +} + +func (p *TStreamLoadPutRequest) IsSetPartitions() bool { + return p.Partitions != nil +} + +func (p *TStreamLoadPutRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TStreamLoadPutRequest) IsSetNegative() bool { + return p.Negative != nil +} + +func (p *TStreamLoadPutRequest) IsSetTimeout() bool { + return p.Timeout != nil +} + +func (p *TStreamLoadPutRequest) IsSetStrictMode() bool { + return p.StrictMode != nil +} + +func (p 
*TStreamLoadPutRequest) IsSetTimezone() bool { + return p.Timezone != nil +} + +func (p *TStreamLoadPutRequest) IsSetExecMemLimit() bool { + return p.ExecMemLimit != nil +} + +func (p *TStreamLoadPutRequest) IsSetIsTempPartition() bool { + return p.IsTempPartition != nil +} + +func (p *TStreamLoadPutRequest) IsSetStripOuterArray() bool { + return p.StripOuterArray != nil +} + +func (p *TStreamLoadPutRequest) IsSetJsonpaths() bool { + return p.Jsonpaths != nil +} + +func (p *TStreamLoadPutRequest) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil +} + +func (p *TStreamLoadPutRequest) IsSetJsonRoot() bool { + return p.JsonRoot != nil +} + +func (p *TStreamLoadPutRequest) IsSetMergeType() bool { + return p.MergeType != nil +} + +func (p *TStreamLoadPutRequest) IsSetDeleteCondition() bool { + return p.DeleteCondition != nil +} + +func (p *TStreamLoadPutRequest) IsSetSequenceCol() bool { + return p.SequenceCol != nil +} + +func (p *TStreamLoadPutRequest) IsSetNumAsString() bool { + return p.NumAsString != nil +} + +func (p *TStreamLoadPutRequest) IsSetFuzzyParse() bool { + return p.FuzzyParse != nil +} + +func (p *TStreamLoadPutRequest) IsSetLineDelimiter() bool { + return p.LineDelimiter != nil +} + +func (p *TStreamLoadPutRequest) IsSetReadJsonByLine() bool { + return p.ReadJsonByLine != nil +} + +func (p *TStreamLoadPutRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TStreamLoadPutRequest) IsSetSendBatchParallelism() bool { + return p.SendBatchParallelism != nil +} + +func (p *TStreamLoadPutRequest) IsSetMaxFilterRatio() bool { + return p.MaxFilterRatio != nil +} + +func (p *TStreamLoadPutRequest) IsSetLoadToSingleTablet() bool { + return p.LoadToSingleTablet != nil +} + +func (p *TStreamLoadPutRequest) IsSetHeaderType() bool { + return p.HeaderType != nil +} + +func (p *TStreamLoadPutRequest) IsSetHiddenColumns() bool { + return p.HiddenColumns != nil +} + +func (p *TStreamLoadPutRequest) IsSetCompressType() bool { + return p.CompressType != nil +} + +func (p *TStreamLoadPutRequest) IsSetFileSize() bool { + return p.FileSize != nil +} + +func (p *TStreamLoadPutRequest) IsSetTrimDoubleQuotes() bool { + return p.TrimDoubleQuotes != nil +} + +func (p *TStreamLoadPutRequest) IsSetSkipLines() bool { + return p.SkipLines != nil +} + +func (p *TStreamLoadPutRequest) IsSetEnableProfile() bool { + return p.EnableProfile != nil +} + +func (p *TStreamLoadPutRequest) IsSetPartialUpdate() bool { + return p.PartialUpdate != nil +} + +func (p *TStreamLoadPutRequest) IsSetTableNames() bool { + return p.TableNames != nil +} + +func (p *TStreamLoadPutRequest) IsSetLoadSql() bool { + return p.LoadSql != nil +} + +func (p *TStreamLoadPutRequest) IsSetBackendId() bool { + return p.BackendId != nil +} + +func (p *TStreamLoadPutRequest) IsSetVersion() bool { + return p.Version != nil +} + +func (p *TStreamLoadPutRequest) IsSetLabel() bool { + return p.Label != nil +} + +func (p *TStreamLoadPutRequest) IsSetEnclose() bool { + return p.Enclose != nil +} + +func (p *TStreamLoadPutRequest) IsSetEscape() bool { + return p.Escape != nil +} + +func (p *TStreamLoadPutRequest) IsSetMemtableOnSinkNode() bool { + return p.MemtableOnSinkNode != nil +} + +func (p *TStreamLoadPutRequest) IsSetGroupCommit() bool { + return p.GroupCommit != nil +} + +func (p *TStreamLoadPutRequest) IsSetStreamPerNode() bool { + return p.StreamPerNode != nil +} + +func (p *TStreamLoadPutRequest) IsSetGroupCommitMode() bool { + return p.GroupCommitMode != nil +} + +func (p *TStreamLoadPutRequest) 
IsSetUniqueKeyUpdateMode() bool { + return p.UniqueKeyUpdateMode != nil +} + +func (p *TStreamLoadPutRequest) IsSetCloudCluster() bool { + return p.CloudCluster != nil +} + +func (p *TStreamLoadPutRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TStreamLoadPutRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetLoadId bool = false + var issetTxnId bool = false + var issetFileType bool = false + var issetFormatType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetDb = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetTbl = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetLoadId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + issetTxnId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + issetFileType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + issetFormatType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRING { + if err = p.ReadField13(iprot); err != nil { + goto 
ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRING { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.STRING { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.I64 { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.I32 { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.STRING { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.I64 { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: + if fieldTypeId == thrift.STRING { + if err = p.ReadField24(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 25: + if fieldTypeId == thrift.I64 { + if err = p.ReadField25(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 26: + if fieldTypeId == thrift.STRING { + if err = p.ReadField26(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 27: + if fieldTypeId == thrift.I32 { + if err = p.ReadField27(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 28: + if fieldTypeId == thrift.STRING { + if err = p.ReadField28(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 29: + if fieldTypeId == thrift.STRING { + if err = p.ReadField29(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 30: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField30(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 31: + if fieldTypeId == thrift.BOOL { + if err = 
p.ReadField31(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 32: + if fieldTypeId == thrift.STRING { + if err = p.ReadField32(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 33: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField33(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 34: + if fieldTypeId == thrift.STRING { + if err = p.ReadField34(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 35: + if fieldTypeId == thrift.I32 { + if err = p.ReadField35(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 36: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField36(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 37: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField37(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 38: + if fieldTypeId == thrift.STRING { + if err = p.ReadField38(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 39: + if fieldTypeId == thrift.STRING { + if err = p.ReadField39(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 40: + if fieldTypeId == thrift.I32 { + if err = p.ReadField40(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 41: + if fieldTypeId == thrift.I64 { + if err = p.ReadField41(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 42: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField42(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 43: + if fieldTypeId == thrift.I32 { + if err = p.ReadField43(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 44: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField44(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 45: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField45(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 46: + if fieldTypeId == thrift.LIST { + if err = p.ReadField46(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 47: + if fieldTypeId == thrift.STRING { + if err = p.ReadField47(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 48: + if fieldTypeId == thrift.I64 { + if err = p.ReadField48(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 49: + if 
fieldTypeId == thrift.I32 { + if err = p.ReadField49(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 50: + if fieldTypeId == thrift.STRING { + if err = p.ReadField50(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 51: + if fieldTypeId == thrift.BYTE { + if err = p.ReadField51(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 52: + if fieldTypeId == thrift.BYTE { + if err = p.ReadField52(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 53: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField53(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 54: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField54(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 55: + if fieldTypeId == thrift.I32 { + if err = p.ReadField55(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 56: + if fieldTypeId == thrift.STRING { + if err = p.ReadField56(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 57: + if fieldTypeId == thrift.I32 { + if err = p.ReadField57(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetLoadId { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetFileType { + fieldId = 9 + goto RequiredFieldNotSetError + } + + if !issetFormatType { + fieldId = 10 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutRequest[fieldId])) +} + +func (p *TStreamLoadPutRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.User = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Passwd = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Db = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Tbl = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField7(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.LoadId = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TxnId = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field types.TFileType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TFileType(v) + } + p.FileType = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field plannodes.TFileFormatType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = plannodes.TFileFormatType(v) + } + p.FormatType = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Path = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Columns = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Where = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ColumnSeparator = _field + return nil +} +func 
(p *TStreamLoadPutRequest) ReadField15(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Partitions = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField16(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField17(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Negative = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField18(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Timeout = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField19(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.StrictMode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField20(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Timezone = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField21(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ExecMemLimit = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField22(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsTempPartition = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField23(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.StripOuterArray = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField24(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Jsonpaths = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField25(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField26(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.JsonRoot = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField27(iprot thrift.TProtocol) error { + + var _field *types.TMergeType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TMergeType(v) + _field = &tmp + } + p.MergeType = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField28(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DeleteCondition = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField29(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SequenceCol = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField30(iprot thrift.TProtocol) error { + + var _field 
*bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.NumAsString = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField31(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.FuzzyParse = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField32(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LineDelimiter = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField33(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ReadJsonByLine = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField34(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField35(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SendBatchParallelism = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField36(iprot thrift.TProtocol) error { + + var _field *float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = &v + } + p.MaxFilterRatio = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField37(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.LoadToSingleTablet = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField38(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.HeaderType = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField39(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.HiddenColumns = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField40(iprot thrift.TProtocol) error { + + var _field *plannodes.TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := plannodes.TFileCompressType(v) + _field = &tmp + } + p.CompressType = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField41(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FileSize = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField42(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.TrimDoubleQuotes = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField43(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SkipLines = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField44(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableProfile = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField45(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err 
!= nil { + return err + } else { + _field = &v + } + p.PartialUpdate = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField46(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableNames = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField47(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LoadSql = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField48(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BackendId = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField49(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField50(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField51(iprot thrift.TProtocol) error { + + var _field *int8 + if v, err := iprot.ReadByte(); err != nil { + return err + } else { + _field = &v + } + p.Enclose = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField52(iprot thrift.TProtocol) error { + + var _field *int8 + if v, err := iprot.ReadByte(); err != nil { + return err + } else { + _field = &v + } + p.Escape = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField53(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.MemtableOnSinkNode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField54(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommit = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField55(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.StreamPerNode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField56(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommitMode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField57(iprot thrift.TProtocol) error { + + var _field *types.TUniqueKeyUpdateMode + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TUniqueKeyUpdateMode(v) + _field = &tmp + } + p.UniqueKeyUpdateMode = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField1000(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CloudCluster = _field + return nil +} +func (p *TStreamLoadPutRequest) ReadField1001(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + 
p.TableId = _field + return nil +} + +func (p *TStreamLoadPutRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TStreamLoadPutRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } + if err = p.writeField25(oprot); err != nil { + fieldId = 25 + goto WriteFieldError + } + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } + if err = p.writeField31(oprot); err != nil { + fieldId = 31 + goto WriteFieldError + } + if err = p.writeField32(oprot); err != nil { + fieldId = 32 + goto WriteFieldError + } + if err = p.writeField33(oprot); err != nil { + fieldId = 33 + goto WriteFieldError + } + if err = p.writeField34(oprot); err != nil { + fieldId = 34 + goto WriteFieldError + } + if err = p.writeField35(oprot); err != nil { + fieldId = 35 + goto WriteFieldError + } + if err = p.writeField36(oprot); err != nil { + fieldId = 36 + goto WriteFieldError + } + if err = p.writeField37(oprot); err != nil { + fieldId = 37 + goto WriteFieldError + } + if err = 
p.writeField38(oprot); err != nil { + fieldId = 38 + goto WriteFieldError + } + if err = p.writeField39(oprot); err != nil { + fieldId = 39 + goto WriteFieldError + } + if err = p.writeField40(oprot); err != nil { + fieldId = 40 + goto WriteFieldError + } + if err = p.writeField41(oprot); err != nil { + fieldId = 41 + goto WriteFieldError + } + if err = p.writeField42(oprot); err != nil { + fieldId = 42 + goto WriteFieldError + } + if err = p.writeField43(oprot); err != nil { + fieldId = 43 + goto WriteFieldError + } + if err = p.writeField44(oprot); err != nil { + fieldId = 44 + goto WriteFieldError + } + if err = p.writeField45(oprot); err != nil { + fieldId = 45 + goto WriteFieldError + } + if err = p.writeField46(oprot); err != nil { + fieldId = 46 + goto WriteFieldError + } + if err = p.writeField47(oprot); err != nil { + fieldId = 47 + goto WriteFieldError + } + if err = p.writeField48(oprot); err != nil { + fieldId = 48 + goto WriteFieldError + } + if err = p.writeField49(oprot); err != nil { + fieldId = 49 + goto WriteFieldError + } + if err = p.writeField50(oprot); err != nil { + fieldId = 50 + goto WriteFieldError + } + if err = p.writeField51(oprot); err != nil { + fieldId = 51 + goto WriteFieldError + } + if err = p.writeField52(oprot); err != nil { + fieldId = 52 + goto WriteFieldError + } + if err = p.writeField53(oprot); err != nil { + fieldId = 53 + goto WriteFieldError + } + if err = p.writeField54(oprot); err != nil { + fieldId = 54 + goto WriteFieldError + } + if err = p.writeField55(oprot); err != nil { + fieldId = 55 + goto WriteFieldError + } + if err = p.writeField56(oprot); err != nil { + fieldId = 56 + goto WriteFieldError + } + if err = p.writeField57(oprot); err != nil { + fieldId = 57 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("loadId", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 
end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("fileType", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.FileType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField10(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("formatType", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.FormatType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetPath() { + if err = oprot.WriteFieldBegin("path", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Path); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetColumns() { + if err = oprot.WriteFieldBegin("columns", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Columns); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetWhere() { + if err = oprot.WriteFieldBegin("where", thrift.STRING, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Where); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnSeparator() { + if err = oprot.WriteFieldBegin("columnSeparator", thrift.STRING, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ColumnSeparator); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write 
field 14 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.STRING, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Partitions); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetNegative() { + if err = oprot.WriteFieldBegin("negative", thrift.BOOL, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Negative); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeout() { + if err = oprot.WriteFieldBegin("timeout", thrift.I32, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Timeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetStrictMode() { + if err = oprot.WriteFieldBegin("strictMode", thrift.BOOL, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.StrictMode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetTimezone() { + if err = oprot.WriteFieldBegin("timezone", thrift.STRING, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Timezone); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetExecMemLimit() { + if err = oprot.WriteFieldBegin("execMemLimit", thrift.I64, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ExecMemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetIsTempPartition() { + if err = oprot.WriteFieldBegin("isTempPartition", thrift.BOOL, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsTempPartition); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetStripOuterArray() { + if err = oprot.WriteFieldBegin("strip_outer_array", thrift.BOOL, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.StripOuterArray); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetJsonpaths() { + if err = oprot.WriteFieldBegin("jsonpaths", thrift.STRING, 24); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Jsonpaths); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField25(oprot thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 25); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetJsonRoot() { + if err = oprot.WriteFieldBegin("json_root", thrift.STRING, 26); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.JsonRoot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetMergeType() { + if err = oprot.WriteFieldBegin("merge_type", thrift.I32, 27); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.MergeType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetDeleteCondition() { + if err = oprot.WriteFieldBegin("delete_condition", thrift.STRING, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DeleteCondition); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetSequenceCol() { + if err = oprot.WriteFieldBegin("sequence_col", thrift.STRING, 29); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SequenceCol); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetNumAsString() { + if err = oprot.WriteFieldBegin("num_as_string", thrift.BOOL, 30); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.NumAsString); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField31(oprot thrift.TProtocol) (err error) { + if p.IsSetFuzzyParse() { + if err = oprot.WriteFieldBegin("fuzzy_parse", thrift.BOOL, 31); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.FuzzyParse); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField32(oprot thrift.TProtocol) (err error) { + if p.IsSetLineDelimiter() { + if err = oprot.WriteFieldBegin("line_delimiter", thrift.STRING, 32); err != nil { + goto WriteFieldBeginError + } 
+ if err := oprot.WriteString(*p.LineDelimiter); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField33(oprot thrift.TProtocol) (err error) { + if p.IsSetReadJsonByLine() { + if err = oprot.WriteFieldBegin("read_json_by_line", thrift.BOOL, 33); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.ReadJsonByLine); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField34(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 34); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField35(oprot thrift.TProtocol) (err error) { + if p.IsSetSendBatchParallelism() { + if err = oprot.WriteFieldBegin("send_batch_parallelism", thrift.I32, 35); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SendBatchParallelism); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField36(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxFilterRatio() { + if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 36); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField37(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadToSingleTablet() { + if err = oprot.WriteFieldBegin("load_to_single_tablet", thrift.BOOL, 37); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.LoadToSingleTablet); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 37 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 37 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField38(oprot 
thrift.TProtocol) (err error) { + if p.IsSetHeaderType() { + if err = oprot.WriteFieldBegin("header_type", thrift.STRING, 38); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.HeaderType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 38 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 38 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField39(oprot thrift.TProtocol) (err error) { + if p.IsSetHiddenColumns() { + if err = oprot.WriteFieldBegin("hidden_columns", thrift.STRING, 39); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.HiddenColumns); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 39 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 39 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField40(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressType() { + if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 40); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 40 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 40 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField41(oprot thrift.TProtocol) (err error) { + if p.IsSetFileSize() { + if err = oprot.WriteFieldBegin("file_size", thrift.I64, 41); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FileSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 41 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 41 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField42(oprot thrift.TProtocol) (err error) { + if p.IsSetTrimDoubleQuotes() { + if err = oprot.WriteFieldBegin("trim_double_quotes", thrift.BOOL, 42); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.TrimDoubleQuotes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField43(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipLines() { + if err = oprot.WriteFieldBegin("skip_lines", thrift.I32, 43); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SkipLines); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 43 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField44(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableProfile() { + if err = oprot.WriteFieldBegin("enable_profile", thrift.BOOL, 44); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableProfile); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField45(oprot thrift.TProtocol) (err error) { + if p.IsSetPartialUpdate() { + if err = oprot.WriteFieldBegin("partial_update", thrift.BOOL, 45); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.PartialUpdate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 45 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 45 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField46(oprot thrift.TProtocol) (err error) { + if p.IsSetTableNames() { + if err = oprot.WriteFieldBegin("table_names", thrift.LIST, 46); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.TableNames)); err != nil { + return err + } + for _, v := range p.TableNames { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 46 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 46 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField47(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadSql() { + if err = oprot.WriteFieldBegin("load_sql", thrift.STRING, 47); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.LoadSql); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 47 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 47 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField48(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 48); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 48 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 48 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField49(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I32, 49); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteI32(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 49 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 49 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField50(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 50); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField51(oprot thrift.TProtocol) (err error) { + if p.IsSetEnclose() { + if err = oprot.WriteFieldBegin("enclose", thrift.BYTE, 51); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteByte(*p.Enclose); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField52(oprot thrift.TProtocol) (err error) { + if p.IsSetEscape() { + if err = oprot.WriteFieldBegin("escape", thrift.BYTE, 52); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteByte(*p.Escape); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 52 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 52 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField53(oprot thrift.TProtocol) (err error) { + if p.IsSetMemtableOnSinkNode() { + if err = oprot.WriteFieldBegin("memtable_on_sink_node", thrift.BOOL, 53); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.MemtableOnSinkNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 53 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 53 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField54(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommit() { + if err = oprot.WriteFieldBegin("group_commit", thrift.BOOL, 54); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.GroupCommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 54 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 54 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField55(oprot thrift.TProtocol) (err error) { + if p.IsSetStreamPerNode() { + if err = 
oprot.WriteFieldBegin("stream_per_node", thrift.I32, 55); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.StreamPerNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 55 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 55 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField56(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitMode() { + if err = oprot.WriteFieldBegin("group_commit_mode", thrift.STRING, 56); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.GroupCommitMode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 56 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 56 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField57(oprot thrift.TProtocol) (err error) { + if p.IsSetUniqueKeyUpdateMode() { + if err = oprot.WriteFieldBegin("unique_key_update_mode", thrift.I32, 57); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.UniqueKeyUpdateMode)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 57 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 57 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudCluster() { + if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CloudCluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) +} + +func (p *TStreamLoadPutRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStreamLoadPutRequest(%+v)", *p) + +} + +func (p *TStreamLoadPutRequest) DeepEqual(ano *TStreamLoadPutRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Tbl) { 
+ return false + } + if !p.Field6DeepEqual(ano.UserIp) { + return false + } + if !p.Field7DeepEqual(ano.LoadId) { + return false + } + if !p.Field8DeepEqual(ano.TxnId) { + return false + } + if !p.Field9DeepEqual(ano.FileType) { + return false + } + if !p.Field10DeepEqual(ano.FormatType) { + return false + } + if !p.Field11DeepEqual(ano.Path) { + return false + } + if !p.Field12DeepEqual(ano.Columns) { + return false + } + if !p.Field13DeepEqual(ano.Where) { + return false + } + if !p.Field14DeepEqual(ano.ColumnSeparator) { + return false + } + if !p.Field15DeepEqual(ano.Partitions) { + return false + } + if !p.Field16DeepEqual(ano.AuthCode) { + return false + } + if !p.Field17DeepEqual(ano.Negative) { + return false + } + if !p.Field18DeepEqual(ano.Timeout) { + return false + } + if !p.Field19DeepEqual(ano.StrictMode) { + return false + } + if !p.Field20DeepEqual(ano.Timezone) { + return false + } + if !p.Field21DeepEqual(ano.ExecMemLimit) { + return false + } + if !p.Field22DeepEqual(ano.IsTempPartition) { + return false + } + if !p.Field23DeepEqual(ano.StripOuterArray) { + return false + } + if !p.Field24DeepEqual(ano.Jsonpaths) { + return false + } + if !p.Field25DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + if !p.Field26DeepEqual(ano.JsonRoot) { + return false + } + if !p.Field27DeepEqual(ano.MergeType) { + return false + } + if !p.Field28DeepEqual(ano.DeleteCondition) { + return false + } + if !p.Field29DeepEqual(ano.SequenceCol) { + return false + } + if !p.Field30DeepEqual(ano.NumAsString) { + return false + } + if !p.Field31DeepEqual(ano.FuzzyParse) { + return false + } + if !p.Field32DeepEqual(ano.LineDelimiter) { + return false + } + if !p.Field33DeepEqual(ano.ReadJsonByLine) { + return false + } + if !p.Field34DeepEqual(ano.Token) { + return false + } + if !p.Field35DeepEqual(ano.SendBatchParallelism) { + return false + } + if !p.Field36DeepEqual(ano.MaxFilterRatio) { + return false + } + if !p.Field37DeepEqual(ano.LoadToSingleTablet) { + return false + } + if !p.Field38DeepEqual(ano.HeaderType) { + return false + } + if !p.Field39DeepEqual(ano.HiddenColumns) { + return false + } + if !p.Field40DeepEqual(ano.CompressType) { + return false + } + if !p.Field41DeepEqual(ano.FileSize) { + return false + } + if !p.Field42DeepEqual(ano.TrimDoubleQuotes) { + return false + } + if !p.Field43DeepEqual(ano.SkipLines) { + return false + } + if !p.Field44DeepEqual(ano.EnableProfile) { + return false + } + if !p.Field45DeepEqual(ano.PartialUpdate) { + return false + } + if !p.Field46DeepEqual(ano.TableNames) { + return false + } + if !p.Field47DeepEqual(ano.LoadSql) { + return false + } + if !p.Field48DeepEqual(ano.BackendId) { + return false + } + if !p.Field49DeepEqual(ano.Version) { + return false + } + if !p.Field50DeepEqual(ano.Label) { + return false + } + if !p.Field51DeepEqual(ano.Enclose) { + return false + } + if !p.Field52DeepEqual(ano.Escape) { + return false + } + if !p.Field53DeepEqual(ano.MemtableOnSinkNode) { + return false + } + if !p.Field54DeepEqual(ano.GroupCommit) { + return false + } + if !p.Field55DeepEqual(ano.StreamPerNode) { + return false + } + if !p.Field56DeepEqual(ano.GroupCommitMode) { + return false + } + if !p.Field57DeepEqual(ano.UniqueKeyUpdateMode) { + return false + } + if !p.Field1000DeepEqual(ano.CloudCluster) { + return false + } + if !p.Field1001DeepEqual(ano.TableId) { + return false + } + return true +} + +func (p *TStreamLoadPutRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == 
nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Passwd, src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field4DeepEqual(src string) bool { + + if strings.Compare(p.Db, src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field5DeepEqual(src string) bool { + + if strings.Compare(p.Tbl, src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field6DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field7DeepEqual(src *types.TUniqueId) bool { + + if !p.LoadId.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field8DeepEqual(src int64) bool { + + if p.TxnId != src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field9DeepEqual(src types.TFileType) bool { + + if p.FileType != src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field10DeepEqual(src plannodes.TFileFormatType) bool { + + if p.FormatType != src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field11DeepEqual(src *string) bool { + + if p.Path == src { + return true + } else if p.Path == nil || src == nil { + return false + } + if strings.Compare(*p.Path, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field12DeepEqual(src *string) bool { + + if p.Columns == src { + return true + } else if p.Columns == nil || src == nil { + return false + } + if strings.Compare(*p.Columns, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field13DeepEqual(src *string) bool { + + if p.Where == src { + return true + } else if p.Where == nil || src == nil { + return false + } + if strings.Compare(*p.Where, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field14DeepEqual(src *string) bool { + + if p.ColumnSeparator == src { + return true + } else if p.ColumnSeparator == nil || src == nil { + return false + } + if strings.Compare(*p.ColumnSeparator, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field15DeepEqual(src *string) bool { + + if p.Partitions == src { + return true + } else if p.Partitions == nil || src == nil { + return false + } + if strings.Compare(*p.Partitions, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field16DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field17DeepEqual(src *bool) bool { + + if p.Negative == src { + return true + } else if p.Negative == nil || src == nil { + return false + } + if *p.Negative != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field18DeepEqual(src *int32) bool { + + if p.Timeout == src { + return true + } else if p.Timeout == nil || src == nil { + return false + } + if *p.Timeout != *src { + return false + } + return true +} +func (p 
*TStreamLoadPutRequest) Field19DeepEqual(src *bool) bool { + + if p.StrictMode == src { + return true + } else if p.StrictMode == nil || src == nil { + return false + } + if *p.StrictMode != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field20DeepEqual(src *string) bool { + + if p.Timezone == src { + return true + } else if p.Timezone == nil || src == nil { + return false + } + if strings.Compare(*p.Timezone, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field21DeepEqual(src *int64) bool { + + if p.ExecMemLimit == src { + return true + } else if p.ExecMemLimit == nil || src == nil { + return false + } + if *p.ExecMemLimit != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field22DeepEqual(src *bool) bool { + + if p.IsTempPartition == src { + return true + } else if p.IsTempPartition == nil || src == nil { + return false + } + if *p.IsTempPartition != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field23DeepEqual(src *bool) bool { + + if p.StripOuterArray == src { + return true + } else if p.StripOuterArray == nil || src == nil { + return false + } + if *p.StripOuterArray != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field24DeepEqual(src *string) bool { + + if p.Jsonpaths == src { + return true + } else if p.Jsonpaths == nil || src == nil { + return false + } + if strings.Compare(*p.Jsonpaths, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field25DeepEqual(src *int64) bool { + + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false + } + if *p.ThriftRpcTimeoutMs != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field26DeepEqual(src *string) bool { + + if p.JsonRoot == src { + return true + } else if p.JsonRoot == nil || src == nil { + return false + } + if strings.Compare(*p.JsonRoot, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field27DeepEqual(src *types.TMergeType) bool { + + if p.MergeType == src { + return true + } else if p.MergeType == nil || src == nil { + return false + } + if *p.MergeType != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field28DeepEqual(src *string) bool { + + if p.DeleteCondition == src { + return true + } else if p.DeleteCondition == nil || src == nil { + return false + } + if strings.Compare(*p.DeleteCondition, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field29DeepEqual(src *string) bool { + + if p.SequenceCol == src { + return true + } else if p.SequenceCol == nil || src == nil { + return false + } + if strings.Compare(*p.SequenceCol, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field30DeepEqual(src *bool) bool { + + if p.NumAsString == src { + return true + } else if p.NumAsString == nil || src == nil { + return false + } + if *p.NumAsString != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field31DeepEqual(src *bool) bool { + + if p.FuzzyParse == src { + return true + } else if p.FuzzyParse == nil || src == nil { + return false + } + if *p.FuzzyParse != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field32DeepEqual(src *string) bool { + + if p.LineDelimiter == src { + return true + } else if p.LineDelimiter == nil || src == nil { + return false + } + if 
strings.Compare(*p.LineDelimiter, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field33DeepEqual(src *bool) bool { + + if p.ReadJsonByLine == src { + return true + } else if p.ReadJsonByLine == nil || src == nil { + return false + } + if *p.ReadJsonByLine != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field34DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field35DeepEqual(src *int32) bool { + + if p.SendBatchParallelism == src { + return true + } else if p.SendBatchParallelism == nil || src == nil { + return false + } + if *p.SendBatchParallelism != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field36DeepEqual(src *float64) bool { + + if p.MaxFilterRatio == src { + return true + } else if p.MaxFilterRatio == nil || src == nil { + return false + } + if *p.MaxFilterRatio != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field37DeepEqual(src *bool) bool { + + if p.LoadToSingleTablet == src { + return true + } else if p.LoadToSingleTablet == nil || src == nil { + return false + } + if *p.LoadToSingleTablet != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field38DeepEqual(src *string) bool { + + if p.HeaderType == src { + return true + } else if p.HeaderType == nil || src == nil { + return false + } + if strings.Compare(*p.HeaderType, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field39DeepEqual(src *string) bool { + + if p.HiddenColumns == src { + return true + } else if p.HiddenColumns == nil || src == nil { + return false + } + if strings.Compare(*p.HiddenColumns, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field40DeepEqual(src *plannodes.TFileCompressType) bool { + + if p.CompressType == src { + return true + } else if p.CompressType == nil || src == nil { + return false + } + if *p.CompressType != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field41DeepEqual(src *int64) bool { + + if p.FileSize == src { + return true + } else if p.FileSize == nil || src == nil { + return false + } + if *p.FileSize != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field42DeepEqual(src *bool) bool { + + if p.TrimDoubleQuotes == src { + return true + } else if p.TrimDoubleQuotes == nil || src == nil { + return false + } + if *p.TrimDoubleQuotes != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field43DeepEqual(src *int32) bool { + + if p.SkipLines == src { + return true + } else if p.SkipLines == nil || src == nil { + return false + } + if *p.SkipLines != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field44DeepEqual(src *bool) bool { + + if p.EnableProfile == src { + return true + } else if p.EnableProfile == nil || src == nil { + return false + } + if *p.EnableProfile != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field45DeepEqual(src *bool) bool { + + if p.PartialUpdate == src { + return true + } else if p.PartialUpdate == nil || src == nil { + return false + } + if *p.PartialUpdate != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field46DeepEqual(src []string) bool { + + if len(p.TableNames) != len(src) { + 
return false + } + for i, v := range p.TableNames { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TStreamLoadPutRequest) Field47DeepEqual(src *string) bool { + + if p.LoadSql == src { + return true + } else if p.LoadSql == nil || src == nil { + return false + } + if strings.Compare(*p.LoadSql, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field48DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field49DeepEqual(src *int32) bool { + + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false + } + if *p.Version != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field50DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field51DeepEqual(src *int8) bool { + + if p.Enclose == src { + return true + } else if p.Enclose == nil || src == nil { + return false + } + if *p.Enclose != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field52DeepEqual(src *int8) bool { + + if p.Escape == src { + return true + } else if p.Escape == nil || src == nil { + return false + } + if *p.Escape != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field53DeepEqual(src *bool) bool { + + if p.MemtableOnSinkNode == src { + return true + } else if p.MemtableOnSinkNode == nil || src == nil { + return false + } + if *p.MemtableOnSinkNode != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field54DeepEqual(src *bool) bool { + + if p.GroupCommit == src { + return true + } else if p.GroupCommit == nil || src == nil { + return false + } + if *p.GroupCommit != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field55DeepEqual(src *int32) bool { + + if p.StreamPerNode == src { + return true + } else if p.StreamPerNode == nil || src == nil { + return false + } + if *p.StreamPerNode != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field56DeepEqual(src *string) bool { + + if p.GroupCommitMode == src { + return true + } else if p.GroupCommitMode == nil || src == nil { + return false + } + if strings.Compare(*p.GroupCommitMode, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field57DeepEqual(src *types.TUniqueKeyUpdateMode) bool { + + if p.UniqueKeyUpdateMode == src { + return true + } else if p.UniqueKeyUpdateMode == nil || src == nil { + return false + } + if *p.UniqueKeyUpdateMode != *src { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field1000DeepEqual(src *string) bool { + + if p.CloudCluster == src { + return true + } else if p.CloudCluster == nil || src == nil { + return false + } + if strings.Compare(*p.CloudCluster, *src) != 0 { + return false + } + return true +} +func (p *TStreamLoadPutRequest) Field1001DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} + +type TStreamLoadPutResult_ struct { + Status *status.TStatus `thrift:"status,1,required" 
frugal:"1,required,status.TStatus" json:"status"` + Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,2,optional" frugal:"2,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"` + PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,3,optional" frugal:"3,optional,palointernalservice.TPipelineFragmentParams" json:"pipeline_params,omitempty"` + BaseSchemaVersion *int64 `thrift:"base_schema_version,4,optional" frugal:"4,optional,i64" json:"base_schema_version,omitempty"` + DbId *int64 `thrift:"db_id,5,optional" frugal:"5,optional,i64" json:"db_id,omitempty"` + TableId *int64 `thrift:"table_id,6,optional" frugal:"6,optional,i64" json:"table_id,omitempty"` + WaitInternalGroupCommitFinish bool `thrift:"wait_internal_group_commit_finish,7,optional" frugal:"7,optional,bool" json:"wait_internal_group_commit_finish,omitempty"` + GroupCommitIntervalMs *int64 `thrift:"group_commit_interval_ms,8,optional" frugal:"8,optional,i64" json:"group_commit_interval_ms,omitempty"` + GroupCommitDataBytes *int64 `thrift:"group_commit_data_bytes,9,optional" frugal:"9,optional,i64" json:"group_commit_data_bytes,omitempty"` +} + +func NewTStreamLoadPutResult_() *TStreamLoadPutResult_ { + return &TStreamLoadPutResult_{ + + WaitInternalGroupCommitFinish: false, + } +} + +func (p *TStreamLoadPutResult_) InitDefault() { + p.WaitInternalGroupCommitFinish = false +} + +var TStreamLoadPutResult__Status_DEFAULT *status.TStatus + +func (p *TStreamLoadPutResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TStreamLoadPutResult__Status_DEFAULT + } + return p.Status +} + +var TStreamLoadPutResult__Params_DEFAULT *palointernalservice.TExecPlanFragmentParams + +func (p *TStreamLoadPutResult_) GetParams() (v *palointernalservice.TExecPlanFragmentParams) { + if !p.IsSetParams() { + return TStreamLoadPutResult__Params_DEFAULT + } + return p.Params +} + +var TStreamLoadPutResult__PipelineParams_DEFAULT *palointernalservice.TPipelineFragmentParams + +func (p *TStreamLoadPutResult_) GetPipelineParams() (v *palointernalservice.TPipelineFragmentParams) { + if !p.IsSetPipelineParams() { + return TStreamLoadPutResult__PipelineParams_DEFAULT + } + return p.PipelineParams +} + +var TStreamLoadPutResult__BaseSchemaVersion_DEFAULT int64 + +func (p *TStreamLoadPutResult_) GetBaseSchemaVersion() (v int64) { + if !p.IsSetBaseSchemaVersion() { + return TStreamLoadPutResult__BaseSchemaVersion_DEFAULT + } + return *p.BaseSchemaVersion +} + +var TStreamLoadPutResult__DbId_DEFAULT int64 + +func (p *TStreamLoadPutResult_) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TStreamLoadPutResult__DbId_DEFAULT + } + return *p.DbId +} + +var TStreamLoadPutResult__TableId_DEFAULT int64 + +func (p *TStreamLoadPutResult_) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TStreamLoadPutResult__TableId_DEFAULT + } + return *p.TableId +} + +var TStreamLoadPutResult__WaitInternalGroupCommitFinish_DEFAULT bool = false + +func (p *TStreamLoadPutResult_) GetWaitInternalGroupCommitFinish() (v bool) { + if !p.IsSetWaitInternalGroupCommitFinish() { + return TStreamLoadPutResult__WaitInternalGroupCommitFinish_DEFAULT + } + return p.WaitInternalGroupCommitFinish +} + +var TStreamLoadPutResult__GroupCommitIntervalMs_DEFAULT int64 + +func (p *TStreamLoadPutResult_) GetGroupCommitIntervalMs() (v int64) { + if !p.IsSetGroupCommitIntervalMs() { + return TStreamLoadPutResult__GroupCommitIntervalMs_DEFAULT + } + return *p.GroupCommitIntervalMs +} + +var 
TStreamLoadPutResult__GroupCommitDataBytes_DEFAULT int64 + +func (p *TStreamLoadPutResult_) GetGroupCommitDataBytes() (v int64) { + if !p.IsSetGroupCommitDataBytes() { + return TStreamLoadPutResult__GroupCommitDataBytes_DEFAULT + } + return *p.GroupCommitDataBytes +} +func (p *TStreamLoadPutResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TStreamLoadPutResult_) SetParams(val *palointernalservice.TExecPlanFragmentParams) { + p.Params = val +} +func (p *TStreamLoadPutResult_) SetPipelineParams(val *palointernalservice.TPipelineFragmentParams) { + p.PipelineParams = val +} +func (p *TStreamLoadPutResult_) SetBaseSchemaVersion(val *int64) { + p.BaseSchemaVersion = val +} +func (p *TStreamLoadPutResult_) SetDbId(val *int64) { + p.DbId = val +} +func (p *TStreamLoadPutResult_) SetTableId(val *int64) { + p.TableId = val +} +func (p *TStreamLoadPutResult_) SetWaitInternalGroupCommitFinish(val bool) { + p.WaitInternalGroupCommitFinish = val +} +func (p *TStreamLoadPutResult_) SetGroupCommitIntervalMs(val *int64) { + p.GroupCommitIntervalMs = val +} +func (p *TStreamLoadPutResult_) SetGroupCommitDataBytes(val *int64) { + p.GroupCommitDataBytes = val +} + +var fieldIDToName_TStreamLoadPutResult_ = map[int16]string{ + 1: "status", + 2: "params", + 3: "pipeline_params", + 4: "base_schema_version", + 5: "db_id", + 6: "table_id", + 7: "wait_internal_group_commit_finish", + 8: "group_commit_interval_ms", + 9: "group_commit_data_bytes", +} + +func (p *TStreamLoadPutResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TStreamLoadPutResult_) IsSetParams() bool { + return p.Params != nil +} + +func (p *TStreamLoadPutResult_) IsSetPipelineParams() bool { + return p.PipelineParams != nil +} + +func (p *TStreamLoadPutResult_) IsSetBaseSchemaVersion() bool { + return p.BaseSchemaVersion != nil +} + +func (p *TStreamLoadPutResult_) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TStreamLoadPutResult_) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TStreamLoadPutResult_) IsSetWaitInternalGroupCommitFinish() bool { + return p.WaitInternalGroupCommitFinish != TStreamLoadPutResult__WaitInternalGroupCommitFinish_DEFAULT +} + +func (p *TStreamLoadPutResult_) IsSetGroupCommitIntervalMs() bool { + return p.GroupCommitIntervalMs != nil +} + +func (p *TStreamLoadPutResult_) IsSetGroupCommitDataBytes() bool { + return p.GroupCommitDataBytes != nil +} + +func (p *TStreamLoadPutResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != 
nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutResult_[fieldId])) +} + +func (p *TStreamLoadPutResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField2(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTExecPlanFragmentParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField3(iprot thrift.TProtocol) error { + _field := palointernalservice.NewTPipelineFragmentParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.PipelineParams = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BaseSchemaVersion = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if 
v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField7(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.WaitInternalGroupCommitFinish = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommitIntervalMs = _field + return nil +} +func (p *TStreamLoadPutResult_) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommitDataBytes = _field + return nil +} + +func (p *TStreamLoadPutResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TStreamLoadPutResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetParams() { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 
end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPipelineParams() { + if err = oprot.WriteFieldBegin("pipeline_params", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.PipelineParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseSchemaVersion() { + if err = oprot.WriteFieldBegin("base_schema_version", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BaseSchemaVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetWaitInternalGroupCommitFinish() { + if err = oprot.WriteFieldBegin("wait_internal_group_commit_finish", thrift.BOOL, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.WaitInternalGroupCommitFinish); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitIntervalMs() { + if err = oprot.WriteFieldBegin("group_commit_interval_ms", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.GroupCommitIntervalMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitDataBytes() { + if err = oprot.WriteFieldBegin("group_commit_data_bytes", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.GroupCommitDataBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TStreamLoadPutResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStreamLoadPutResult_(%+v)", *p) + +} + +func (p *TStreamLoadPutResult_) DeepEqual(ano *TStreamLoadPutResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Params) { + return false + } + if !p.Field3DeepEqual(ano.PipelineParams) { + return false + } + if !p.Field4DeepEqual(ano.BaseSchemaVersion) { + return false + } + if !p.Field5DeepEqual(ano.DbId) { + return false + } + if !p.Field6DeepEqual(ano.TableId) { + return false + } + if !p.Field7DeepEqual(ano.WaitInternalGroupCommitFinish) { + return false + } + if !p.Field8DeepEqual(ano.GroupCommitIntervalMs) { + return false + } + if !p.Field9DeepEqual(ano.GroupCommitDataBytes) { + return false + } + return true +} + +func (p *TStreamLoadPutResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field2DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool { + + if !p.Params.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field3DeepEqual(src *palointernalservice.TPipelineFragmentParams) bool { + + if !p.PipelineParams.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field4DeepEqual(src *int64) bool { + + if p.BaseSchemaVersion == src { + return true + } else if p.BaseSchemaVersion == nil || src == nil { + return false + } + if *p.BaseSchemaVersion != *src { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field5DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field6DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field7DeepEqual(src bool) bool { + + if p.WaitInternalGroupCommitFinish != src { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field8DeepEqual(src *int64) bool { + + if p.GroupCommitIntervalMs == src { + return true + } else if p.GroupCommitIntervalMs == nil || src == nil { + return false + } + if *p.GroupCommitIntervalMs != *src { + return false + } + return true +} +func (p *TStreamLoadPutResult_) Field9DeepEqual(src *int64) bool { + + if p.GroupCommitDataBytes == src { + return true + } else if p.GroupCommitDataBytes == nil || src 
== nil { + return false + } + if *p.GroupCommitDataBytes != *src { + return false + } + return true +} + +type TStreamLoadMultiTablePutResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + Params []*palointernalservice.TExecPlanFragmentParams `thrift:"params,2,optional" frugal:"2,optional,list" json:"params,omitempty"` + PipelineParams []*palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,3,optional" frugal:"3,optional,list" json:"pipeline_params,omitempty"` +} + +func NewTStreamLoadMultiTablePutResult_() *TStreamLoadMultiTablePutResult_ { + return &TStreamLoadMultiTablePutResult_{} +} + +func (p *TStreamLoadMultiTablePutResult_) InitDefault() { +} + +var TStreamLoadMultiTablePutResult__Status_DEFAULT *status.TStatus + +func (p *TStreamLoadMultiTablePutResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TStreamLoadMultiTablePutResult__Status_DEFAULT + } + return p.Status +} + +var TStreamLoadMultiTablePutResult__Params_DEFAULT []*palointernalservice.TExecPlanFragmentParams + +func (p *TStreamLoadMultiTablePutResult_) GetParams() (v []*palointernalservice.TExecPlanFragmentParams) { + if !p.IsSetParams() { + return TStreamLoadMultiTablePutResult__Params_DEFAULT + } + return p.Params +} + +var TStreamLoadMultiTablePutResult__PipelineParams_DEFAULT []*palointernalservice.TPipelineFragmentParams + +func (p *TStreamLoadMultiTablePutResult_) GetPipelineParams() (v []*palointernalservice.TPipelineFragmentParams) { + if !p.IsSetPipelineParams() { + return TStreamLoadMultiTablePutResult__PipelineParams_DEFAULT + } + return p.PipelineParams +} +func (p *TStreamLoadMultiTablePutResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TStreamLoadMultiTablePutResult_) SetParams(val []*palointernalservice.TExecPlanFragmentParams) { + p.Params = val +} +func (p *TStreamLoadMultiTablePutResult_) SetPipelineParams(val []*palointernalservice.TPipelineFragmentParams) { + p.PipelineParams = val +} + +var fieldIDToName_TStreamLoadMultiTablePutResult_ = map[int16]string{ + 1: "status", + 2: "params", + 3: "pipeline_params", +} + +func (p *TStreamLoadMultiTablePutResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TStreamLoadMultiTablePutResult_) IsSetParams() bool { + return p.Params != nil +} + +func (p *TStreamLoadMultiTablePutResult_) IsSetPipelineParams() bool { + return p.PipelineParams != nil +} + +func (p *TStreamLoadMultiTablePutResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId])) +} + +func (p *TStreamLoadMultiTablePutResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TStreamLoadMultiTablePutResult_) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*palointernalservice.TExecPlanFragmentParams, 0, size) + values := make([]palointernalservice.TExecPlanFragmentParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Params = _field + return nil +} +func (p *TStreamLoadMultiTablePutResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*palointernalservice.TPipelineFragmentParams, 0, size) + values := make([]palointernalservice.TPipelineFragmentParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PipelineParams = _field + return nil +} + +func (p *TStreamLoadMultiTablePutResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TStreamLoadMultiTablePutResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return 
thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TStreamLoadMultiTablePutResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TStreamLoadMultiTablePutResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetParams() { + if err = oprot.WriteFieldBegin("params", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Params)); err != nil { + return err + } + for _, v := range p.Params { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TStreamLoadMultiTablePutResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPipelineParams() { + if err = oprot.WriteFieldBegin("pipeline_params", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PipelineParams)); err != nil { + return err + } + for _, v := range p.PipelineParams { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TStreamLoadMultiTablePutResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStreamLoadMultiTablePutResult_(%+v)", *p) + +} + +func (p *TStreamLoadMultiTablePutResult_) DeepEqual(ano *TStreamLoadMultiTablePutResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Params) { + return false + } + if !p.Field3DeepEqual(ano.PipelineParams) { + return false + } + return true +} + +func (p *TStreamLoadMultiTablePutResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadMultiTablePutResult_) Field2DeepEqual(src []*palointernalservice.TExecPlanFragmentParams) bool { + + if len(p.Params) != len(src) { + return false + } + for i, v := range p.Params { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TStreamLoadMultiTablePutResult_) Field3DeepEqual(src []*palointernalservice.TPipelineFragmentParams) bool { + + if len(p.PipelineParams) != len(src) { + return false + } + for i, v := range p.PipelineParams { 
+ _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TStreamLoadWithLoadStatusResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` + TotalRows *int64 `thrift:"total_rows,3,optional" frugal:"3,optional,i64" json:"total_rows,omitempty"` + LoadedRows *int64 `thrift:"loaded_rows,4,optional" frugal:"4,optional,i64" json:"loaded_rows,omitempty"` + FilteredRows *int64 `thrift:"filtered_rows,5,optional" frugal:"5,optional,i64" json:"filtered_rows,omitempty"` + UnselectedRows *int64 `thrift:"unselected_rows,6,optional" frugal:"6,optional,i64" json:"unselected_rows,omitempty"` +} + +func NewTStreamLoadWithLoadStatusResult_() *TStreamLoadWithLoadStatusResult_ { + return &TStreamLoadWithLoadStatusResult_{} +} + +func (p *TStreamLoadWithLoadStatusResult_) InitDefault() { +} + +var TStreamLoadWithLoadStatusResult__Status_DEFAULT *status.TStatus + +func (p *TStreamLoadWithLoadStatusResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TStreamLoadWithLoadStatusResult__Status_DEFAULT + } + return p.Status +} + +var TStreamLoadWithLoadStatusResult__TxnId_DEFAULT int64 + +func (p *TStreamLoadWithLoadStatusResult_) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TStreamLoadWithLoadStatusResult__TxnId_DEFAULT + } + return *p.TxnId +} + +var TStreamLoadWithLoadStatusResult__TotalRows_DEFAULT int64 + +func (p *TStreamLoadWithLoadStatusResult_) GetTotalRows() (v int64) { + if !p.IsSetTotalRows() { + return TStreamLoadWithLoadStatusResult__TotalRows_DEFAULT + } + return *p.TotalRows +} + +var TStreamLoadWithLoadStatusResult__LoadedRows_DEFAULT int64 + +func (p *TStreamLoadWithLoadStatusResult_) GetLoadedRows() (v int64) { + if !p.IsSetLoadedRows() { + return TStreamLoadWithLoadStatusResult__LoadedRows_DEFAULT + } + return *p.LoadedRows +} + +var TStreamLoadWithLoadStatusResult__FilteredRows_DEFAULT int64 + +func (p *TStreamLoadWithLoadStatusResult_) GetFilteredRows() (v int64) { + if !p.IsSetFilteredRows() { + return TStreamLoadWithLoadStatusResult__FilteredRows_DEFAULT + } + return *p.FilteredRows +} + +var TStreamLoadWithLoadStatusResult__UnselectedRows_DEFAULT int64 + +func (p *TStreamLoadWithLoadStatusResult_) GetUnselectedRows() (v int64) { + if !p.IsSetUnselectedRows() { + return TStreamLoadWithLoadStatusResult__UnselectedRows_DEFAULT + } + return *p.UnselectedRows +} +func (p *TStreamLoadWithLoadStatusResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TStreamLoadWithLoadStatusResult_) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TStreamLoadWithLoadStatusResult_) SetTotalRows(val *int64) { + p.TotalRows = val +} +func (p *TStreamLoadWithLoadStatusResult_) SetLoadedRows(val *int64) { + p.LoadedRows = val +} +func (p *TStreamLoadWithLoadStatusResult_) SetFilteredRows(val *int64) { + p.FilteredRows = val +} +func (p *TStreamLoadWithLoadStatusResult_) SetUnselectedRows(val *int64) { + p.UnselectedRows = val +} + +var fieldIDToName_TStreamLoadWithLoadStatusResult_ = map[int16]string{ + 1: "status", + 2: "txn_id", + 3: "total_rows", + 4: "loaded_rows", + 5: "filtered_rows", + 6: "unselected_rows", +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetTotalRows() bool { + 
return p.TotalRows != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetLoadedRows() bool { + return p.LoadedRows != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetFilteredRows() bool { + return p.FilteredRows != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) IsSetUnselectedRows() bool { + return p.UnselectedRows != nil +} + +func (p *TStreamLoadWithLoadStatusResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadWithLoadStatusResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TStreamLoadWithLoadStatusResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TStreamLoadWithLoadStatusResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = 
&v + } + p.TotalRows = _field + return nil +} +func (p *TStreamLoadWithLoadStatusResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LoadedRows = _field + return nil +} +func (p *TStreamLoadWithLoadStatusResult_) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FilteredRows = _field + return nil +} +func (p *TStreamLoadWithLoadStatusResult_) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.UnselectedRows = _field + return nil +} + +func (p *TStreamLoadWithLoadStatusResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TStreamLoadWithLoadStatusResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalRows() { + if err = oprot.WriteFieldBegin("total_rows", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err 
:= oprot.WriteI64(*p.TotalRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedRows() { + if err = oprot.WriteFieldBegin("loaded_rows", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadedRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFilteredRows() { + if err = oprot.WriteFieldBegin("filtered_rows", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FilteredRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUnselectedRows() { + if err = oprot.WriteFieldBegin("unselected_rows", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.UnselectedRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStreamLoadWithLoadStatusResult_(%+v)", *p) + +} + +func (p *TStreamLoadWithLoadStatusResult_) DeepEqual(ano *TStreamLoadWithLoadStatusResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.TxnId) { + return false + } + if !p.Field3DeepEqual(ano.TotalRows) { + return false + } + if !p.Field4DeepEqual(ano.LoadedRows) { + return false + } + if !p.Field5DeepEqual(ano.FilteredRows) { + return false + } + if !p.Field6DeepEqual(ano.UnselectedRows) { + return false + } + return true +} + +func (p *TStreamLoadWithLoadStatusResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TStreamLoadWithLoadStatusResult_) Field2DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TStreamLoadWithLoadStatusResult_) Field3DeepEqual(src *int64) bool { + + if p.TotalRows == src { + return true + } else if p.TotalRows == nil || src == nil { + return false + } + if 
*p.TotalRows != *src { + return false + } + return true +} +func (p *TStreamLoadWithLoadStatusResult_) Field4DeepEqual(src *int64) bool { + + if p.LoadedRows == src { + return true + } else if p.LoadedRows == nil || src == nil { + return false + } + if *p.LoadedRows != *src { + return false + } + return true +} +func (p *TStreamLoadWithLoadStatusResult_) Field5DeepEqual(src *int64) bool { + + if p.FilteredRows == src { + return true + } else if p.FilteredRows == nil || src == nil { + return false + } + if *p.FilteredRows != *src { + return false + } + return true +} +func (p *TStreamLoadWithLoadStatusResult_) Field6DeepEqual(src *int64) bool { + + if p.UnselectedRows == src { + return true + } else if p.UnselectedRows == nil || src == nil { + return false + } + if *p.UnselectedRows != *src { + return false + } + return true +} + +type TKafkaRLTaskProgress struct { + PartitionCmtOffset map[int32]int64 `thrift:"partitionCmtOffset,1,required" frugal:"1,required,map" json:"partitionCmtOffset"` +} + +func NewTKafkaRLTaskProgress() *TKafkaRLTaskProgress { + return &TKafkaRLTaskProgress{} +} + +func (p *TKafkaRLTaskProgress) InitDefault() { +} + +func (p *TKafkaRLTaskProgress) GetPartitionCmtOffset() (v map[int32]int64) { + return p.PartitionCmtOffset +} +func (p *TKafkaRLTaskProgress) SetPartitionCmtOffset(val map[int32]int64) { + p.PartitionCmtOffset = val +} + +var fieldIDToName_TKafkaRLTaskProgress = map[int16]string{ + 1: "partitionCmtOffset", +} + +func (p *TKafkaRLTaskProgress) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionCmtOffset bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetPartitionCmtOffset = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetPartitionCmtOffset { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TKafkaRLTaskProgress[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TKafkaRLTaskProgress[fieldId])) +} + +func (p *TKafkaRLTaskProgress) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]int64, 
size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PartitionCmtOffset = _field + return nil +} + +func (p *TKafkaRLTaskProgress) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TKafkaRLTaskProgress"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TKafkaRLTaskProgress) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partitionCmtOffset", thrift.MAP, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I64, len(p.PartitionCmtOffset)); err != nil { + return err + } + for k, v := range p.PartitionCmtOffset { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TKafkaRLTaskProgress) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TKafkaRLTaskProgress(%+v)", *p) + +} + +func (p *TKafkaRLTaskProgress) DeepEqual(ano *TKafkaRLTaskProgress) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PartitionCmtOffset) { + return false + } + return true +} + +func (p *TKafkaRLTaskProgress) Field1DeepEqual(src map[int32]int64) bool { + + if len(p.PartitionCmtOffset) != len(src) { + return false + } + for k, v := range p.PartitionCmtOffset { + _src := src[k] + if v != _src { + return false + } + } + return true +} + +type TRLTaskTxnCommitAttachment struct { + LoadSourceType types.TLoadSourceType `thrift:"loadSourceType,1,required" frugal:"1,required,TLoadSourceType" json:"loadSourceType"` + Id *types.TUniqueId `thrift:"id,2,required" frugal:"2,required,types.TUniqueId" json:"id"` + JobId int64 `thrift:"jobId,3,required" frugal:"3,required,i64" json:"jobId"` + LoadedRows *int64 `thrift:"loadedRows,4,optional" frugal:"4,optional,i64" json:"loadedRows,omitempty"` + FilteredRows *int64 `thrift:"filteredRows,5,optional" frugal:"5,optional,i64" json:"filteredRows,omitempty"` + UnselectedRows *int64 `thrift:"unselectedRows,6,optional" frugal:"6,optional,i64" json:"unselectedRows,omitempty"` + ReceivedBytes *int64 `thrift:"receivedBytes,7,optional" frugal:"7,optional,i64" 
json:"receivedBytes,omitempty"` + LoadedBytes *int64 `thrift:"loadedBytes,8,optional" frugal:"8,optional,i64" json:"loadedBytes,omitempty"` + LoadCostMs *int64 `thrift:"loadCostMs,9,optional" frugal:"9,optional,i64" json:"loadCostMs,omitempty"` + KafkaRLTaskProgress *TKafkaRLTaskProgress `thrift:"kafkaRLTaskProgress,10,optional" frugal:"10,optional,TKafkaRLTaskProgress" json:"kafkaRLTaskProgress,omitempty"` + ErrorLogUrl *string `thrift:"errorLogUrl,11,optional" frugal:"11,optional,string" json:"errorLogUrl,omitempty"` +} + +func NewTRLTaskTxnCommitAttachment() *TRLTaskTxnCommitAttachment { + return &TRLTaskTxnCommitAttachment{} +} + +func (p *TRLTaskTxnCommitAttachment) InitDefault() { +} + +func (p *TRLTaskTxnCommitAttachment) GetLoadSourceType() (v types.TLoadSourceType) { + return p.LoadSourceType +} + +var TRLTaskTxnCommitAttachment_Id_DEFAULT *types.TUniqueId + +func (p *TRLTaskTxnCommitAttachment) GetId() (v *types.TUniqueId) { + if !p.IsSetId() { + return TRLTaskTxnCommitAttachment_Id_DEFAULT + } + return p.Id +} + +func (p *TRLTaskTxnCommitAttachment) GetJobId() (v int64) { + return p.JobId +} + +var TRLTaskTxnCommitAttachment_LoadedRows_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetLoadedRows() (v int64) { + if !p.IsSetLoadedRows() { + return TRLTaskTxnCommitAttachment_LoadedRows_DEFAULT + } + return *p.LoadedRows +} + +var TRLTaskTxnCommitAttachment_FilteredRows_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetFilteredRows() (v int64) { + if !p.IsSetFilteredRows() { + return TRLTaskTxnCommitAttachment_FilteredRows_DEFAULT + } + return *p.FilteredRows +} + +var TRLTaskTxnCommitAttachment_UnselectedRows_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetUnselectedRows() (v int64) { + if !p.IsSetUnselectedRows() { + return TRLTaskTxnCommitAttachment_UnselectedRows_DEFAULT + } + return *p.UnselectedRows +} + +var TRLTaskTxnCommitAttachment_ReceivedBytes_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetReceivedBytes() (v int64) { + if !p.IsSetReceivedBytes() { + return TRLTaskTxnCommitAttachment_ReceivedBytes_DEFAULT + } + return *p.ReceivedBytes +} + +var TRLTaskTxnCommitAttachment_LoadedBytes_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetLoadedBytes() (v int64) { + if !p.IsSetLoadedBytes() { + return TRLTaskTxnCommitAttachment_LoadedBytes_DEFAULT + } + return *p.LoadedBytes +} + +var TRLTaskTxnCommitAttachment_LoadCostMs_DEFAULT int64 + +func (p *TRLTaskTxnCommitAttachment) GetLoadCostMs() (v int64) { + if !p.IsSetLoadCostMs() { + return TRLTaskTxnCommitAttachment_LoadCostMs_DEFAULT + } + return *p.LoadCostMs +} + +var TRLTaskTxnCommitAttachment_KafkaRLTaskProgress_DEFAULT *TKafkaRLTaskProgress + +func (p *TRLTaskTxnCommitAttachment) GetKafkaRLTaskProgress() (v *TKafkaRLTaskProgress) { + if !p.IsSetKafkaRLTaskProgress() { + return TRLTaskTxnCommitAttachment_KafkaRLTaskProgress_DEFAULT + } + return p.KafkaRLTaskProgress +} + +var TRLTaskTxnCommitAttachment_ErrorLogUrl_DEFAULT string + +func (p *TRLTaskTxnCommitAttachment) GetErrorLogUrl() (v string) { + if !p.IsSetErrorLogUrl() { + return TRLTaskTxnCommitAttachment_ErrorLogUrl_DEFAULT + } + return *p.ErrorLogUrl +} +func (p *TRLTaskTxnCommitAttachment) SetLoadSourceType(val types.TLoadSourceType) { + p.LoadSourceType = val +} +func (p *TRLTaskTxnCommitAttachment) SetId(val *types.TUniqueId) { + p.Id = val +} +func (p *TRLTaskTxnCommitAttachment) SetJobId(val int64) { + p.JobId = val +} +func (p *TRLTaskTxnCommitAttachment) SetLoadedRows(val *int64) { + p.LoadedRows = val +} +func (p 
*TRLTaskTxnCommitAttachment) SetFilteredRows(val *int64) { + p.FilteredRows = val +} +func (p *TRLTaskTxnCommitAttachment) SetUnselectedRows(val *int64) { + p.UnselectedRows = val +} +func (p *TRLTaskTxnCommitAttachment) SetReceivedBytes(val *int64) { + p.ReceivedBytes = val +} +func (p *TRLTaskTxnCommitAttachment) SetLoadedBytes(val *int64) { + p.LoadedBytes = val +} +func (p *TRLTaskTxnCommitAttachment) SetLoadCostMs(val *int64) { + p.LoadCostMs = val +} +func (p *TRLTaskTxnCommitAttachment) SetKafkaRLTaskProgress(val *TKafkaRLTaskProgress) { + p.KafkaRLTaskProgress = val +} +func (p *TRLTaskTxnCommitAttachment) SetErrorLogUrl(val *string) { + p.ErrorLogUrl = val +} + +var fieldIDToName_TRLTaskTxnCommitAttachment = map[int16]string{ + 1: "loadSourceType", + 2: "id", + 3: "jobId", + 4: "loadedRows", + 5: "filteredRows", + 6: "unselectedRows", + 7: "receivedBytes", + 8: "loadedBytes", + 9: "loadCostMs", + 10: "kafkaRLTaskProgress", + 11: "errorLogUrl", +} + +func (p *TRLTaskTxnCommitAttachment) IsSetId() bool { + return p.Id != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetLoadedRows() bool { + return p.LoadedRows != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetFilteredRows() bool { + return p.FilteredRows != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetUnselectedRows() bool { + return p.UnselectedRows != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetReceivedBytes() bool { + return p.ReceivedBytes != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetLoadedBytes() bool { + return p.LoadedBytes != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetLoadCostMs() bool { + return p.LoadCostMs != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetKafkaRLTaskProgress() bool { + return p.KafkaRLTaskProgress != nil +} + +func (p *TRLTaskTxnCommitAttachment) IsSetErrorLogUrl() bool { + return p.ErrorLogUrl != nil +} + +func (p *TRLTaskTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetLoadSourceType bool = false + var issetId bool = false + var issetJobId bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetLoadSourceType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetJobId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetLoadSourceType { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetJobId { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRLTaskTxnCommitAttachment[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRLTaskTxnCommitAttachment[fieldId])) +} + +func (p *TRLTaskTxnCommitAttachment) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TLoadSourceType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TLoadSourceType(v) + } + p.LoadSourceType = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.Id = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.JobId = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LoadedRows = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + 
p.FilteredRows = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.UnselectedRows = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReceivedBytes = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LoadedBytes = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LoadCostMs = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField10(iprot thrift.TProtocol) error { + _field := NewTKafkaRLTaskProgress() + if err := _field.Read(iprot); err != nil { + return err + } + p.KafkaRLTaskProgress = _field + return nil +} +func (p *TRLTaskTxnCommitAttachment) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ErrorLogUrl = _field + return nil +} + +func (p *TRLTaskTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRLTaskTxnCommitAttachment"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("loadSourceType", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.LoadSourceType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil 
{ + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Id.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("jobId", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedRows() { + if err = oprot.WriteFieldBegin("loadedRows", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadedRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFilteredRows() { + if err = oprot.WriteFieldBegin("filteredRows", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FilteredRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUnselectedRows() { + if err = oprot.WriteFieldBegin("unselectedRows", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.UnselectedRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetReceivedBytes() { + if err = oprot.WriteFieldBegin("receivedBytes", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReceivedBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); 
err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadedBytes() { + if err = oprot.WriteFieldBegin("loadedBytes", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadedBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadCostMs() { + if err = oprot.WriteFieldBegin("loadCostMs", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadCostMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetKafkaRLTaskProgress() { + if err = oprot.WriteFieldBegin("kafkaRLTaskProgress", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.KafkaRLTaskProgress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetErrorLogUrl() { + if err = oprot.WriteFieldBegin("errorLogUrl", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ErrorLogUrl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TRLTaskTxnCommitAttachment) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRLTaskTxnCommitAttachment(%+v)", *p) + +} + +func (p *TRLTaskTxnCommitAttachment) DeepEqual(ano *TRLTaskTxnCommitAttachment) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.LoadSourceType) { + return false + } + if !p.Field2DeepEqual(ano.Id) { + return false + } + if !p.Field3DeepEqual(ano.JobId) { + return false + } + if !p.Field4DeepEqual(ano.LoadedRows) { + return false + } + if !p.Field5DeepEqual(ano.FilteredRows) { + return false + } + if !p.Field6DeepEqual(ano.UnselectedRows) { + return false + } + if !p.Field7DeepEqual(ano.ReceivedBytes) { + return false + } + if !p.Field8DeepEqual(ano.LoadedBytes) { + return false 
+ } + if !p.Field9DeepEqual(ano.LoadCostMs) { + return false + } + if !p.Field10DeepEqual(ano.KafkaRLTaskProgress) { + return false + } + if !p.Field11DeepEqual(ano.ErrorLogUrl) { + return false + } + return true +} + +func (p *TRLTaskTxnCommitAttachment) Field1DeepEqual(src types.TLoadSourceType) bool { + + if p.LoadSourceType != src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field2DeepEqual(src *types.TUniqueId) bool { + + if !p.Id.DeepEqual(src) { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field3DeepEqual(src int64) bool { + + if p.JobId != src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field4DeepEqual(src *int64) bool { + + if p.LoadedRows == src { + return true + } else if p.LoadedRows == nil || src == nil { + return false + } + if *p.LoadedRows != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field5DeepEqual(src *int64) bool { + + if p.FilteredRows == src { + return true + } else if p.FilteredRows == nil || src == nil { + return false + } + if *p.FilteredRows != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field6DeepEqual(src *int64) bool { + + if p.UnselectedRows == src { + return true + } else if p.UnselectedRows == nil || src == nil { + return false + } + if *p.UnselectedRows != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field7DeepEqual(src *int64) bool { + + if p.ReceivedBytes == src { + return true + } else if p.ReceivedBytes == nil || src == nil { + return false + } + if *p.ReceivedBytes != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field8DeepEqual(src *int64) bool { + + if p.LoadedBytes == src { + return true + } else if p.LoadedBytes == nil || src == nil { + return false + } + if *p.LoadedBytes != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field9DeepEqual(src *int64) bool { + + if p.LoadCostMs == src { + return true + } else if p.LoadCostMs == nil || src == nil { + return false + } + if *p.LoadCostMs != *src { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field10DeepEqual(src *TKafkaRLTaskProgress) bool { + + if !p.KafkaRLTaskProgress.DeepEqual(src) { + return false + } + return true +} +func (p *TRLTaskTxnCommitAttachment) Field11DeepEqual(src *string) bool { + + if p.ErrorLogUrl == src { + return true + } else if p.ErrorLogUrl == nil || src == nil { + return false + } + if strings.Compare(*p.ErrorLogUrl, *src) != 0 { + return false + } + return true +} + +type TTxnCommitAttachment struct { + LoadType types.TLoadType `thrift:"loadType,1,required" frugal:"1,required,TLoadType" json:"loadType"` + RlTaskTxnCommitAttachment *TRLTaskTxnCommitAttachment `thrift:"rlTaskTxnCommitAttachment,2,optional" frugal:"2,optional,TRLTaskTxnCommitAttachment" json:"rlTaskTxnCommitAttachment,omitempty"` +} + +func NewTTxnCommitAttachment() *TTxnCommitAttachment { + return &TTxnCommitAttachment{} +} + +func (p *TTxnCommitAttachment) InitDefault() { +} + +func (p *TTxnCommitAttachment) GetLoadType() (v types.TLoadType) { + return p.LoadType +} + +var TTxnCommitAttachment_RlTaskTxnCommitAttachment_DEFAULT *TRLTaskTxnCommitAttachment + +func (p *TTxnCommitAttachment) GetRlTaskTxnCommitAttachment() (v *TRLTaskTxnCommitAttachment) { + if !p.IsSetRlTaskTxnCommitAttachment() { + return TTxnCommitAttachment_RlTaskTxnCommitAttachment_DEFAULT + } + return p.RlTaskTxnCommitAttachment +} +func (p 
*TTxnCommitAttachment) SetLoadType(val types.TLoadType) { + p.LoadType = val +} +func (p *TTxnCommitAttachment) SetRlTaskTxnCommitAttachment(val *TRLTaskTxnCommitAttachment) { + p.RlTaskTxnCommitAttachment = val +} + +var fieldIDToName_TTxnCommitAttachment = map[int16]string{ + 1: "loadType", + 2: "rlTaskTxnCommitAttachment", +} + +func (p *TTxnCommitAttachment) IsSetRlTaskTxnCommitAttachment() bool { + return p.RlTaskTxnCommitAttachment != nil +} + +func (p *TTxnCommitAttachment) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetLoadType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetLoadType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetLoadType { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnCommitAttachment[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTxnCommitAttachment[fieldId])) +} + +func (p *TTxnCommitAttachment) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TLoadType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TLoadType(v) + } + p.LoadType = _field + return nil +} +func (p *TTxnCommitAttachment) ReadField2(iprot thrift.TProtocol) error { + _field := NewTRLTaskTxnCommitAttachment() + if err := _field.Read(iprot); err != nil { + return err + } + p.RlTaskTxnCommitAttachment = _field + return nil +} + +func (p *TTxnCommitAttachment) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTxnCommitAttachment"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError 
+ } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTxnCommitAttachment) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("loadType", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.LoadType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTxnCommitAttachment) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRlTaskTxnCommitAttachment() { + if err = oprot.WriteFieldBegin("rlTaskTxnCommitAttachment", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.RlTaskTxnCommitAttachment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTxnCommitAttachment) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTxnCommitAttachment(%+v)", *p) + +} + +func (p *TTxnCommitAttachment) DeepEqual(ano *TTxnCommitAttachment) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.LoadType) { + return false + } + if !p.Field2DeepEqual(ano.RlTaskTxnCommitAttachment) { + return false + } + return true +} + +func (p *TTxnCommitAttachment) Field1DeepEqual(src types.TLoadType) bool { + + if p.LoadType != src { + return false + } + return true +} +func (p *TTxnCommitAttachment) Field2DeepEqual(src *TRLTaskTxnCommitAttachment) bool { + + if !p.RlTaskTxnCommitAttachment.DeepEqual(src) { + return false + } + return true +} + +type TLoadTxnCommitRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` + Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` + UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + TxnId int64 `thrift:"txnId,7,required" frugal:"7,required,i64" json:"txnId"` + Sync bool `thrift:"sync,8,required" frugal:"8,required,bool" json:"sync"` + CommitInfos []*types.TTabletCommitInfo `thrift:"commitInfos,9,optional" frugal:"9,optional,list" json:"commitInfos,omitempty"` + AuthCode *int64 `thrift:"auth_code,10,optional" frugal:"10,optional,i64" json:"auth_code,omitempty"` + TxnCommitAttachment *TTxnCommitAttachment `thrift:"txnCommitAttachment,11,optional" frugal:"11,optional,TTxnCommitAttachment" json:"txnCommitAttachment,omitempty"` + 
ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,12,optional" frugal:"12,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` + Token *string `thrift:"token,13,optional" frugal:"13,optional,string" json:"token,omitempty"` + DbId *int64 `thrift:"db_id,14,optional" frugal:"14,optional,i64" json:"db_id,omitempty"` + Tbls []string `thrift:"tbls,15,optional" frugal:"15,optional,list" json:"tbls,omitempty"` + TableId *int64 `thrift:"table_id,16,optional" frugal:"16,optional,i64" json:"table_id,omitempty"` + AuthCodeUuid *string `thrift:"auth_code_uuid,17,optional" frugal:"17,optional,string" json:"auth_code_uuid,omitempty"` + GroupCommit *bool `thrift:"groupCommit,18,optional" frugal:"18,optional,bool" json:"groupCommit,omitempty"` + ReceiveBytes *int64 `thrift:"receiveBytes,19,optional" frugal:"19,optional,i64" json:"receiveBytes,omitempty"` + BackendId *int64 `thrift:"backendId,20,optional" frugal:"20,optional,i64" json:"backendId,omitempty"` +} + +func NewTLoadTxnCommitRequest() *TLoadTxnCommitRequest { + return &TLoadTxnCommitRequest{} +} + +func (p *TLoadTxnCommitRequest) InitDefault() { +} + +var TLoadTxnCommitRequest_Cluster_DEFAULT string + +func (p *TLoadTxnCommitRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TLoadTxnCommitRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +func (p *TLoadTxnCommitRequest) GetUser() (v string) { + return p.User +} + +func (p *TLoadTxnCommitRequest) GetPasswd() (v string) { + return p.Passwd +} + +func (p *TLoadTxnCommitRequest) GetDb() (v string) { + return p.Db +} + +func (p *TLoadTxnCommitRequest) GetTbl() (v string) { + return p.Tbl +} + +var TLoadTxnCommitRequest_UserIp_DEFAULT string + +func (p *TLoadTxnCommitRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TLoadTxnCommitRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +func (p *TLoadTxnCommitRequest) GetTxnId() (v int64) { + return p.TxnId +} + +func (p *TLoadTxnCommitRequest) GetSync() (v bool) { + return p.Sync +} + +var TLoadTxnCommitRequest_CommitInfos_DEFAULT []*types.TTabletCommitInfo + +func (p *TLoadTxnCommitRequest) GetCommitInfos() (v []*types.TTabletCommitInfo) { + if !p.IsSetCommitInfos() { + return TLoadTxnCommitRequest_CommitInfos_DEFAULT + } + return p.CommitInfos +} + +var TLoadTxnCommitRequest_AuthCode_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TLoadTxnCommitRequest_AuthCode_DEFAULT + } + return *p.AuthCode +} + +var TLoadTxnCommitRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment + +func (p *TLoadTxnCommitRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { + if !p.IsSetTxnCommitAttachment() { + return TLoadTxnCommitRequest_TxnCommitAttachment_DEFAULT + } + return p.TxnCommitAttachment +} + +var TLoadTxnCommitRequest_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TLoadTxnCommitRequest_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs +} + +var TLoadTxnCommitRequest_Token_DEFAULT string + +func (p *TLoadTxnCommitRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TLoadTxnCommitRequest_Token_DEFAULT + } + return *p.Token +} + +var TLoadTxnCommitRequest_DbId_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TLoadTxnCommitRequest_DbId_DEFAULT + } + return *p.DbId +} + +var TLoadTxnCommitRequest_Tbls_DEFAULT []string + +func (p *TLoadTxnCommitRequest) GetTbls() (v []string) { 
+ if !p.IsSetTbls() { + return TLoadTxnCommitRequest_Tbls_DEFAULT + } + return p.Tbls +} + +var TLoadTxnCommitRequest_TableId_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TLoadTxnCommitRequest_TableId_DEFAULT + } + return *p.TableId +} + +var TLoadTxnCommitRequest_AuthCodeUuid_DEFAULT string + +func (p *TLoadTxnCommitRequest) GetAuthCodeUuid() (v string) { + if !p.IsSetAuthCodeUuid() { + return TLoadTxnCommitRequest_AuthCodeUuid_DEFAULT + } + return *p.AuthCodeUuid +} + +var TLoadTxnCommitRequest_GroupCommit_DEFAULT bool + +func (p *TLoadTxnCommitRequest) GetGroupCommit() (v bool) { + if !p.IsSetGroupCommit() { + return TLoadTxnCommitRequest_GroupCommit_DEFAULT + } + return *p.GroupCommit +} + +var TLoadTxnCommitRequest_ReceiveBytes_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetReceiveBytes() (v int64) { + if !p.IsSetReceiveBytes() { + return TLoadTxnCommitRequest_ReceiveBytes_DEFAULT + } + return *p.ReceiveBytes +} + +var TLoadTxnCommitRequest_BackendId_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TLoadTxnCommitRequest_BackendId_DEFAULT + } + return *p.BackendId +} +func (p *TLoadTxnCommitRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TLoadTxnCommitRequest) SetUser(val string) { + p.User = val +} +func (p *TLoadTxnCommitRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TLoadTxnCommitRequest) SetDb(val string) { + p.Db = val +} +func (p *TLoadTxnCommitRequest) SetTbl(val string) { + p.Tbl = val +} +func (p *TLoadTxnCommitRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TLoadTxnCommitRequest) SetTxnId(val int64) { + p.TxnId = val +} +func (p *TLoadTxnCommitRequest) SetSync(val bool) { + p.Sync = val +} +func (p *TLoadTxnCommitRequest) SetCommitInfos(val []*types.TTabletCommitInfo) { + p.CommitInfos = val +} +func (p *TLoadTxnCommitRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TLoadTxnCommitRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { + p.TxnCommitAttachment = val +} +func (p *TLoadTxnCommitRequest) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} +func (p *TLoadTxnCommitRequest) SetToken(val *string) { + p.Token = val +} +func (p *TLoadTxnCommitRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TLoadTxnCommitRequest) SetTbls(val []string) { + p.Tbls = val +} +func (p *TLoadTxnCommitRequest) SetTableId(val *int64) { + p.TableId = val +} +func (p *TLoadTxnCommitRequest) SetAuthCodeUuid(val *string) { + p.AuthCodeUuid = val +} +func (p *TLoadTxnCommitRequest) SetGroupCommit(val *bool) { + p.GroupCommit = val +} +func (p *TLoadTxnCommitRequest) SetReceiveBytes(val *int64) { + p.ReceiveBytes = val +} +func (p *TLoadTxnCommitRequest) SetBackendId(val *int64) { + p.BackendId = val +} + +var fieldIDToName_TLoadTxnCommitRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "tbl", + 6: "user_ip", + 7: "txnId", + 8: "sync", + 9: "commitInfos", + 10: "auth_code", + 11: "txnCommitAttachment", + 12: "thrift_rpc_timeout_ms", + 13: "token", + 14: "db_id", + 15: "tbls", + 16: "table_id", + 17: "auth_code_uuid", + 18: "groupCommit", + 19: "receiveBytes", + 20: "backendId", +} + +func (p *TLoadTxnCommitRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TLoadTxnCommitRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TLoadTxnCommitRequest) IsSetCommitInfos() bool { + return p.CommitInfos != nil 
+} + +func (p *TLoadTxnCommitRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TLoadTxnCommitRequest) IsSetTxnCommitAttachment() bool { + return p.TxnCommitAttachment != nil +} + +func (p *TLoadTxnCommitRequest) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil +} + +func (p *TLoadTxnCommitRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TLoadTxnCommitRequest) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TLoadTxnCommitRequest) IsSetTbls() bool { + return p.Tbls != nil +} + +func (p *TLoadTxnCommitRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TLoadTxnCommitRequest) IsSetAuthCodeUuid() bool { + return p.AuthCodeUuid != nil +} + +func (p *TLoadTxnCommitRequest) IsSetGroupCommit() bool { + return p.GroupCommit != nil +} + +func (p *TLoadTxnCommitRequest) IsSetReceiveBytes() bool { + return p.ReceiveBytes != nil +} + +func (p *TLoadTxnCommitRequest) IsSetBackendId() bool { + return p.BackendId != nil +} + +func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetTxnId bool = false + var issetSync bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetDb = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetTbl = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetTxnId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + issetSync = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = 
p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRING { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.LIST { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.I64 { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.STRING { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.I64 { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.I64 { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetSync { + fieldId = 8 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitRequest[fieldId])) +} + +func (p *TLoadTxnCommitRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.User = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Passwd = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Db = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Tbl = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TxnId = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.Sync = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TTabletCommitInfo, 0, size) + values := make([]types.TTabletCommitInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.CommitInfos = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField11(iprot thrift.TProtocol) error { + _field := NewTTxnCommitAttachment() + if err := _field.Read(iprot); err != nil { + return err + } + p.TxnCommitAttachment = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + 
p.DbId = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField15(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tbls = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField16(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField17(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AuthCodeUuid = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField18(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommit = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField19(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReceiveBytes = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField20(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BackendId = _field + return nil +} + +func (p *TLoadTxnCommitRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxnCommitRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = 
p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) 
writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("sync", thrift.BOOL, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.Sync); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetCommitInfos() { + if err = oprot.WriteFieldBegin("commitInfos", thrift.LIST, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { + return err + } + for _, v := range p.CommitInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnCommitAttachment() { + if err = oprot.WriteFieldBegin("txnCommitAttachment", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnCommitAttachment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write 
field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetTbls() { + if err = oprot.WriteFieldBegin("tbls", thrift.LIST, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Tbls)); err != nil { + return err + } + for _, v := range p.Tbls { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCodeUuid() { + if err = oprot.WriteFieldBegin("auth_code_uuid", thrift.STRING, 17); err != nil { + goto WriteFieldBeginError + } 
+ if err := oprot.WriteString(*p.AuthCodeUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommit() { + if err = oprot.WriteFieldBegin("groupCommit", thrift.BOOL, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.GroupCommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetReceiveBytes() { + if err = oprot.WriteFieldBegin("receiveBytes", thrift.I64, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReceiveBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backendId", thrift.I64, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxnCommitRequest(%+v)", *p) + +} + +func (p *TLoadTxnCommitRequest) DeepEqual(ano *TLoadTxnCommitRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Tbl) { + return false + } + if !p.Field6DeepEqual(ano.UserIp) { + return false + } + if !p.Field7DeepEqual(ano.TxnId) { + return false + } + if !p.Field8DeepEqual(ano.Sync) { + return false + } + if !p.Field9DeepEqual(ano.CommitInfos) { + return false + } + if !p.Field10DeepEqual(ano.AuthCode) { + return false + } + if !p.Field11DeepEqual(ano.TxnCommitAttachment) { + return false + } + if !p.Field12DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + if !p.Field13DeepEqual(ano.Token) { + return false + } + if !p.Field14DeepEqual(ano.DbId) { + return false + } + if !p.Field15DeepEqual(ano.Tbls) { + return false + } + if !p.Field16DeepEqual(ano.TableId) { + return false + } + if !p.Field17DeepEqual(ano.AuthCodeUuid) { + return false + } + if 
!p.Field18DeepEqual(ano.GroupCommit) { + return false + } + if !p.Field19DeepEqual(ano.ReceiveBytes) { + return false + } + if !p.Field20DeepEqual(ano.BackendId) { + return false + } + return true +} + +func (p *TLoadTxnCommitRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Passwd, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field4DeepEqual(src string) bool { + + if strings.Compare(p.Db, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field5DeepEqual(src string) bool { + + if strings.Compare(p.Tbl, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field6DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field7DeepEqual(src int64) bool { + + if p.TxnId != src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field8DeepEqual(src bool) bool { + + if p.Sync != src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field9DeepEqual(src []*types.TTabletCommitInfo) bool { + + if len(p.CommitInfos) != len(src) { + return false + } + for i, v := range p.CommitInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TLoadTxnCommitRequest) Field10DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field11DeepEqual(src *TTxnCommitAttachment) bool { + + if !p.TxnCommitAttachment.DeepEqual(src) { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field12DeepEqual(src *int64) bool { + + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false + } + if *p.ThriftRpcTimeoutMs != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field13DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field14DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field15DeepEqual(src []string) bool { + + if len(p.Tbls) != len(src) { + return false + } + for i, v := range p.Tbls { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TLoadTxnCommitRequest) Field16DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field17DeepEqual(src *string) bool { + + if p.AuthCodeUuid == src { + return 
true + } else if p.AuthCodeUuid == nil || src == nil { + return false + } + if strings.Compare(*p.AuthCodeUuid, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field18DeepEqual(src *bool) bool { + + if p.GroupCommit == src { + return true + } else if p.GroupCommit == nil || src == nil { + return false + } + if *p.GroupCommit != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field19DeepEqual(src *int64) bool { + + if p.ReceiveBytes == src { + return true + } else if p.ReceiveBytes == nil || src == nil { + return false + } + if *p.ReceiveBytes != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field20DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} + +type TLoadTxnCommitResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +} + +func NewTLoadTxnCommitResult_() *TLoadTxnCommitResult_ { + return &TLoadTxnCommitResult_{} +} + +func (p *TLoadTxnCommitResult_) InitDefault() { +} + +var TLoadTxnCommitResult__Status_DEFAULT *status.TStatus + +func (p *TLoadTxnCommitResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TLoadTxnCommitResult__Status_DEFAULT + } + return p.Status +} +func (p *TLoadTxnCommitResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TLoadTxnCommitResult_ = map[int16]string{ + 1: "status", +} + +func (p *TLoadTxnCommitResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TLoadTxnCommitResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitResult_[fieldId])) +} + +func (p 
*TLoadTxnCommitResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TLoadTxnCommitResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxnCommitResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxnCommitResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLoadTxnCommitResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxnCommitResult_(%+v)", *p) + +} + +func (p *TLoadTxnCommitResult_) DeepEqual(ano *TLoadTxnCommitResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TLoadTxnCommitResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TCommitTxnRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" json:"user_ip,omitempty"` + TxnId *int64 `thrift:"txn_id,6,optional" frugal:"6,optional,i64" json:"txn_id,omitempty"` + CommitInfos []*types.TTabletCommitInfo `thrift:"commit_infos,7,optional" frugal:"7,optional,list" json:"commit_infos,omitempty"` + AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` + TxnCommitAttachment *TTxnCommitAttachment `thrift:"txn_commit_attachment,9,optional" frugal:"9,optional,TTxnCommitAttachment" json:"txn_commit_attachment,omitempty"` + ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,10,optional" frugal:"10,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` + Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` + TxnInsert *bool 
`thrift:"txn_insert,13,optional" frugal:"13,optional,bool" json:"txn_insert,omitempty"` + SubTxnInfos []*TSubTxnInfo `thrift:"sub_txn_infos,14,optional" frugal:"14,optional,list" json:"sub_txn_infos,omitempty"` +} + +func NewTCommitTxnRequest() *TCommitTxnRequest { + return &TCommitTxnRequest{} +} + +func (p *TCommitTxnRequest) InitDefault() { +} + +var TCommitTxnRequest_Cluster_DEFAULT string + +func (p *TCommitTxnRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TCommitTxnRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TCommitTxnRequest_User_DEFAULT string + +func (p *TCommitTxnRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TCommitTxnRequest_User_DEFAULT + } + return *p.User +} + +var TCommitTxnRequest_Passwd_DEFAULT string + +func (p *TCommitTxnRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TCommitTxnRequest_Passwd_DEFAULT + } + return *p.Passwd +} + +var TCommitTxnRequest_Db_DEFAULT string + +func (p *TCommitTxnRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TCommitTxnRequest_Db_DEFAULT + } + return *p.Db +} + +var TCommitTxnRequest_UserIp_DEFAULT string + +func (p *TCommitTxnRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TCommitTxnRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +var TCommitTxnRequest_TxnId_DEFAULT int64 + +func (p *TCommitTxnRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TCommitTxnRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TCommitTxnRequest_CommitInfos_DEFAULT []*types.TTabletCommitInfo + +func (p *TCommitTxnRequest) GetCommitInfos() (v []*types.TTabletCommitInfo) { + if !p.IsSetCommitInfos() { + return TCommitTxnRequest_CommitInfos_DEFAULT + } + return p.CommitInfos +} + +var TCommitTxnRequest_AuthCode_DEFAULT int64 + +func (p *TCommitTxnRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TCommitTxnRequest_AuthCode_DEFAULT + } + return *p.AuthCode +} + +var TCommitTxnRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment + +func (p *TCommitTxnRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { + if !p.IsSetTxnCommitAttachment() { + return TCommitTxnRequest_TxnCommitAttachment_DEFAULT + } + return p.TxnCommitAttachment +} + +var TCommitTxnRequest_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TCommitTxnRequest) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TCommitTxnRequest_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs +} + +var TCommitTxnRequest_Token_DEFAULT string + +func (p *TCommitTxnRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TCommitTxnRequest_Token_DEFAULT + } + return *p.Token +} + +var TCommitTxnRequest_DbId_DEFAULT int64 + +func (p *TCommitTxnRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TCommitTxnRequest_DbId_DEFAULT + } + return *p.DbId +} + +var TCommitTxnRequest_TxnInsert_DEFAULT bool + +func (p *TCommitTxnRequest) GetTxnInsert() (v bool) { + if !p.IsSetTxnInsert() { + return TCommitTxnRequest_TxnInsert_DEFAULT + } + return *p.TxnInsert +} + +var TCommitTxnRequest_SubTxnInfos_DEFAULT []*TSubTxnInfo + +func (p *TCommitTxnRequest) GetSubTxnInfos() (v []*TSubTxnInfo) { + if !p.IsSetSubTxnInfos() { + return TCommitTxnRequest_SubTxnInfos_DEFAULT + } + return p.SubTxnInfos +} +func (p *TCommitTxnRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TCommitTxnRequest) SetUser(val *string) { + p.User = val +} +func (p *TCommitTxnRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TCommitTxnRequest) 
SetDb(val *string) { + p.Db = val +} +func (p *TCommitTxnRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TCommitTxnRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TCommitTxnRequest) SetCommitInfos(val []*types.TTabletCommitInfo) { + p.CommitInfos = val +} +func (p *TCommitTxnRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TCommitTxnRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { + p.TxnCommitAttachment = val +} +func (p *TCommitTxnRequest) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} +func (p *TCommitTxnRequest) SetToken(val *string) { + p.Token = val +} +func (p *TCommitTxnRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TCommitTxnRequest) SetTxnInsert(val *bool) { + p.TxnInsert = val +} +func (p *TCommitTxnRequest) SetSubTxnInfos(val []*TSubTxnInfo) { + p.SubTxnInfos = val +} + +var fieldIDToName_TCommitTxnRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "user_ip", + 6: "txn_id", + 7: "commit_infos", + 8: "auth_code", + 9: "txn_commit_attachment", + 10: "thrift_rpc_timeout_ms", + 11: "token", + 12: "db_id", + 13: "txn_insert", + 14: "sub_txn_infos", +} + +func (p *TCommitTxnRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TCommitTxnRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TCommitTxnRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TCommitTxnRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TCommitTxnRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TCommitTxnRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TCommitTxnRequest) IsSetCommitInfos() bool { + return p.CommitInfos != nil +} + +func (p *TCommitTxnRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TCommitTxnRequest) IsSetTxnCommitAttachment() bool { + return p.TxnCommitAttachment != nil +} + +func (p *TCommitTxnRequest) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil +} + +func (p *TCommitTxnRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TCommitTxnRequest) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TCommitTxnRequest) IsSetTxnInsert() bool { + return p.TxnInsert != nil +} + +func (p *TCommitTxnRequest) IsSetSubTxnInfos() bool { + return p.SubTxnInfos != nil +} + +func (p *TCommitTxnRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.LIST { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCommitTxnRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TCommitTxnRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p *TCommitTxnRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + 
return err + } else { + _field = &v + } + p.Passwd = _field + return nil +} +func (p *TCommitTxnRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TCommitTxnRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TCommitTxnRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TCommitTxnRequest) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TTabletCommitInfo, 0, size) + values := make([]types.TTabletCommitInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.CommitInfos = _field + return nil +} +func (p *TCommitTxnRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TCommitTxnRequest) ReadField9(iprot thrift.TProtocol) error { + _field := NewTTxnCommitAttachment() + if err := _field.Read(iprot); err != nil { + return err + } + p.TxnCommitAttachment = _field + return nil +} +func (p *TCommitTxnRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil +} +func (p *TCommitTxnRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TCommitTxnRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TCommitTxnRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.TxnInsert = _field + return nil +} +func (p *TCommitTxnRequest) ReadField14(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TSubTxnInfo, 0, size) + values := make([]TSubTxnInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SubTxnInfos = _field + return nil +} + +func (p *TCommitTxnRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCommitTxnRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto 
WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } 
+ if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetCommitInfos() { + if err = oprot.WriteFieldBegin("commit_infos", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { + return err + } + for _, v := range p.CommitInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnCommitAttachment() { + if err = oprot.WriteFieldBegin("txn_commit_attachment", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnCommitAttachment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + 
+func (p *TCommitTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnInsert() { + if err = oprot.WriteFieldBegin("txn_insert", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.TxnInsert); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TCommitTxnRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetSubTxnInfos() { + if err = oprot.WriteFieldBegin("sub_txn_infos", thrift.LIST, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SubTxnInfos)); err != nil { + return err + } + for _, v := range p.SubTxnInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TCommitTxnRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCommitTxnRequest(%+v)", *p) + +} + +func (p *TCommitTxnRequest) DeepEqual(ano *TCommitTxnRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if 
!p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.UserIp) { + return false + } + if !p.Field6DeepEqual(ano.TxnId) { + return false + } + if !p.Field7DeepEqual(ano.CommitInfos) { + return false + } + if !p.Field8DeepEqual(ano.AuthCode) { + return false + } + if !p.Field9DeepEqual(ano.TxnCommitAttachment) { + return false + } + if !p.Field10DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + if !p.Field11DeepEqual(ano.Token) { + return false + } + if !p.Field12DeepEqual(ano.DbId) { + return false + } + if !p.Field13DeepEqual(ano.TxnInsert) { + return false + } + if !p.Field14DeepEqual(ano.SubTxnInfos) { + return false + } + return true +} + +func (p *TCommitTxnRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field3DeepEqual(src *string) bool { + + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { + return false + } + if strings.Compare(*p.Passwd, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field5DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field6DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TCommitTxnRequest) Field7DeepEqual(src []*types.TTabletCommitInfo) bool { + + if len(p.CommitInfos) != len(src) { + return false + } + for i, v := range p.CommitInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TCommitTxnRequest) Field8DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TCommitTxnRequest) Field9DeepEqual(src *TTxnCommitAttachment) bool { + + if !p.TxnCommitAttachment.DeepEqual(src) { + return false + } + return true +} +func (p *TCommitTxnRequest) Field10DeepEqual(src *int64) bool { + + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false + } + if *p.ThriftRpcTimeoutMs != *src { + return false + } + return true +} +func (p *TCommitTxnRequest) Field11DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TCommitTxnRequest) Field12DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + 
return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TCommitTxnRequest) Field13DeepEqual(src *bool) bool { + + if p.TxnInsert == src { + return true + } else if p.TxnInsert == nil || src == nil { + return false + } + if *p.TxnInsert != *src { + return false + } + return true +} +func (p *TCommitTxnRequest) Field14DeepEqual(src []*TSubTxnInfo) bool { + + if len(p.SubTxnInfos) != len(src) { + return false + } + for i, v := range p.SubTxnInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TCommitTxnResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,2,optional" frugal:"2,optional,types.TNetworkAddress" json:"master_address,omitempty"` +} + +func NewTCommitTxnResult_() *TCommitTxnResult_ { + return &TCommitTxnResult_{} +} + +func (p *TCommitTxnResult_) InitDefault() { +} + +var TCommitTxnResult__Status_DEFAULT *status.TStatus + +func (p *TCommitTxnResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TCommitTxnResult__Status_DEFAULT + } + return p.Status +} + +var TCommitTxnResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TCommitTxnResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TCommitTxnResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TCommitTxnResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TCommitTxnResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TCommitTxnResult_ = map[int16]string{ + 1: "status", + 2: "master_address", +} + +func (p *TCommitTxnResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCommitTxnResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TCommitTxnResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read 
field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCommitTxnResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TCommitTxnResult_) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TCommitTxnResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCommitTxnResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCommitTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TCommitTxnResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TCommitTxnResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCommitTxnResult_(%+v)", *p) + +} + +func (p *TCommitTxnResult_) DeepEqual(ano *TCommitTxnResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TCommitTxnResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TCommitTxnResult_) Field2DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TLoadTxn2PCRequest struct { + Cluster *string `thrift:"cluster,1,optional" 
frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" json:"user_ip,omitempty"` + TxnId *int64 `thrift:"txnId,6,optional" frugal:"6,optional,i64" json:"txnId,omitempty"` + Operation *string `thrift:"operation,7,optional" frugal:"7,optional,string" json:"operation,omitempty"` + AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` + Token *string `thrift:"token,9,optional" frugal:"9,optional,string" json:"token,omitempty"` + ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,10,optional" frugal:"10,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` + Label *string `thrift:"label,11,optional" frugal:"11,optional,string" json:"label,omitempty"` + AuthCodeUuid *string `thrift:"auth_code_uuid,1000,optional" frugal:"1000,optional,string" json:"auth_code_uuid,omitempty"` +} + +func NewTLoadTxn2PCRequest() *TLoadTxn2PCRequest { + return &TLoadTxn2PCRequest{} +} + +func (p *TLoadTxn2PCRequest) InitDefault() { +} + +var TLoadTxn2PCRequest_Cluster_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TLoadTxn2PCRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +func (p *TLoadTxn2PCRequest) GetUser() (v string) { + return p.User +} + +func (p *TLoadTxn2PCRequest) GetPasswd() (v string) { + return p.Passwd +} + +var TLoadTxn2PCRequest_Db_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TLoadTxn2PCRequest_Db_DEFAULT + } + return *p.Db +} + +var TLoadTxn2PCRequest_UserIp_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TLoadTxn2PCRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +var TLoadTxn2PCRequest_TxnId_DEFAULT int64 + +func (p *TLoadTxn2PCRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TLoadTxn2PCRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TLoadTxn2PCRequest_Operation_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetOperation() (v string) { + if !p.IsSetOperation() { + return TLoadTxn2PCRequest_Operation_DEFAULT + } + return *p.Operation +} + +var TLoadTxn2PCRequest_AuthCode_DEFAULT int64 + +func (p *TLoadTxn2PCRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TLoadTxn2PCRequest_AuthCode_DEFAULT + } + return *p.AuthCode +} + +var TLoadTxn2PCRequest_Token_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TLoadTxn2PCRequest_Token_DEFAULT + } + return *p.Token +} + +var TLoadTxn2PCRequest_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TLoadTxn2PCRequest) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TLoadTxn2PCRequest_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs +} + +var TLoadTxn2PCRequest_Label_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TLoadTxn2PCRequest_Label_DEFAULT + } + return *p.Label +} + +var TLoadTxn2PCRequest_AuthCodeUuid_DEFAULT string + +func (p *TLoadTxn2PCRequest) GetAuthCodeUuid() (v string) { + if !p.IsSetAuthCodeUuid() { + return TLoadTxn2PCRequest_AuthCodeUuid_DEFAULT + } + return *p.AuthCodeUuid +} +func (p *TLoadTxn2PCRequest) SetCluster(val 
*string) { + p.Cluster = val +} +func (p *TLoadTxn2PCRequest) SetUser(val string) { + p.User = val +} +func (p *TLoadTxn2PCRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TLoadTxn2PCRequest) SetDb(val *string) { + p.Db = val +} +func (p *TLoadTxn2PCRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TLoadTxn2PCRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TLoadTxn2PCRequest) SetOperation(val *string) { + p.Operation = val +} +func (p *TLoadTxn2PCRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TLoadTxn2PCRequest) SetToken(val *string) { + p.Token = val +} +func (p *TLoadTxn2PCRequest) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} +func (p *TLoadTxn2PCRequest) SetLabel(val *string) { + p.Label = val +} +func (p *TLoadTxn2PCRequest) SetAuthCodeUuid(val *string) { + p.AuthCodeUuid = val +} + +var fieldIDToName_TLoadTxn2PCRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "user_ip", + 6: "txnId", + 7: "operation", + 8: "auth_code", + 9: "token", + 10: "thrift_rpc_timeout_ms", + 11: "label", + 1000: "auth_code_uuid", +} + +func (p *TLoadTxn2PCRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TLoadTxn2PCRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TLoadTxn2PCRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TLoadTxn2PCRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TLoadTxn2PCRequest) IsSetOperation() bool { + return p.Operation != nil +} + +func (p *TLoadTxn2PCRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TLoadTxn2PCRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TLoadTxn2PCRequest) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil +} + +func (p *TLoadTxn2PCRequest) IsSetLabel() bool { + return p.Label != nil +} + +func (p *TLoadTxn2PCRequest) IsSetAuthCodeUuid() bool { + return p.AuthCodeUuid != nil +} + +func (p *TLoadTxn2PCRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = 
p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRING { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCRequest[fieldId])) +} + +func (p *TLoadTxn2PCRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.User = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Passwd = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField5(iprot 
thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Operation = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil +} +func (p *TLoadTxn2PCRequest) ReadField1000(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AuthCodeUuid = _field + return nil +} + +func (p *TLoadTxn2PCRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxn2PCRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p 
*TLoadTxn2PCRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetOperation() { + if err = oprot.WriteFieldBegin("operation", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Operation); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCodeUuid() { + if err = oprot.WriteFieldBegin("auth_code_uuid", thrift.STRING, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AuthCodeUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TLoadTxn2PCRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxn2PCRequest(%+v)", *p) + +} + +func (p *TLoadTxn2PCRequest) DeepEqual(ano *TLoadTxn2PCRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.UserIp) { + return false + } + if !p.Field6DeepEqual(ano.TxnId) { + return false + } + if !p.Field7DeepEqual(ano.Operation) { + return false + } + if !p.Field8DeepEqual(ano.AuthCode) { + return false + } + if !p.Field9DeepEqual(ano.Token) { + return false + } + if !p.Field10DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + if !p.Field11DeepEqual(ano.Label) { + return false + } + if !p.Field1000DeepEqual(ano.AuthCodeUuid) { + return false + } + return true +} + +func (p *TLoadTxn2PCRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Passwd, src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field5DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field6DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field7DeepEqual(src *string) bool { + + if p.Operation == src { + return true + } else if p.Operation == nil || src == nil { + return false + } + if strings.Compare(*p.Operation, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field8DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field9DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field10DeepEqual(src *int64) bool { + + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false + } + if *p.ThriftRpcTimeoutMs != *src { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field11DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, 
*src) != 0 { + return false + } + return true +} +func (p *TLoadTxn2PCRequest) Field1000DeepEqual(src *string) bool { + + if p.AuthCodeUuid == src { + return true + } else if p.AuthCodeUuid == nil || src == nil { + return false + } + if strings.Compare(*p.AuthCodeUuid, *src) != 0 { + return false + } + return true +} + +type TLoadTxn2PCResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +} + +func NewTLoadTxn2PCResult_() *TLoadTxn2PCResult_ { + return &TLoadTxn2PCResult_{} +} + +func (p *TLoadTxn2PCResult_) InitDefault() { +} + +var TLoadTxn2PCResult__Status_DEFAULT *status.TStatus + +func (p *TLoadTxn2PCResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TLoadTxn2PCResult__Status_DEFAULT + } + return p.Status +} +func (p *TLoadTxn2PCResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TLoadTxn2PCResult_ = map[int16]string{ + 1: "status", +} + +func (p *TLoadTxn2PCResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TLoadTxn2PCResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCResult_[fieldId])) +} + +func (p *TLoadTxn2PCResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TLoadTxn2PCResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxn2PCResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + 
goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxn2PCResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLoadTxn2PCResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxn2PCResult_(%+v)", *p) + +} + +func (p *TLoadTxn2PCResult_) DeepEqual(ano *TLoadTxn2PCResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TLoadTxn2PCResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TRollbackTxnRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" json:"user_ip,omitempty"` + TxnId *int64 `thrift:"txn_id,6,optional" frugal:"6,optional,i64" json:"txn_id,omitempty"` + Reason *string `thrift:"reason,7,optional" frugal:"7,optional,string" json:"reason,omitempty"` + AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` + TxnCommitAttachment *TTxnCommitAttachment `thrift:"txn_commit_attachment,10,optional" frugal:"10,optional,TTxnCommitAttachment" json:"txn_commit_attachment,omitempty"` + Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` +} + +func NewTRollbackTxnRequest() *TRollbackTxnRequest { + return &TRollbackTxnRequest{} +} + +func (p *TRollbackTxnRequest) InitDefault() { +} + +var TRollbackTxnRequest_Cluster_DEFAULT string + +func (p *TRollbackTxnRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TRollbackTxnRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TRollbackTxnRequest_User_DEFAULT string + +func (p *TRollbackTxnRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TRollbackTxnRequest_User_DEFAULT + } + return *p.User +} + +var TRollbackTxnRequest_Passwd_DEFAULT string + +func (p *TRollbackTxnRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TRollbackTxnRequest_Passwd_DEFAULT + } + return *p.Passwd +} + +var TRollbackTxnRequest_Db_DEFAULT string + +func (p *TRollbackTxnRequest) GetDb() (v string) { + if 
!p.IsSetDb() { + return TRollbackTxnRequest_Db_DEFAULT + } + return *p.Db +} + +var TRollbackTxnRequest_UserIp_DEFAULT string + +func (p *TRollbackTxnRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TRollbackTxnRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +var TRollbackTxnRequest_TxnId_DEFAULT int64 + +func (p *TRollbackTxnRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TRollbackTxnRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TRollbackTxnRequest_Reason_DEFAULT string + +func (p *TRollbackTxnRequest) GetReason() (v string) { + if !p.IsSetReason() { + return TRollbackTxnRequest_Reason_DEFAULT + } + return *p.Reason +} + +var TRollbackTxnRequest_AuthCode_DEFAULT int64 + +func (p *TRollbackTxnRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TRollbackTxnRequest_AuthCode_DEFAULT + } + return *p.AuthCode +} + +var TRollbackTxnRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment + +func (p *TRollbackTxnRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { + if !p.IsSetTxnCommitAttachment() { + return TRollbackTxnRequest_TxnCommitAttachment_DEFAULT + } + return p.TxnCommitAttachment +} + +var TRollbackTxnRequest_Token_DEFAULT string + +func (p *TRollbackTxnRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TRollbackTxnRequest_Token_DEFAULT + } + return *p.Token +} + +var TRollbackTxnRequest_DbId_DEFAULT int64 + +func (p *TRollbackTxnRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TRollbackTxnRequest_DbId_DEFAULT + } + return *p.DbId +} +func (p *TRollbackTxnRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TRollbackTxnRequest) SetUser(val *string) { + p.User = val +} +func (p *TRollbackTxnRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TRollbackTxnRequest) SetDb(val *string) { + p.Db = val +} +func (p *TRollbackTxnRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TRollbackTxnRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TRollbackTxnRequest) SetReason(val *string) { + p.Reason = val +} +func (p *TRollbackTxnRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TRollbackTxnRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { + p.TxnCommitAttachment = val +} +func (p *TRollbackTxnRequest) SetToken(val *string) { + p.Token = val +} +func (p *TRollbackTxnRequest) SetDbId(val *int64) { + p.DbId = val +} + +var fieldIDToName_TRollbackTxnRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "user_ip", + 6: "txn_id", + 7: "reason", + 9: "auth_code", + 10: "txn_commit_attachment", + 11: "token", + 12: "db_id", +} + +func (p *TRollbackTxnRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TRollbackTxnRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TRollbackTxnRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TRollbackTxnRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TRollbackTxnRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TRollbackTxnRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TRollbackTxnRequest) IsSetReason() bool { + return p.Reason != nil +} + +func (p *TRollbackTxnRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TRollbackTxnRequest) IsSetTxnCommitAttachment() bool { + return p.TxnCommitAttachment != nil +} + +func (p *TRollbackTxnRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TRollbackTxnRequest) IsSetDbId() bool { + 
return p.DbId != nil +} + +func (p *TRollbackTxnRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: 
", p), err) +} + +func (p *TRollbackTxnRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Passwd = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Reason = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField10(iprot thrift.TProtocol) error { + _field := NewTTxnCommitAttachment() + if err := _field.Read(iprot); err != nil { + return err + } + p.TxnCommitAttachment = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TRollbackTxnRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} + +func (p *TRollbackTxnRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRollbackTxnRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err 
= p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetReason() { + if err = oprot.WriteFieldBegin("reason", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Reason); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnCommitAttachment() { + if err = oprot.WriteFieldBegin("txn_commit_attachment", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnCommitAttachment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TRollbackTxnRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRollbackTxnRequest(%+v)", *p) + +} + +func (p *TRollbackTxnRequest) DeepEqual(ano *TRollbackTxnRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.UserIp) { + return false + } + if !p.Field6DeepEqual(ano.TxnId) { + return false + } + if !p.Field7DeepEqual(ano.Reason) { + return false + } + if !p.Field9DeepEqual(ano.AuthCode) { + return false + } + if !p.Field10DeepEqual(ano.TxnCommitAttachment) { + return false + } + if !p.Field11DeepEqual(ano.Token) { + return false + } + if !p.Field12DeepEqual(ano.DbId) { + return false + } + return true +} + +func (p *TRollbackTxnRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field3DeepEqual(src *string) bool { + + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { + return false + } + if strings.Compare(*p.Passwd, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field5DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field6DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field7DeepEqual(src *string) bool { + + if p.Reason == src { + return true + } else if p.Reason == nil || src == nil { + return false + } + if strings.Compare(*p.Reason, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field9DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field10DeepEqual(src *TTxnCommitAttachment) bool { + + if !p.TxnCommitAttachment.DeepEqual(src) { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field11DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TRollbackTxnRequest) Field12DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil 
|| src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} + +type TRollbackTxnResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,2,optional" frugal:"2,optional,types.TNetworkAddress" json:"master_address,omitempty"` +} + +func NewTRollbackTxnResult_() *TRollbackTxnResult_ { + return &TRollbackTxnResult_{} +} + +func (p *TRollbackTxnResult_) InitDefault() { +} + +var TRollbackTxnResult__Status_DEFAULT *status.TStatus + +func (p *TRollbackTxnResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TRollbackTxnResult__Status_DEFAULT + } + return p.Status +} + +var TRollbackTxnResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TRollbackTxnResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TRollbackTxnResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TRollbackTxnResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TRollbackTxnResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TRollbackTxnResult_ = map[int16]string{ + 1: "status", + 2: "master_address", +} + +func (p *TRollbackTxnResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TRollbackTxnResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TRollbackTxnResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRollbackTxnResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TRollbackTxnResult_) ReadField2(iprot thrift.TProtocol) error { + _field := 
types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TRollbackTxnResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRollbackTxnResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TRollbackTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TRollbackTxnResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRollbackTxnResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRollbackTxnResult_(%+v)", *p) + +} + +func (p *TRollbackTxnResult_) DeepEqual(ano *TRollbackTxnResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TRollbackTxnResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TRollbackTxnResult_) Field2DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TLoadTxnRollbackRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` + Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` + UserIp *string 
`thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + TxnId int64 `thrift:"txnId,7,required" frugal:"7,required,i64" json:"txnId"` + Reason *string `thrift:"reason,8,optional" frugal:"8,optional,string" json:"reason,omitempty"` + AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` + TxnCommitAttachment *TTxnCommitAttachment `thrift:"txnCommitAttachment,10,optional" frugal:"10,optional,TTxnCommitAttachment" json:"txnCommitAttachment,omitempty"` + Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` + DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` + Tbls []string `thrift:"tbls,13,optional" frugal:"13,optional,list" json:"tbls,omitempty"` + AuthCodeUuid *string `thrift:"auth_code_uuid,14,optional" frugal:"14,optional,string" json:"auth_code_uuid,omitempty"` + Label *string `thrift:"label,15,optional" frugal:"15,optional,string" json:"label,omitempty"` +} + +func NewTLoadTxnRollbackRequest() *TLoadTxnRollbackRequest { + return &TLoadTxnRollbackRequest{} +} + +func (p *TLoadTxnRollbackRequest) InitDefault() { +} + +var TLoadTxnRollbackRequest_Cluster_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TLoadTxnRollbackRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +func (p *TLoadTxnRollbackRequest) GetUser() (v string) { + return p.User +} + +func (p *TLoadTxnRollbackRequest) GetPasswd() (v string) { + return p.Passwd +} + +func (p *TLoadTxnRollbackRequest) GetDb() (v string) { + return p.Db +} + +func (p *TLoadTxnRollbackRequest) GetTbl() (v string) { + return p.Tbl +} + +var TLoadTxnRollbackRequest_UserIp_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TLoadTxnRollbackRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +func (p *TLoadTxnRollbackRequest) GetTxnId() (v int64) { + return p.TxnId +} + +var TLoadTxnRollbackRequest_Reason_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetReason() (v string) { + if !p.IsSetReason() { + return TLoadTxnRollbackRequest_Reason_DEFAULT + } + return *p.Reason +} + +var TLoadTxnRollbackRequest_AuthCode_DEFAULT int64 + +func (p *TLoadTxnRollbackRequest) GetAuthCode() (v int64) { + if !p.IsSetAuthCode() { + return TLoadTxnRollbackRequest_AuthCode_DEFAULT + } + return *p.AuthCode +} + +var TLoadTxnRollbackRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment + +func (p *TLoadTxnRollbackRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { + if !p.IsSetTxnCommitAttachment() { + return TLoadTxnRollbackRequest_TxnCommitAttachment_DEFAULT + } + return p.TxnCommitAttachment +} + +var TLoadTxnRollbackRequest_Token_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TLoadTxnRollbackRequest_Token_DEFAULT + } + return *p.Token +} + +var TLoadTxnRollbackRequest_DbId_DEFAULT int64 + +func (p *TLoadTxnRollbackRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TLoadTxnRollbackRequest_DbId_DEFAULT + } + return *p.DbId +} + +var TLoadTxnRollbackRequest_Tbls_DEFAULT []string + +func (p *TLoadTxnRollbackRequest) GetTbls() (v []string) { + if !p.IsSetTbls() { + return TLoadTxnRollbackRequest_Tbls_DEFAULT + } + return p.Tbls +} + +var TLoadTxnRollbackRequest_AuthCodeUuid_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetAuthCodeUuid() (v string) { + if !p.IsSetAuthCodeUuid() { + return 
TLoadTxnRollbackRequest_AuthCodeUuid_DEFAULT + } + return *p.AuthCodeUuid +} + +var TLoadTxnRollbackRequest_Label_DEFAULT string + +func (p *TLoadTxnRollbackRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TLoadTxnRollbackRequest_Label_DEFAULT + } + return *p.Label +} +func (p *TLoadTxnRollbackRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TLoadTxnRollbackRequest) SetUser(val string) { + p.User = val +} +func (p *TLoadTxnRollbackRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TLoadTxnRollbackRequest) SetDb(val string) { + p.Db = val +} +func (p *TLoadTxnRollbackRequest) SetTbl(val string) { + p.Tbl = val +} +func (p *TLoadTxnRollbackRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TLoadTxnRollbackRequest) SetTxnId(val int64) { + p.TxnId = val +} +func (p *TLoadTxnRollbackRequest) SetReason(val *string) { + p.Reason = val +} +func (p *TLoadTxnRollbackRequest) SetAuthCode(val *int64) { + p.AuthCode = val +} +func (p *TLoadTxnRollbackRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { + p.TxnCommitAttachment = val +} +func (p *TLoadTxnRollbackRequest) SetToken(val *string) { + p.Token = val +} +func (p *TLoadTxnRollbackRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TLoadTxnRollbackRequest) SetTbls(val []string) { + p.Tbls = val +} +func (p *TLoadTxnRollbackRequest) SetAuthCodeUuid(val *string) { + p.AuthCodeUuid = val +} +func (p *TLoadTxnRollbackRequest) SetLabel(val *string) { + p.Label = val +} + +var fieldIDToName_TLoadTxnRollbackRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "tbl", + 6: "user_ip", + 7: "txnId", + 8: "reason", + 9: "auth_code", + 10: "txnCommitAttachment", + 11: "token", + 12: "db_id", + 13: "tbls", + 14: "auth_code_uuid", + 15: "label", +} + +func (p *TLoadTxnRollbackRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetReason() bool { + return p.Reason != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetAuthCode() bool { + return p.AuthCode != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetTxnCommitAttachment() bool { + return p.TxnCommitAttachment != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetTbls() bool { + return p.Tbls != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetAuthCodeUuid() bool { + return p.AuthCodeUuid != nil +} + +func (p *TLoadTxnRollbackRequest) IsSetLabel() bool { + return p.Label != nil +} + +func (p *TLoadTxnRollbackRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetTxnId bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto 
ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetDb = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetTbl = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetTxnId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.LIST { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRING { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.STRING { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 7 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackRequest[fieldId])) +} + +func (p *TLoadTxnRollbackRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.User = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Passwd = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Db = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Tbl = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TxnId = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Reason = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.AuthCode = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField10(iprot thrift.TProtocol) error { + _field := NewTTxnCommitAttachment() + if err := _field.Read(iprot); err != nil { + return err + } + p.TxnCommitAttachment = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField13(iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tbls = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AuthCodeUuid = _field + return nil +} +func (p *TLoadTxnRollbackRequest) ReadField15(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil +} + +func (p *TLoadTxnRollbackRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxnRollbackRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", 
p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetReason() { + if err = 
oprot.WriteFieldBegin("reason", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Reason); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCode() { + if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.AuthCode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnCommitAttachment() { + if err = oprot.WriteFieldBegin("txnCommitAttachment", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnCommitAttachment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetTbls() { + if err = oprot.WriteFieldBegin("tbls", thrift.LIST, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Tbls)); err != nil { + return err + } + for _, v := range p.Tbls { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), 
err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthCodeUuid() { + if err = oprot.WriteFieldBegin("auth_code_uuid", thrift.STRING, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AuthCodeUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TLoadTxnRollbackRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxnRollbackRequest(%+v)", *p) + +} + +func (p *TLoadTxnRollbackRequest) DeepEqual(ano *TLoadTxnRollbackRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Tbl) { + return false + } + if !p.Field6DeepEqual(ano.UserIp) { + return false + } + if !p.Field7DeepEqual(ano.TxnId) { + return false + } + if !p.Field8DeepEqual(ano.Reason) { + return false + } + if !p.Field9DeepEqual(ano.AuthCode) { + return false + } + if !p.Field10DeepEqual(ano.TxnCommitAttachment) { + return false + } + if !p.Field11DeepEqual(ano.Token) { + return false + } + if !p.Field12DeepEqual(ano.DbId) { + return false + } + if !p.Field13DeepEqual(ano.Tbls) { + return false + } + if !p.Field14DeepEqual(ano.AuthCodeUuid) { + return false + } + if !p.Field15DeepEqual(ano.Label) { + return false + } + return true +} + +func (p *TLoadTxnRollbackRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Passwd, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field4DeepEqual(src string) bool { + + if strings.Compare(p.Db, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field5DeepEqual(src string) bool { + + if strings.Compare(p.Tbl, src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field6DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false 
+ } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field7DeepEqual(src int64) bool { + + if p.TxnId != src { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field8DeepEqual(src *string) bool { + + if p.Reason == src { + return true + } else if p.Reason == nil || src == nil { + return false + } + if strings.Compare(*p.Reason, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field9DeepEqual(src *int64) bool { + + if p.AuthCode == src { + return true + } else if p.AuthCode == nil || src == nil { + return false + } + if *p.AuthCode != *src { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field10DeepEqual(src *TTxnCommitAttachment) bool { + + if !p.TxnCommitAttachment.DeepEqual(src) { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field11DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field12DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field13DeepEqual(src []string) bool { + + if len(p.Tbls) != len(src) { + return false + } + for i, v := range p.Tbls { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TLoadTxnRollbackRequest) Field14DeepEqual(src *string) bool { + + if p.AuthCodeUuid == src { + return true + } else if p.AuthCodeUuid == nil || src == nil { + return false + } + if strings.Compare(*p.AuthCodeUuid, *src) != 0 { + return false + } + return true +} +func (p *TLoadTxnRollbackRequest) Field15DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return false + } + return true +} + +type TLoadTxnRollbackResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +} + +func NewTLoadTxnRollbackResult_() *TLoadTxnRollbackResult_ { + return &TLoadTxnRollbackResult_{} +} + +func (p *TLoadTxnRollbackResult_) InitDefault() { +} + +var TLoadTxnRollbackResult__Status_DEFAULT *status.TStatus + +func (p *TLoadTxnRollbackResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TLoadTxnRollbackResult__Status_DEFAULT + } + return p.Status +} +func (p *TLoadTxnRollbackResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TLoadTxnRollbackResult_ = map[int16]string{ + 1: "status", +} + +func (p *TLoadTxnRollbackResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TLoadTxnRollbackResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackResult_[fieldId])) +} + +func (p *TLoadTxnRollbackResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TLoadTxnRollbackResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLoadTxnRollbackResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLoadTxnRollbackResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLoadTxnRollbackResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLoadTxnRollbackResult_(%+v)", *p) + +} + +func (p *TLoadTxnRollbackResult_) DeepEqual(ano *TLoadTxnRollbackResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TLoadTxnRollbackResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TSnapshotLoaderReportRequest struct { + JobId int64 `thrift:"job_id,1,required" 
frugal:"1,required,i64" json:"job_id"` + TaskId int64 `thrift:"task_id,2,required" frugal:"2,required,i64" json:"task_id"` + TaskType types.TTaskType `thrift:"task_type,3,required" frugal:"3,required,TTaskType" json:"task_type"` + FinishedNum *int32 `thrift:"finished_num,4,optional" frugal:"4,optional,i32" json:"finished_num,omitempty"` + TotalNum *int32 `thrift:"total_num,5,optional" frugal:"5,optional,i32" json:"total_num,omitempty"` +} + +func NewTSnapshotLoaderReportRequest() *TSnapshotLoaderReportRequest { + return &TSnapshotLoaderReportRequest{} +} + +func (p *TSnapshotLoaderReportRequest) InitDefault() { +} + +func (p *TSnapshotLoaderReportRequest) GetJobId() (v int64) { + return p.JobId +} + +func (p *TSnapshotLoaderReportRequest) GetTaskId() (v int64) { + return p.TaskId +} + +func (p *TSnapshotLoaderReportRequest) GetTaskType() (v types.TTaskType) { + return p.TaskType +} + +var TSnapshotLoaderReportRequest_FinishedNum_DEFAULT int32 + +func (p *TSnapshotLoaderReportRequest) GetFinishedNum() (v int32) { + if !p.IsSetFinishedNum() { + return TSnapshotLoaderReportRequest_FinishedNum_DEFAULT + } + return *p.FinishedNum +} + +var TSnapshotLoaderReportRequest_TotalNum_DEFAULT int32 + +func (p *TSnapshotLoaderReportRequest) GetTotalNum() (v int32) { + if !p.IsSetTotalNum() { + return TSnapshotLoaderReportRequest_TotalNum_DEFAULT + } + return *p.TotalNum +} +func (p *TSnapshotLoaderReportRequest) SetJobId(val int64) { + p.JobId = val +} +func (p *TSnapshotLoaderReportRequest) SetTaskId(val int64) { + p.TaskId = val +} +func (p *TSnapshotLoaderReportRequest) SetTaskType(val types.TTaskType) { + p.TaskType = val +} +func (p *TSnapshotLoaderReportRequest) SetFinishedNum(val *int32) { + p.FinishedNum = val +} +func (p *TSnapshotLoaderReportRequest) SetTotalNum(val *int32) { + p.TotalNum = val +} + +var fieldIDToName_TSnapshotLoaderReportRequest = map[int16]string{ + 1: "job_id", + 2: "task_id", + 3: "task_type", + 4: "finished_num", + 5: "total_num", +} + +func (p *TSnapshotLoaderReportRequest) IsSetFinishedNum() bool { + return p.FinishedNum != nil +} + +func (p *TSnapshotLoaderReportRequest) IsSetTotalNum() bool { + return p.TotalNum != nil +} + +func (p *TSnapshotLoaderReportRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetJobId bool = false + var issetTaskId bool = false + var issetTaskType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetJobId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetTaskId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetTaskType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if 
fieldTypeId == thrift.I32 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetJobId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTaskId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotLoaderReportRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotLoaderReportRequest[fieldId])) +} + +func (p *TSnapshotLoaderReportRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.JobId = _field + return nil +} +func (p *TSnapshotLoaderReportRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TaskId = _field + return nil +} +func (p *TSnapshotLoaderReportRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TTaskType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = types.TTaskType(v) + } + p.TaskType = _field + return nil +} +func (p *TSnapshotLoaderReportRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.FinishedNum = _field + return nil +} +func (p *TSnapshotLoaderReportRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.TotalNum = _field + return nil +} + +func (p *TSnapshotLoaderReportRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSnapshotLoaderReportRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto 
WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.JobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TaskId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_type", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.TaskType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFinishedNum() { + if err = oprot.WriteFieldBegin("finished_num", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FinishedNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalNum() { + if err = oprot.WriteFieldBegin("total_num", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TotalNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TSnapshotLoaderReportRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSnapshotLoaderReportRequest(%+v)", *p) + +} + +func (p 
*TSnapshotLoaderReportRequest) DeepEqual(ano *TSnapshotLoaderReportRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.JobId) { + return false + } + if !p.Field2DeepEqual(ano.TaskId) { + return false + } + if !p.Field3DeepEqual(ano.TaskType) { + return false + } + if !p.Field4DeepEqual(ano.FinishedNum) { + return false + } + if !p.Field5DeepEqual(ano.TotalNum) { + return false + } + return true +} + +func (p *TSnapshotLoaderReportRequest) Field1DeepEqual(src int64) bool { + + if p.JobId != src { + return false + } + return true +} +func (p *TSnapshotLoaderReportRequest) Field2DeepEqual(src int64) bool { + + if p.TaskId != src { + return false + } + return true +} +func (p *TSnapshotLoaderReportRequest) Field3DeepEqual(src types.TTaskType) bool { + + if p.TaskType != src { + return false + } + return true +} +func (p *TSnapshotLoaderReportRequest) Field4DeepEqual(src *int32) bool { + + if p.FinishedNum == src { + return true + } else if p.FinishedNum == nil || src == nil { + return false + } + if *p.FinishedNum != *src { + return false + } + return true +} +func (p *TSnapshotLoaderReportRequest) Field5DeepEqual(src *int32) bool { + + if p.TotalNum == src { + return true + } else if p.TotalNum == nil || src == nil { + return false + } + if *p.TotalNum != *src { + return false + } + return true +} + +type TFrontendPingFrontendRequest struct { + ClusterId int32 `thrift:"clusterId,1,required" frugal:"1,required,i32" json:"clusterId"` + Token string `thrift:"token,2,required" frugal:"2,required,string" json:"token"` + DeployMode *string `thrift:"deployMode,3,optional" frugal:"3,optional,string" json:"deployMode,omitempty"` +} + +func NewTFrontendPingFrontendRequest() *TFrontendPingFrontendRequest { + return &TFrontendPingFrontendRequest{} +} + +func (p *TFrontendPingFrontendRequest) InitDefault() { +} + +func (p *TFrontendPingFrontendRequest) GetClusterId() (v int32) { + return p.ClusterId +} + +func (p *TFrontendPingFrontendRequest) GetToken() (v string) { + return p.Token +} + +var TFrontendPingFrontendRequest_DeployMode_DEFAULT string + +func (p *TFrontendPingFrontendRequest) GetDeployMode() (v string) { + if !p.IsSetDeployMode() { + return TFrontendPingFrontendRequest_DeployMode_DEFAULT + } + return *p.DeployMode +} +func (p *TFrontendPingFrontendRequest) SetClusterId(val int32) { + p.ClusterId = val +} +func (p *TFrontendPingFrontendRequest) SetToken(val string) { + p.Token = val +} +func (p *TFrontendPingFrontendRequest) SetDeployMode(val *string) { + p.DeployMode = val +} + +var fieldIDToName_TFrontendPingFrontendRequest = map[int16]string{ + 1: "clusterId", + 2: "token", + 3: "deployMode", +} + +func (p *TFrontendPingFrontendRequest) IsSetDeployMode() bool { + return p.DeployMode != nil +} + +func (p *TFrontendPingFrontendRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetClusterId bool = false + var issetToken bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetClusterId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { 
+ if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetToken = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetClusterId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetToken { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendRequest[fieldId])) +} + +func (p *TFrontendPingFrontendRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ClusterId = _field + return nil +} +func (p *TFrontendPingFrontendRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Token = _field + return nil +} +func (p *TFrontendPingFrontendRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DeployMode = _field + return nil +} + +func (p *TFrontendPingFrontendRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFrontendPingFrontendRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFrontendPingFrontendRequest) writeField1(oprot thrift.TProtocol) (err 
error) { + if err = oprot.WriteFieldBegin("clusterId", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ClusterId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFrontendPingFrontendRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFrontendPingFrontendRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDeployMode() { + if err = oprot.WriteFieldBegin("deployMode", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DeployMode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFrontendPingFrontendRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFrontendPingFrontendRequest(%+v)", *p) + +} + +func (p *TFrontendPingFrontendRequest) DeepEqual(ano *TFrontendPingFrontendRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ClusterId) { + return false + } + if !p.Field2DeepEqual(ano.Token) { + return false + } + if !p.Field3DeepEqual(ano.DeployMode) { + return false + } + return true +} + +func (p *TFrontendPingFrontendRequest) Field1DeepEqual(src int32) bool { + + if p.ClusterId != src { + return false + } + return true +} +func (p *TFrontendPingFrontendRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Token, src) != 0 { + return false + } + return true +} +func (p *TFrontendPingFrontendRequest) Field3DeepEqual(src *string) bool { + + if p.DeployMode == src { + return true + } else if p.DeployMode == nil || src == nil { + return false + } + if strings.Compare(*p.DeployMode, *src) != 0 { + return false + } + return true +} + +type TDiskInfo struct { + DirType string `thrift:"dirType,1,required" frugal:"1,required,string" json:"dirType"` + Dir string `thrift:"dir,2,required" frugal:"2,required,string" json:"dir"` + Filesystem string `thrift:"filesystem,3,required" frugal:"3,required,string" json:"filesystem"` + Blocks int64 `thrift:"blocks,4,required" frugal:"4,required,i64" json:"blocks"` + Used int64 `thrift:"used,5,required" frugal:"5,required,i64" json:"used"` + Available int64 `thrift:"available,6,required" frugal:"6,required,i64" json:"available"` + UseRate int32 `thrift:"useRate,7,required" frugal:"7,required,i32" json:"useRate"` + MountedOn string `thrift:"mountedOn,8,required" frugal:"8,required,string" json:"mountedOn"` +} + +func NewTDiskInfo() *TDiskInfo { + return &TDiskInfo{} 
+} + +func (p *TDiskInfo) InitDefault() { +} + +func (p *TDiskInfo) GetDirType() (v string) { + return p.DirType +} + +func (p *TDiskInfo) GetDir() (v string) { + return p.Dir +} + +func (p *TDiskInfo) GetFilesystem() (v string) { + return p.Filesystem +} + +func (p *TDiskInfo) GetBlocks() (v int64) { + return p.Blocks +} + +func (p *TDiskInfo) GetUsed() (v int64) { + return p.Used +} + +func (p *TDiskInfo) GetAvailable() (v int64) { + return p.Available +} + +func (p *TDiskInfo) GetUseRate() (v int32) { + return p.UseRate +} + +func (p *TDiskInfo) GetMountedOn() (v string) { + return p.MountedOn +} +func (p *TDiskInfo) SetDirType(val string) { + p.DirType = val +} +func (p *TDiskInfo) SetDir(val string) { + p.Dir = val +} +func (p *TDiskInfo) SetFilesystem(val string) { + p.Filesystem = val +} +func (p *TDiskInfo) SetBlocks(val int64) { + p.Blocks = val +} +func (p *TDiskInfo) SetUsed(val int64) { + p.Used = val +} +func (p *TDiskInfo) SetAvailable(val int64) { + p.Available = val +} +func (p *TDiskInfo) SetUseRate(val int32) { + p.UseRate = val +} +func (p *TDiskInfo) SetMountedOn(val string) { + p.MountedOn = val +} + +var fieldIDToName_TDiskInfo = map[int16]string{ + 1: "dirType", + 2: "dir", + 3: "filesystem", + 4: "blocks", + 5: "used", + 6: "available", + 7: "useRate", + 8: "mountedOn", +} + +func (p *TDiskInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetDirType bool = false + var issetDir bool = false + var issetFilesystem bool = false + var issetBlocks bool = false + var issetUsed bool = false + var issetAvailable bool = false + var issetUseRate bool = false + var issetMountedOn bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetDirType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetDir = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetFilesystem = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetBlocks = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetUsed = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + issetAvailable = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetUseRate = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = 
p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + issetMountedOn = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetDirType { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetDir { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetFilesystem { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetBlocks { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetUsed { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetAvailable { + fieldId = 6 + goto RequiredFieldNotSetError + } + + if !issetUseRate { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetMountedOn { + fieldId = 8 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDiskInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TDiskInfo[fieldId])) +} + +func (p *TDiskInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.DirType = _field + return nil +} +func (p *TDiskInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Dir = _field + return nil +} +func (p *TDiskInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Filesystem = _field + return nil +} +func (p *TDiskInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Blocks = _field + return nil +} +func (p *TDiskInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Used = _field + return nil +} +func (p *TDiskInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Available = _field + return nil +} +func (p *TDiskInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.UseRate = _field + return nil +} +func (p *TDiskInfo) ReadField8(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.MountedOn = _field + return nil +} + 
+func (p *TDiskInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TDiskInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TDiskInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("dirType", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.DirType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TDiskInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("dir", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Dir); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TDiskInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("filesystem", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Filesystem); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TDiskInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("blocks", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.Blocks); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TDiskInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("used", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.Used); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TDiskInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("available", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.Available); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TDiskInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("useRate", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.UseRate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TDiskInfo) writeField8(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("mountedOn", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.MountedOn); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TDiskInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDiskInfo(%+v)", *p) + +} + +func (p *TDiskInfo) DeepEqual(ano *TDiskInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DirType) { + return false + } + if !p.Field2DeepEqual(ano.Dir) { + return false + } + if !p.Field3DeepEqual(ano.Filesystem) { + return false + } + if !p.Field4DeepEqual(ano.Blocks) { + return false + } + if !p.Field5DeepEqual(ano.Used) { + return false + } + if !p.Field6DeepEqual(ano.Available) { + return false + } + if !p.Field7DeepEqual(ano.UseRate) { + return false + } + if !p.Field8DeepEqual(ano.MountedOn) { + return false + } + return true +} + +func (p *TDiskInfo) Field1DeepEqual(src string) bool { + + if strings.Compare(p.DirType, src) != 0 { + return false + } + return true +} +func (p *TDiskInfo) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Dir, src) != 0 { + return false + } + return true +} +func (p *TDiskInfo) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Filesystem, src) != 0 { + return false + } + return true +} +func 
(p *TDiskInfo) Field4DeepEqual(src int64) bool { + + if p.Blocks != src { + return false + } + return true +} +func (p *TDiskInfo) Field5DeepEqual(src int64) bool { + + if p.Used != src { + return false + } + return true +} +func (p *TDiskInfo) Field6DeepEqual(src int64) bool { + + if p.Available != src { + return false + } + return true +} +func (p *TDiskInfo) Field7DeepEqual(src int32) bool { + + if p.UseRate != src { + return false + } + return true +} +func (p *TDiskInfo) Field8DeepEqual(src string) bool { + + if strings.Compare(p.MountedOn, src) != 0 { + return false + } + return true +} + +type TFrontendPingFrontendResult_ struct { + Status TFrontendPingFrontendStatusCode `thrift:"status,1,required" frugal:"1,required,TFrontendPingFrontendStatusCode" json:"status"` + Msg string `thrift:"msg,2,required" frugal:"2,required,string" json:"msg"` + QueryPort int32 `thrift:"queryPort,3,required" frugal:"3,required,i32" json:"queryPort"` + RpcPort int32 `thrift:"rpcPort,4,required" frugal:"4,required,i32" json:"rpcPort"` + ReplayedJournalId int64 `thrift:"replayedJournalId,5,required" frugal:"5,required,i64" json:"replayedJournalId"` + Version string `thrift:"version,6,required" frugal:"6,required,string" json:"version"` + LastStartupTime *int64 `thrift:"lastStartupTime,7,optional" frugal:"7,optional,i64" json:"lastStartupTime,omitempty"` + DiskInfos []*TDiskInfo `thrift:"diskInfos,8,optional" frugal:"8,optional,list" json:"diskInfos,omitempty"` + ProcessUUID *int64 `thrift:"processUUID,9,optional" frugal:"9,optional,i64" json:"processUUID,omitempty"` + ArrowFlightSqlPort *int32 `thrift:"arrowFlightSqlPort,10,optional" frugal:"10,optional,i32" json:"arrowFlightSqlPort,omitempty"` +} + +func NewTFrontendPingFrontendResult_() *TFrontendPingFrontendResult_ { + return &TFrontendPingFrontendResult_{} +} + +func (p *TFrontendPingFrontendResult_) InitDefault() { +} + +func (p *TFrontendPingFrontendResult_) GetStatus() (v TFrontendPingFrontendStatusCode) { + return p.Status +} + +func (p *TFrontendPingFrontendResult_) GetMsg() (v string) { + return p.Msg +} + +func (p *TFrontendPingFrontendResult_) GetQueryPort() (v int32) { + return p.QueryPort +} + +func (p *TFrontendPingFrontendResult_) GetRpcPort() (v int32) { + return p.RpcPort +} + +func (p *TFrontendPingFrontendResult_) GetReplayedJournalId() (v int64) { + return p.ReplayedJournalId +} + +func (p *TFrontendPingFrontendResult_) GetVersion() (v string) { + return p.Version +} + +var TFrontendPingFrontendResult__LastStartupTime_DEFAULT int64 + +func (p *TFrontendPingFrontendResult_) GetLastStartupTime() (v int64) { + if !p.IsSetLastStartupTime() { + return TFrontendPingFrontendResult__LastStartupTime_DEFAULT + } + return *p.LastStartupTime +} + +var TFrontendPingFrontendResult__DiskInfos_DEFAULT []*TDiskInfo + +func (p *TFrontendPingFrontendResult_) GetDiskInfos() (v []*TDiskInfo) { + if !p.IsSetDiskInfos() { + return TFrontendPingFrontendResult__DiskInfos_DEFAULT + } + return p.DiskInfos +} + +var TFrontendPingFrontendResult__ProcessUUID_DEFAULT int64 + +func (p *TFrontendPingFrontendResult_) GetProcessUUID() (v int64) { + if !p.IsSetProcessUUID() { + return TFrontendPingFrontendResult__ProcessUUID_DEFAULT + } + return *p.ProcessUUID +} + +var TFrontendPingFrontendResult__ArrowFlightSqlPort_DEFAULT int32 + +func (p *TFrontendPingFrontendResult_) GetArrowFlightSqlPort() (v int32) { + if !p.IsSetArrowFlightSqlPort() { + return TFrontendPingFrontendResult__ArrowFlightSqlPort_DEFAULT + } + return *p.ArrowFlightSqlPort +} +func (p 
*TFrontendPingFrontendResult_) SetStatus(val TFrontendPingFrontendStatusCode) { + p.Status = val +} +func (p *TFrontendPingFrontendResult_) SetMsg(val string) { + p.Msg = val +} +func (p *TFrontendPingFrontendResult_) SetQueryPort(val int32) { + p.QueryPort = val +} +func (p *TFrontendPingFrontendResult_) SetRpcPort(val int32) { + p.RpcPort = val +} +func (p *TFrontendPingFrontendResult_) SetReplayedJournalId(val int64) { + p.ReplayedJournalId = val +} +func (p *TFrontendPingFrontendResult_) SetVersion(val string) { + p.Version = val +} +func (p *TFrontendPingFrontendResult_) SetLastStartupTime(val *int64) { + p.LastStartupTime = val +} +func (p *TFrontendPingFrontendResult_) SetDiskInfos(val []*TDiskInfo) { + p.DiskInfos = val +} +func (p *TFrontendPingFrontendResult_) SetProcessUUID(val *int64) { + p.ProcessUUID = val +} +func (p *TFrontendPingFrontendResult_) SetArrowFlightSqlPort(val *int32) { + p.ArrowFlightSqlPort = val +} + +var fieldIDToName_TFrontendPingFrontendResult_ = map[int16]string{ + 1: "status", + 2: "msg", + 3: "queryPort", + 4: "rpcPort", + 5: "replayedJournalId", + 6: "version", + 7: "lastStartupTime", + 8: "diskInfos", + 9: "processUUID", + 10: "arrowFlightSqlPort", +} + +func (p *TFrontendPingFrontendResult_) IsSetLastStartupTime() bool { + return p.LastStartupTime != nil +} + +func (p *TFrontendPingFrontendResult_) IsSetDiskInfos() bool { + return p.DiskInfos != nil +} + +func (p *TFrontendPingFrontendResult_) IsSetProcessUUID() bool { + return p.ProcessUUID != nil +} + +func (p *TFrontendPingFrontendResult_) IsSetArrowFlightSqlPort() bool { + return p.ArrowFlightSqlPort != nil +} + +func (p *TFrontendPingFrontendResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + var issetMsg bool = false + var issetQueryPort bool = false + var issetRpcPort bool = false + var issetReplayedJournalId bool = false + var issetVersion bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetMsg = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetQueryPort = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetRpcPort = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetReplayedJournalId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + issetVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + 
} + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.LIST { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetMsg { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetQueryPort { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetRpcPort { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetReplayedJournalId { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetVersion { + fieldId = 6 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendResult_[fieldId])) +} + +func (p *TFrontendPingFrontendResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field TFrontendPingFrontendStatusCode + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TFrontendPingFrontendStatusCode(v) + } + p.Status = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Msg = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.QueryPort = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RpcPort = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } 
+ p.ReplayedJournalId = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField6(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Version = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LastStartupTime = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TDiskInfo, 0, size) + values := make([]TDiskInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DiskInfos = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ProcessUUID = _field + return nil +} +func (p *TFrontendPingFrontendResult_) ReadField10(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ArrowFlightSqlPort = _field + return nil +} + +func (p *TFrontendPingFrontendResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFrontendPingFrontendResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + 
return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("msg", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Msg); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("queryPort", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.QueryPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("rpcPort", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.RpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("replayedJournalId", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ReplayedJournalId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("version", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetLastStartupTime() { + if err = oprot.WriteFieldBegin("lastStartupTime", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LastStartupTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetDiskInfos() { + if err = oprot.WriteFieldBegin("diskInfos", thrift.LIST, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DiskInfos)); err != nil { + return err + } + for _, v := range p.DiskInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetProcessUUID() { + if err = oprot.WriteFieldBegin("processUUID", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ProcessUUID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetArrowFlightSqlPort() { + if err = oprot.WriteFieldBegin("arrowFlightSqlPort", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ArrowFlightSqlPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TFrontendPingFrontendResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFrontendPingFrontendResult_(%+v)", *p) + +} + +func (p *TFrontendPingFrontendResult_) DeepEqual(ano *TFrontendPingFrontendResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Msg) { + return false + } + if !p.Field3DeepEqual(ano.QueryPort) { + return false + } + if !p.Field4DeepEqual(ano.RpcPort) { + return false + } + if !p.Field5DeepEqual(ano.ReplayedJournalId) { + return false + } + if !p.Field6DeepEqual(ano.Version) { + return false + } + if !p.Field7DeepEqual(ano.LastStartupTime) { + return false + } + if !p.Field8DeepEqual(ano.DiskInfos) { + return false + } + if !p.Field9DeepEqual(ano.ProcessUUID) { + return false + } + if !p.Field10DeepEqual(ano.ArrowFlightSqlPort) { + return false + } + return true +} + +func (p *TFrontendPingFrontendResult_) Field1DeepEqual(src TFrontendPingFrontendStatusCode) bool { + + if p.Status != src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Msg, src) != 0 { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) 
Field3DeepEqual(src int32) bool { + + if p.QueryPort != src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field4DeepEqual(src int32) bool { + + if p.RpcPort != src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field5DeepEqual(src int64) bool { + + if p.ReplayedJournalId != src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field6DeepEqual(src string) bool { + + if strings.Compare(p.Version, src) != 0 { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field7DeepEqual(src *int64) bool { + + if p.LastStartupTime == src { + return true + } else if p.LastStartupTime == nil || src == nil { + return false + } + if *p.LastStartupTime != *src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field8DeepEqual(src []*TDiskInfo) bool { + + if len(p.DiskInfos) != len(src) { + return false + } + for i, v := range p.DiskInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFrontendPingFrontendResult_) Field9DeepEqual(src *int64) bool { + + if p.ProcessUUID == src { + return true + } else if p.ProcessUUID == nil || src == nil { + return false + } + if *p.ProcessUUID != *src { + return false + } + return true +} +func (p *TFrontendPingFrontendResult_) Field10DeepEqual(src *int32) bool { + + if p.ArrowFlightSqlPort == src { + return true + } else if p.ArrowFlightSqlPort == nil || src == nil { + return false + } + if *p.ArrowFlightSqlPort != *src { + return false + } + return true +} + +type TPropertyVal struct { + StrVal *string `thrift:"strVal,1,optional" frugal:"1,optional,string" json:"strVal,omitempty"` + IntVal *int32 `thrift:"intVal,2,optional" frugal:"2,optional,i32" json:"intVal,omitempty"` + LongVal *int64 `thrift:"longVal,3,optional" frugal:"3,optional,i64" json:"longVal,omitempty"` + BoolVal *bool `thrift:"boolVal,4,optional" frugal:"4,optional,bool" json:"boolVal,omitempty"` +} + +func NewTPropertyVal() *TPropertyVal { + return &TPropertyVal{} +} + +func (p *TPropertyVal) InitDefault() { +} + +var TPropertyVal_StrVal_DEFAULT string + +func (p *TPropertyVal) GetStrVal() (v string) { + if !p.IsSetStrVal() { + return TPropertyVal_StrVal_DEFAULT + } + return *p.StrVal +} + +var TPropertyVal_IntVal_DEFAULT int32 + +func (p *TPropertyVal) GetIntVal() (v int32) { + if !p.IsSetIntVal() { + return TPropertyVal_IntVal_DEFAULT + } + return *p.IntVal +} + +var TPropertyVal_LongVal_DEFAULT int64 + +func (p *TPropertyVal) GetLongVal() (v int64) { + if !p.IsSetLongVal() { + return TPropertyVal_LongVal_DEFAULT + } + return *p.LongVal +} + +var TPropertyVal_BoolVal_DEFAULT bool + +func (p *TPropertyVal) GetBoolVal() (v bool) { + if !p.IsSetBoolVal() { + return TPropertyVal_BoolVal_DEFAULT + } + return *p.BoolVal +} +func (p *TPropertyVal) SetStrVal(val *string) { + p.StrVal = val +} +func (p *TPropertyVal) SetIntVal(val *int32) { + p.IntVal = val +} +func (p *TPropertyVal) SetLongVal(val *int64) { + p.LongVal = val +} +func (p *TPropertyVal) SetBoolVal(val *bool) { + p.BoolVal = val +} + +var fieldIDToName_TPropertyVal = map[int16]string{ + 1: "strVal", + 2: "intVal", + 3: "longVal", + 4: "boolVal", +} + +func (p *TPropertyVal) IsSetStrVal() bool { + return p.StrVal != nil +} + +func (p *TPropertyVal) IsSetIntVal() bool { + return p.IntVal != nil +} + +func (p *TPropertyVal) IsSetLongVal() bool { + return p.LongVal != nil +} + +func (p *TPropertyVal) IsSetBoolVal() bool { + return p.BoolVal != nil +} + +func (p 
*TPropertyVal) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPropertyVal[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPropertyVal) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.StrVal = _field + return nil +} +func (p *TPropertyVal) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.IntVal = _field + return nil +} +func (p *TPropertyVal) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LongVal = _field + return nil +} +func (p *TPropertyVal) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.BoolVal = _field + return nil +} + +func (p *TPropertyVal) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPropertyVal"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = 
oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPropertyVal) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStrVal() { + if err = oprot.WriteFieldBegin("strVal", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.StrVal); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPropertyVal) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIntVal() { + if err = oprot.WriteFieldBegin("intVal", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.IntVal); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPropertyVal) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLongVal() { + if err = oprot.WriteFieldBegin("longVal", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LongVal); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPropertyVal) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBoolVal() { + if err = oprot.WriteFieldBegin("boolVal", thrift.BOOL, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.BoolVal); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPropertyVal) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPropertyVal(%+v)", *p) + +} + +func (p *TPropertyVal) DeepEqual(ano *TPropertyVal) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.StrVal) { + return false + } + if !p.Field2DeepEqual(ano.IntVal) { + return false + } + if !p.Field3DeepEqual(ano.LongVal) { + return false + } + if !p.Field4DeepEqual(ano.BoolVal) { + return false + } + return true +} + +func (p *TPropertyVal) Field1DeepEqual(src *string) bool { + + if p.StrVal == src { + return true + } else if 
p.StrVal == nil || src == nil { + return false + } + if strings.Compare(*p.StrVal, *src) != 0 { + return false + } + return true +} +func (p *TPropertyVal) Field2DeepEqual(src *int32) bool { + + if p.IntVal == src { + return true + } else if p.IntVal == nil || src == nil { + return false + } + if *p.IntVal != *src { + return false + } + return true +} +func (p *TPropertyVal) Field3DeepEqual(src *int64) bool { + + if p.LongVal == src { + return true + } else if p.LongVal == nil || src == nil { + return false + } + if *p.LongVal != *src { + return false + } + return true +} +func (p *TPropertyVal) Field4DeepEqual(src *bool) bool { + + if p.BoolVal == src { + return true + } else if p.BoolVal == nil || src == nil { + return false + } + if *p.BoolVal != *src { + return false + } + return true +} + +type TWaitingTxnStatusRequest struct { + DbId *int64 `thrift:"db_id,1,optional" frugal:"1,optional,i64" json:"db_id,omitempty"` + TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` + Label *string `thrift:"label,3,optional" frugal:"3,optional,string" json:"label,omitempty"` +} + +func NewTWaitingTxnStatusRequest() *TWaitingTxnStatusRequest { + return &TWaitingTxnStatusRequest{} +} + +func (p *TWaitingTxnStatusRequest) InitDefault() { +} + +var TWaitingTxnStatusRequest_DbId_DEFAULT int64 + +func (p *TWaitingTxnStatusRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TWaitingTxnStatusRequest_DbId_DEFAULT + } + return *p.DbId +} + +var TWaitingTxnStatusRequest_TxnId_DEFAULT int64 + +func (p *TWaitingTxnStatusRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TWaitingTxnStatusRequest_TxnId_DEFAULT + } + return *p.TxnId +} + +var TWaitingTxnStatusRequest_Label_DEFAULT string + +func (p *TWaitingTxnStatusRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TWaitingTxnStatusRequest_Label_DEFAULT + } + return *p.Label +} +func (p *TWaitingTxnStatusRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TWaitingTxnStatusRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TWaitingTxnStatusRequest) SetLabel(val *string) { + p.Label = val +} + +var fieldIDToName_TWaitingTxnStatusRequest = map[int16]string{ + 1: "db_id", + 2: "txn_id", + 3: "label", +} + +func (p *TWaitingTxnStatusRequest) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TWaitingTxnStatusRequest) IsSetTxnId() bool { + return p.TxnId != nil +} + +func (p *TWaitingTxnStatusRequest) IsSetLabel() bool { + return p.Label != nil +} + +func (p *TWaitingTxnStatusRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != 
nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TWaitingTxnStatusRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TWaitingTxnStatusRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil +} + +func (p *TWaitingTxnStatusRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWaitingTxnStatusRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWaitingTxnStatusRequest(%+v)", *p) + +} + +func (p *TWaitingTxnStatusRequest) DeepEqual(ano *TWaitingTxnStatusRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DbId) { + return false + } + if !p.Field2DeepEqual(ano.TxnId) { + return false + } + if !p.Field3DeepEqual(ano.Label) { + return false + } + return true +} + +func (p *TWaitingTxnStatusRequest) Field1DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TWaitingTxnStatusRequest) Field2DeepEqual(src *int64) bool { + + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true +} +func (p *TWaitingTxnStatusRequest) Field3DeepEqual(src *string) bool { + + if p.Label == src { + return true + } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return false + } + return true +} + +type TWaitingTxnStatusResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + TxnStatusId *int32 `thrift:"txn_status_id,2,optional" frugal:"2,optional,i32" json:"txn_status_id,omitempty"` +} + +func NewTWaitingTxnStatusResult_() *TWaitingTxnStatusResult_ { + return &TWaitingTxnStatusResult_{} +} + +func (p *TWaitingTxnStatusResult_) InitDefault() { +} + +var TWaitingTxnStatusResult__Status_DEFAULT *status.TStatus + +func (p *TWaitingTxnStatusResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TWaitingTxnStatusResult__Status_DEFAULT + } + return p.Status +} + +var TWaitingTxnStatusResult__TxnStatusId_DEFAULT int32 + +func (p *TWaitingTxnStatusResult_) GetTxnStatusId() (v int32) { + if !p.IsSetTxnStatusId() { + return TWaitingTxnStatusResult__TxnStatusId_DEFAULT + } + return *p.TxnStatusId +} +func (p *TWaitingTxnStatusResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TWaitingTxnStatusResult_) SetTxnStatusId(val *int32) { + p.TxnStatusId = val +} + +var fieldIDToName_TWaitingTxnStatusResult_ = map[int16]string{ + 1: "status", + 2: "txn_status_id", +} + +func (p *TWaitingTxnStatusResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TWaitingTxnStatusResult_) IsSetTxnStatusId() bool { + return p.TxnStatusId != nil +} + +func (p *TWaitingTxnStatusResult_) Read(iprot thrift.TProtocol) 
(err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TWaitingTxnStatusResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.TxnStatusId = _field + return nil +} + +func (p *TWaitingTxnStatusResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TWaitingTxnStatusResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TWaitingTxnStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnStatusId() { + if err = oprot.WriteFieldBegin("txn_status_id", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TxnStatusId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TWaitingTxnStatusResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TWaitingTxnStatusResult_(%+v)", *p) + +} + +func (p *TWaitingTxnStatusResult_) DeepEqual(ano *TWaitingTxnStatusResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.TxnStatusId) { + return false + } + return true +} + +func (p *TWaitingTxnStatusResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TWaitingTxnStatusResult_) Field2DeepEqual(src *int32) bool { + + if p.TxnStatusId == src { + return true + } else if p.TxnStatusId == nil || src == nil { + return false + } + if *p.TxnStatusId != *src { + return false + } + return true +} + +type TInitExternalCtlMetaRequest struct { + CatalogId *int64 `thrift:"catalogId,1,optional" frugal:"1,optional,i64" json:"catalogId,omitempty"` + DbId *int64 `thrift:"dbId,2,optional" frugal:"2,optional,i64" json:"dbId,omitempty"` + TableId *int64 `thrift:"tableId,3,optional" frugal:"3,optional,i64" json:"tableId,omitempty"` +} + +func NewTInitExternalCtlMetaRequest() *TInitExternalCtlMetaRequest { + return &TInitExternalCtlMetaRequest{} +} + +func (p *TInitExternalCtlMetaRequest) InitDefault() { +} + +var TInitExternalCtlMetaRequest_CatalogId_DEFAULT int64 + +func (p *TInitExternalCtlMetaRequest) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TInitExternalCtlMetaRequest_CatalogId_DEFAULT + } + return *p.CatalogId +} + +var TInitExternalCtlMetaRequest_DbId_DEFAULT int64 + +func (p *TInitExternalCtlMetaRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TInitExternalCtlMetaRequest_DbId_DEFAULT + } + return *p.DbId +} + +var TInitExternalCtlMetaRequest_TableId_DEFAULT int64 + +func (p *TInitExternalCtlMetaRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TInitExternalCtlMetaRequest_TableId_DEFAULT + } + return *p.TableId +} +func (p *TInitExternalCtlMetaRequest) SetCatalogId(val *int64) { + p.CatalogId = val +} +func (p *TInitExternalCtlMetaRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TInitExternalCtlMetaRequest) SetTableId(val *int64) { + p.TableId = val +} + +var fieldIDToName_TInitExternalCtlMetaRequest = map[int16]string{ + 1: "catalogId", + 2: "dbId", + 3: "tableId", +} + +func (p *TInitExternalCtlMetaRequest) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TInitExternalCtlMetaRequest) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TInitExternalCtlMetaRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TInitExternalCtlMetaRequest) Read(iprot thrift.TProtocol) 
(err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CatalogId = _field + return nil +} +func (p *TInitExternalCtlMetaRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TInitExternalCtlMetaRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} + +func (p *TInitExternalCtlMetaRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TInitExternalCtlMetaRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", 
p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalogId", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CatalogId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("tableId", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TInitExternalCtlMetaRequest(%+v)", *p) + +} + +func (p *TInitExternalCtlMetaRequest) DeepEqual(ano *TInitExternalCtlMetaRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.CatalogId) { + return false + } + if !p.Field2DeepEqual(ano.DbId) { + return false + } + if !p.Field3DeepEqual(ano.TableId) { + return false + } + return true +} + +func (p *TInitExternalCtlMetaRequest) Field1DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} +func (p *TInitExternalCtlMetaRequest) Field2DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TInitExternalCtlMetaRequest) Field3DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} + +type TInitExternalCtlMetaResult_ struct { + MaxJournalId *int64 `thrift:"maxJournalId,1,optional" frugal:"1,optional,i64" json:"maxJournalId,omitempty"` + Status *string `thrift:"status,2,optional" frugal:"2,optional,string" json:"status,omitempty"` +} + +func NewTInitExternalCtlMetaResult_() *TInitExternalCtlMetaResult_ { + return &TInitExternalCtlMetaResult_{} +} + +func (p 
*TInitExternalCtlMetaResult_) InitDefault() { +} + +var TInitExternalCtlMetaResult__MaxJournalId_DEFAULT int64 + +func (p *TInitExternalCtlMetaResult_) GetMaxJournalId() (v int64) { + if !p.IsSetMaxJournalId() { + return TInitExternalCtlMetaResult__MaxJournalId_DEFAULT + } + return *p.MaxJournalId +} + +var TInitExternalCtlMetaResult__Status_DEFAULT string + +func (p *TInitExternalCtlMetaResult_) GetStatus() (v string) { + if !p.IsSetStatus() { + return TInitExternalCtlMetaResult__Status_DEFAULT + } + return *p.Status +} +func (p *TInitExternalCtlMetaResult_) SetMaxJournalId(val *int64) { + p.MaxJournalId = val +} +func (p *TInitExternalCtlMetaResult_) SetStatus(val *string) { + p.Status = val +} + +var fieldIDToName_TInitExternalCtlMetaResult_ = map[int16]string{ + 1: "maxJournalId", + 2: "status", +} + +func (p *TInitExternalCtlMetaResult_) IsSetMaxJournalId() bool { + return p.MaxJournalId != nil +} + +func (p *TInitExternalCtlMetaResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TInitExternalCtlMetaResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.MaxJournalId = _field + return nil +} +func (p *TInitExternalCtlMetaResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Status = _field + return nil +} + +func (p *TInitExternalCtlMetaResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TInitExternalCtlMetaResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto 
WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxJournalId() { + if err = oprot.WriteFieldBegin("maxJournalId", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.MaxJournalId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TInitExternalCtlMetaResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Status); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TInitExternalCtlMetaResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TInitExternalCtlMetaResult_(%+v)", *p) + +} + +func (p *TInitExternalCtlMetaResult_) DeepEqual(ano *TInitExternalCtlMetaResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.MaxJournalId) { + return false + } + if !p.Field2DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TInitExternalCtlMetaResult_) Field1DeepEqual(src *int64) bool { + + if p.MaxJournalId == src { + return true + } else if p.MaxJournalId == nil || src == nil { + return false + } + if *p.MaxJournalId != *src { + return false + } + return true +} +func (p *TInitExternalCtlMetaResult_) Field2DeepEqual(src *string) bool { + + if p.Status == src { + return true + } else if p.Status == nil || src == nil { + return false + } + if strings.Compare(*p.Status, *src) != 0 { + return false + } + return true +} + +type TMetadataTableRequestParams struct { + MetadataType *types.TMetadataType `thrift:"metadata_type,1,optional" frugal:"1,optional,TMetadataType" json:"metadata_type,omitempty"` + IcebergMetadataParams *plannodes.TIcebergMetadataParams `thrift:"iceberg_metadata_params,2,optional" frugal:"2,optional,plannodes.TIcebergMetadataParams" json:"iceberg_metadata_params,omitempty"` + BackendsMetadataParams *plannodes.TBackendsMetadataParams `thrift:"backends_metadata_params,3,optional" frugal:"3,optional,plannodes.TBackendsMetadataParams" json:"backends_metadata_params,omitempty"` + ColumnsName []string `thrift:"columns_name,4,optional" 
frugal:"4,optional,list" json:"columns_name,omitempty"` + FrontendsMetadataParams *plannodes.TFrontendsMetadataParams `thrift:"frontends_metadata_params,5,optional" frugal:"5,optional,plannodes.TFrontendsMetadataParams" json:"frontends_metadata_params,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,6,optional" frugal:"6,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + QueriesMetadataParams *plannodes.TQueriesMetadataParams `thrift:"queries_metadata_params,7,optional" frugal:"7,optional,plannodes.TQueriesMetadataParams" json:"queries_metadata_params,omitempty"` + MaterializedViewsMetadataParams *plannodes.TMaterializedViewsMetadataParams `thrift:"materialized_views_metadata_params,8,optional" frugal:"8,optional,plannodes.TMaterializedViewsMetadataParams" json:"materialized_views_metadata_params,omitempty"` + JobsMetadataParams *plannodes.TJobsMetadataParams `thrift:"jobs_metadata_params,9,optional" frugal:"9,optional,plannodes.TJobsMetadataParams" json:"jobs_metadata_params,omitempty"` + TasksMetadataParams *plannodes.TTasksMetadataParams `thrift:"tasks_metadata_params,10,optional" frugal:"10,optional,plannodes.TTasksMetadataParams" json:"tasks_metadata_params,omitempty"` + PartitionsMetadataParams *plannodes.TPartitionsMetadataParams `thrift:"partitions_metadata_params,11,optional" frugal:"11,optional,plannodes.TPartitionsMetadataParams" json:"partitions_metadata_params,omitempty"` + MetaCacheStatsParams *plannodes.TMetaCacheStatsParams `thrift:"meta_cache_stats_params,12,optional" frugal:"12,optional,plannodes.TMetaCacheStatsParams" json:"meta_cache_stats_params,omitempty"` + PartitionValuesMetadataParams *plannodes.TPartitionValuesMetadataParams `thrift:"partition_values_metadata_params,13,optional" frugal:"13,optional,plannodes.TPartitionValuesMetadataParams" json:"partition_values_metadata_params,omitempty"` +} + +func NewTMetadataTableRequestParams() *TMetadataTableRequestParams { + return &TMetadataTableRequestParams{} +} + +func (p *TMetadataTableRequestParams) InitDefault() { +} + +var TMetadataTableRequestParams_MetadataType_DEFAULT types.TMetadataType + +func (p *TMetadataTableRequestParams) GetMetadataType() (v types.TMetadataType) { + if !p.IsSetMetadataType() { + return TMetadataTableRequestParams_MetadataType_DEFAULT + } + return *p.MetadataType +} + +var TMetadataTableRequestParams_IcebergMetadataParams_DEFAULT *plannodes.TIcebergMetadataParams + +func (p *TMetadataTableRequestParams) GetIcebergMetadataParams() (v *plannodes.TIcebergMetadataParams) { + if !p.IsSetIcebergMetadataParams() { + return TMetadataTableRequestParams_IcebergMetadataParams_DEFAULT + } + return p.IcebergMetadataParams +} + +var TMetadataTableRequestParams_BackendsMetadataParams_DEFAULT *plannodes.TBackendsMetadataParams + +func (p *TMetadataTableRequestParams) GetBackendsMetadataParams() (v *plannodes.TBackendsMetadataParams) { + if !p.IsSetBackendsMetadataParams() { + return TMetadataTableRequestParams_BackendsMetadataParams_DEFAULT + } + return p.BackendsMetadataParams +} + +var TMetadataTableRequestParams_ColumnsName_DEFAULT []string + +func (p *TMetadataTableRequestParams) GetColumnsName() (v []string) { + if !p.IsSetColumnsName() { + return TMetadataTableRequestParams_ColumnsName_DEFAULT + } + return p.ColumnsName +} + +var TMetadataTableRequestParams_FrontendsMetadataParams_DEFAULT *plannodes.TFrontendsMetadataParams + +func (p *TMetadataTableRequestParams) GetFrontendsMetadataParams() (v *plannodes.TFrontendsMetadataParams) { + if 
!p.IsSetFrontendsMetadataParams() { + return TMetadataTableRequestParams_FrontendsMetadataParams_DEFAULT + } + return p.FrontendsMetadataParams +} + +var TMetadataTableRequestParams_CurrentUserIdent_DEFAULT *types.TUserIdentity + +func (p *TMetadataTableRequestParams) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TMetadataTableRequestParams_CurrentUserIdent_DEFAULT + } + return p.CurrentUserIdent +} + +var TMetadataTableRequestParams_QueriesMetadataParams_DEFAULT *plannodes.TQueriesMetadataParams + +func (p *TMetadataTableRequestParams) GetQueriesMetadataParams() (v *plannodes.TQueriesMetadataParams) { + if !p.IsSetQueriesMetadataParams() { + return TMetadataTableRequestParams_QueriesMetadataParams_DEFAULT + } + return p.QueriesMetadataParams +} + +var TMetadataTableRequestParams_MaterializedViewsMetadataParams_DEFAULT *plannodes.TMaterializedViewsMetadataParams + +func (p *TMetadataTableRequestParams) GetMaterializedViewsMetadataParams() (v *plannodes.TMaterializedViewsMetadataParams) { + if !p.IsSetMaterializedViewsMetadataParams() { + return TMetadataTableRequestParams_MaterializedViewsMetadataParams_DEFAULT + } + return p.MaterializedViewsMetadataParams +} + +var TMetadataTableRequestParams_JobsMetadataParams_DEFAULT *plannodes.TJobsMetadataParams + +func (p *TMetadataTableRequestParams) GetJobsMetadataParams() (v *plannodes.TJobsMetadataParams) { + if !p.IsSetJobsMetadataParams() { + return TMetadataTableRequestParams_JobsMetadataParams_DEFAULT + } + return p.JobsMetadataParams +} + +var TMetadataTableRequestParams_TasksMetadataParams_DEFAULT *plannodes.TTasksMetadataParams + +func (p *TMetadataTableRequestParams) GetTasksMetadataParams() (v *plannodes.TTasksMetadataParams) { + if !p.IsSetTasksMetadataParams() { + return TMetadataTableRequestParams_TasksMetadataParams_DEFAULT + } + return p.TasksMetadataParams +} + +var TMetadataTableRequestParams_PartitionsMetadataParams_DEFAULT *plannodes.TPartitionsMetadataParams + +func (p *TMetadataTableRequestParams) GetPartitionsMetadataParams() (v *plannodes.TPartitionsMetadataParams) { + if !p.IsSetPartitionsMetadataParams() { + return TMetadataTableRequestParams_PartitionsMetadataParams_DEFAULT + } + return p.PartitionsMetadataParams +} + +var TMetadataTableRequestParams_MetaCacheStatsParams_DEFAULT *plannodes.TMetaCacheStatsParams + +func (p *TMetadataTableRequestParams) GetMetaCacheStatsParams() (v *plannodes.TMetaCacheStatsParams) { + if !p.IsSetMetaCacheStatsParams() { + return TMetadataTableRequestParams_MetaCacheStatsParams_DEFAULT + } + return p.MetaCacheStatsParams +} + +var TMetadataTableRequestParams_PartitionValuesMetadataParams_DEFAULT *plannodes.TPartitionValuesMetadataParams + +func (p *TMetadataTableRequestParams) GetPartitionValuesMetadataParams() (v *plannodes.TPartitionValuesMetadataParams) { + if !p.IsSetPartitionValuesMetadataParams() { + return TMetadataTableRequestParams_PartitionValuesMetadataParams_DEFAULT + } + return p.PartitionValuesMetadataParams +} +func (p *TMetadataTableRequestParams) SetMetadataType(val *types.TMetadataType) { + p.MetadataType = val +} +func (p *TMetadataTableRequestParams) SetIcebergMetadataParams(val *plannodes.TIcebergMetadataParams) { + p.IcebergMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetBackendsMetadataParams(val *plannodes.TBackendsMetadataParams) { + p.BackendsMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetColumnsName(val []string) { + p.ColumnsName = val +} +func (p *TMetadataTableRequestParams) 
SetFrontendsMetadataParams(val *plannodes.TFrontendsMetadataParams) { + p.FrontendsMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val +} +func (p *TMetadataTableRequestParams) SetQueriesMetadataParams(val *plannodes.TQueriesMetadataParams) { + p.QueriesMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetMaterializedViewsMetadataParams(val *plannodes.TMaterializedViewsMetadataParams) { + p.MaterializedViewsMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetJobsMetadataParams(val *plannodes.TJobsMetadataParams) { + p.JobsMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetTasksMetadataParams(val *plannodes.TTasksMetadataParams) { + p.TasksMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetPartitionsMetadataParams(val *plannodes.TPartitionsMetadataParams) { + p.PartitionsMetadataParams = val +} +func (p *TMetadataTableRequestParams) SetMetaCacheStatsParams(val *plannodes.TMetaCacheStatsParams) { + p.MetaCacheStatsParams = val +} +func (p *TMetadataTableRequestParams) SetPartitionValuesMetadataParams(val *plannodes.TPartitionValuesMetadataParams) { + p.PartitionValuesMetadataParams = val +} + +var fieldIDToName_TMetadataTableRequestParams = map[int16]string{ + 1: "metadata_type", + 2: "iceberg_metadata_params", + 3: "backends_metadata_params", + 4: "columns_name", + 5: "frontends_metadata_params", + 6: "current_user_ident", + 7: "queries_metadata_params", + 8: "materialized_views_metadata_params", + 9: "jobs_metadata_params", + 10: "tasks_metadata_params", + 11: "partitions_metadata_params", + 12: "meta_cache_stats_params", + 13: "partition_values_metadata_params", +} + +func (p *TMetadataTableRequestParams) IsSetMetadataType() bool { + return p.MetadataType != nil +} + +func (p *TMetadataTableRequestParams) IsSetIcebergMetadataParams() bool { + return p.IcebergMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetBackendsMetadataParams() bool { + return p.BackendsMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetColumnsName() bool { + return p.ColumnsName != nil +} + +func (p *TMetadataTableRequestParams) IsSetFrontendsMetadataParams() bool { + return p.FrontendsMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil +} + +func (p *TMetadataTableRequestParams) IsSetQueriesMetadataParams() bool { + return p.QueriesMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetMaterializedViewsMetadataParams() bool { + return p.MaterializedViewsMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetJobsMetadataParams() bool { + return p.JobsMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetTasksMetadataParams() bool { + return p.TasksMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetPartitionsMetadataParams() bool { + return p.PartitionsMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetMetaCacheStatsParams() bool { + return p.MetaCacheStatsParams != nil +} + +func (p *TMetadataTableRequestParams) IsSetPartitionValuesMetadataParams() bool { + return p.PartitionValuesMetadataParams != nil +} + +func (p *TMetadataTableRequestParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = 
iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMetadataTableRequestParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TMetadataType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TMetadataType(v) + _field = &tmp + } + p.MetadataType = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField2(iprot thrift.TProtocol) error { + _field := plannodes.NewTIcebergMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.IcebergMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField3(iprot thrift.TProtocol) error { + _field := plannodes.NewTBackendsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.BackendsMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnsName = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField5(iprot thrift.TProtocol) error { + _field := plannodes.NewTFrontendsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.FrontendsMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField6(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { + return err + } + p.CurrentUserIdent = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField7(iprot thrift.TProtocol) error { + _field := plannodes.NewTQueriesMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueriesMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField8(iprot thrift.TProtocol) error { + _field := plannodes.NewTMaterializedViewsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MaterializedViewsMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField9(iprot thrift.TProtocol) error { + _field := plannodes.NewTJobsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.JobsMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField10(iprot thrift.TProtocol) error { + _field := plannodes.NewTTasksMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.TasksMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField11(iprot thrift.TProtocol) error { + _field := plannodes.NewTPartitionsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.PartitionsMetadataParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField12(iprot thrift.TProtocol) error { + _field := plannodes.NewTMetaCacheStatsParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MetaCacheStatsParams = _field + return nil +} +func (p *TMetadataTableRequestParams) ReadField13(iprot thrift.TProtocol) error { + _field := plannodes.NewTPartitionValuesMetadataParams() + if err := 
_field.Read(iprot); err != nil { + return err + } + p.PartitionValuesMetadataParams = _field + return nil +} + +func (p *TMetadataTableRequestParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TMetadataTableRequestParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetMetadataType() { + if err = oprot.WriteFieldBegin("metadata_type", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.MetadataType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIcebergMetadataParams() { + if err = oprot.WriteFieldBegin("iceberg_metadata_params", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.IcebergMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendsMetadataParams() { + if err = oprot.WriteFieldBegin("backends_metadata_params", thrift.STRUCT, 3); err != 
nil { + goto WriteFieldBeginError + } + if err := p.BackendsMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnsName() { + if err = oprot.WriteFieldBegin("columns_name", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsName)); err != nil { + return err + } + for _, v := range p.ColumnsName { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFrontendsMetadataParams() { + if err = oprot.WriteFieldBegin("frontends_metadata_params", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.FrontendsMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.CurrentUserIdent.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetQueriesMetadataParams() { + if err = oprot.WriteFieldBegin("queries_metadata_params", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.QueriesMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetMaterializedViewsMetadataParams() { + if err = oprot.WriteFieldBegin("materialized_views_metadata_params", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.MaterializedViewsMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + 
goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetJobsMetadataParams() { + if err = oprot.WriteFieldBegin("jobs_metadata_params", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.JobsMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTasksMetadataParams() { + if err = oprot.WriteFieldBegin("tasks_metadata_params", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.TasksMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionsMetadataParams() { + if err = oprot.WriteFieldBegin("partitions_metadata_params", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionsMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaCacheStatsParams() { + if err = oprot.WriteFieldBegin("meta_cache_stats_params", thrift.STRUCT, 12); err != nil { + goto WriteFieldBeginError + } + if err := p.MetaCacheStatsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionValuesMetadataParams() { + if err = oprot.WriteFieldBegin("partition_values_metadata_params", thrift.STRUCT, 13); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionValuesMetadataParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) String() string { + if 
p == nil { + return "" + } + return fmt.Sprintf("TMetadataTableRequestParams(%+v)", *p) + +} + +func (p *TMetadataTableRequestParams) DeepEqual(ano *TMetadataTableRequestParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.MetadataType) { + return false + } + if !p.Field2DeepEqual(ano.IcebergMetadataParams) { + return false + } + if !p.Field3DeepEqual(ano.BackendsMetadataParams) { + return false + } + if !p.Field4DeepEqual(ano.ColumnsName) { + return false + } + if !p.Field5DeepEqual(ano.FrontendsMetadataParams) { + return false + } + if !p.Field6DeepEqual(ano.CurrentUserIdent) { + return false + } + if !p.Field7DeepEqual(ano.QueriesMetadataParams) { + return false + } + if !p.Field8DeepEqual(ano.MaterializedViewsMetadataParams) { + return false + } + if !p.Field9DeepEqual(ano.JobsMetadataParams) { + return false + } + if !p.Field10DeepEqual(ano.TasksMetadataParams) { + return false + } + if !p.Field11DeepEqual(ano.PartitionsMetadataParams) { + return false + } + if !p.Field12DeepEqual(ano.MetaCacheStatsParams) { + return false + } + if !p.Field13DeepEqual(ano.PartitionValuesMetadataParams) { + return false + } + return true +} + +func (p *TMetadataTableRequestParams) Field1DeepEqual(src *types.TMetadataType) bool { + + if p.MetadataType == src { + return true + } else if p.MetadataType == nil || src == nil { + return false + } + if *p.MetadataType != *src { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field2DeepEqual(src *plannodes.TIcebergMetadataParams) bool { + + if !p.IcebergMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field3DeepEqual(src *plannodes.TBackendsMetadataParams) bool { + + if !p.BackendsMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field4DeepEqual(src []string) bool { + + if len(p.ColumnsName) != len(src) { + return false + } + for i, v := range p.ColumnsName { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TMetadataTableRequestParams) Field5DeepEqual(src *plannodes.TFrontendsMetadataParams) bool { + + if !p.FrontendsMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field6DeepEqual(src *types.TUserIdentity) bool { + + if !p.CurrentUserIdent.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field7DeepEqual(src *plannodes.TQueriesMetadataParams) bool { + + if !p.QueriesMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field8DeepEqual(src *plannodes.TMaterializedViewsMetadataParams) bool { + + if !p.MaterializedViewsMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field9DeepEqual(src *plannodes.TJobsMetadataParams) bool { + + if !p.JobsMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field10DeepEqual(src *plannodes.TTasksMetadataParams) bool { + + if !p.TasksMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field11DeepEqual(src *plannodes.TPartitionsMetadataParams) bool { + + if !p.PartitionsMetadataParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field12DeepEqual(src *plannodes.TMetaCacheStatsParams) bool { + + if 
!p.MetaCacheStatsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetadataTableRequestParams) Field13DeepEqual(src *plannodes.TPartitionValuesMetadataParams) bool { + + if !p.PartitionValuesMetadataParams.DeepEqual(src) { + return false + } + return true +} + +type TSchemaTableRequestParams struct { + ColumnsName []string `thrift:"columns_name,1,optional" frugal:"1,optional,list" json:"columns_name,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,2,optional" frugal:"2,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + ReplayToOtherFe *bool `thrift:"replay_to_other_fe,3,optional" frugal:"3,optional,bool" json:"replay_to_other_fe,omitempty"` + Catalog *string `thrift:"catalog,4,optional" frugal:"4,optional,string" json:"catalog,omitempty"` + DbId *int64 `thrift:"dbId,5,optional" frugal:"5,optional,i64" json:"dbId,omitempty"` +} + +func NewTSchemaTableRequestParams() *TSchemaTableRequestParams { + return &TSchemaTableRequestParams{} +} + +func (p *TSchemaTableRequestParams) InitDefault() { +} + +var TSchemaTableRequestParams_ColumnsName_DEFAULT []string + +func (p *TSchemaTableRequestParams) GetColumnsName() (v []string) { + if !p.IsSetColumnsName() { + return TSchemaTableRequestParams_ColumnsName_DEFAULT + } + return p.ColumnsName +} + +var TSchemaTableRequestParams_CurrentUserIdent_DEFAULT *types.TUserIdentity + +func (p *TSchemaTableRequestParams) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TSchemaTableRequestParams_CurrentUserIdent_DEFAULT + } + return p.CurrentUserIdent +} + +var TSchemaTableRequestParams_ReplayToOtherFe_DEFAULT bool + +func (p *TSchemaTableRequestParams) GetReplayToOtherFe() (v bool) { + if !p.IsSetReplayToOtherFe() { + return TSchemaTableRequestParams_ReplayToOtherFe_DEFAULT + } + return *p.ReplayToOtherFe +} + +var TSchemaTableRequestParams_Catalog_DEFAULT string + +func (p *TSchemaTableRequestParams) GetCatalog() (v string) { + if !p.IsSetCatalog() { + return TSchemaTableRequestParams_Catalog_DEFAULT + } + return *p.Catalog +} + +var TSchemaTableRequestParams_DbId_DEFAULT int64 + +func (p *TSchemaTableRequestParams) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TSchemaTableRequestParams_DbId_DEFAULT + } + return *p.DbId +} +func (p *TSchemaTableRequestParams) SetColumnsName(val []string) { + p.ColumnsName = val +} +func (p *TSchemaTableRequestParams) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val +} +func (p *TSchemaTableRequestParams) SetReplayToOtherFe(val *bool) { + p.ReplayToOtherFe = val +} +func (p *TSchemaTableRequestParams) SetCatalog(val *string) { + p.Catalog = val +} +func (p *TSchemaTableRequestParams) SetDbId(val *int64) { + p.DbId = val +} + +var fieldIDToName_TSchemaTableRequestParams = map[int16]string{ + 1: "columns_name", + 2: "current_user_ident", + 3: "replay_to_other_fe", + 4: "catalog", + 5: "dbId", +} + +func (p *TSchemaTableRequestParams) IsSetColumnsName() bool { + return p.ColumnsName != nil +} + +func (p *TSchemaTableRequestParams) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil +} + +func (p *TSchemaTableRequestParams) IsSetReplayToOtherFe() bool { + return p.ReplayToOtherFe != nil +} + +func (p *TSchemaTableRequestParams) IsSetCatalog() bool { + return p.Catalog != nil +} + +func (p *TSchemaTableRequestParams) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TSchemaTableRequestParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + 
var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSchemaTableRequestParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnsName = _field + return nil +} +func (p *TSchemaTableRequestParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { + return err + } + p.CurrentUserIdent = _field + return nil +} +func (p *TSchemaTableRequestParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ReplayToOtherFe = _field + return nil +} +func (p *TSchemaTableRequestParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Catalog = _field + return nil +} +func (p *TSchemaTableRequestParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + 
return err + } else { + _field = &v + } + p.DbId = _field + return nil +} + +func (p *TSchemaTableRequestParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSchemaTableRequestParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnsName() { + if err = oprot.WriteFieldBegin("columns_name", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsName)); err != nil { + return err + } + for _, v := range p.ColumnsName { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.CurrentUserIdent.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetReplayToOtherFe() { + if err = oprot.WriteFieldBegin("replay_to_other_fe", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.ReplayToOtherFe); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalog() { + if err = oprot.WriteFieldBegin("catalog", 
thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Catalog); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSchemaTableRequestParams(%+v)", *p) + +} + +func (p *TSchemaTableRequestParams) DeepEqual(ano *TSchemaTableRequestParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ColumnsName) { + return false + } + if !p.Field2DeepEqual(ano.CurrentUserIdent) { + return false + } + if !p.Field3DeepEqual(ano.ReplayToOtherFe) { + return false + } + if !p.Field4DeepEqual(ano.Catalog) { + return false + } + if !p.Field5DeepEqual(ano.DbId) { + return false + } + return true +} + +func (p *TSchemaTableRequestParams) Field1DeepEqual(src []string) bool { + + if len(p.ColumnsName) != len(src) { + return false + } + for i, v := range p.ColumnsName { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TSchemaTableRequestParams) Field2DeepEqual(src *types.TUserIdentity) bool { + + if !p.CurrentUserIdent.DeepEqual(src) { + return false + } + return true +} +func (p *TSchemaTableRequestParams) Field3DeepEqual(src *bool) bool { + + if p.ReplayToOtherFe == src { + return true + } else if p.ReplayToOtherFe == nil || src == nil { + return false + } + if *p.ReplayToOtherFe != *src { + return false + } + return true +} +func (p *TSchemaTableRequestParams) Field4DeepEqual(src *string) bool { + + if p.Catalog == src { + return true + } else if p.Catalog == nil || src == nil { + return false + } + if strings.Compare(*p.Catalog, *src) != 0 { + return false + } + return true +} +func (p *TSchemaTableRequestParams) Field5DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} + +type TFetchSchemaTableDataRequest struct { + ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` + SchemaTableName *TSchemaTableName `thrift:"schema_table_name,2,optional" frugal:"2,optional,TSchemaTableName" json:"schema_table_name,omitempty"` + MetadaTableParams *TMetadataTableRequestParams `thrift:"metada_table_params,3,optional" frugal:"3,optional,TMetadataTableRequestParams" json:"metada_table_params,omitempty"` + SchemaTableParams *TSchemaTableRequestParams `thrift:"schema_table_params,4,optional" frugal:"4,optional,TSchemaTableRequestParams" json:"schema_table_params,omitempty"` +} + +func 
NewTFetchSchemaTableDataRequest() *TFetchSchemaTableDataRequest { + return &TFetchSchemaTableDataRequest{} +} + +func (p *TFetchSchemaTableDataRequest) InitDefault() { +} + +var TFetchSchemaTableDataRequest_ClusterName_DEFAULT string + +func (p *TFetchSchemaTableDataRequest) GetClusterName() (v string) { + if !p.IsSetClusterName() { + return TFetchSchemaTableDataRequest_ClusterName_DEFAULT + } + return *p.ClusterName +} + +var TFetchSchemaTableDataRequest_SchemaTableName_DEFAULT TSchemaTableName + +func (p *TFetchSchemaTableDataRequest) GetSchemaTableName() (v TSchemaTableName) { + if !p.IsSetSchemaTableName() { + return TFetchSchemaTableDataRequest_SchemaTableName_DEFAULT + } + return *p.SchemaTableName +} + +var TFetchSchemaTableDataRequest_MetadaTableParams_DEFAULT *TMetadataTableRequestParams + +func (p *TFetchSchemaTableDataRequest) GetMetadaTableParams() (v *TMetadataTableRequestParams) { + if !p.IsSetMetadaTableParams() { + return TFetchSchemaTableDataRequest_MetadaTableParams_DEFAULT + } + return p.MetadaTableParams +} + +var TFetchSchemaTableDataRequest_SchemaTableParams_DEFAULT *TSchemaTableRequestParams + +func (p *TFetchSchemaTableDataRequest) GetSchemaTableParams() (v *TSchemaTableRequestParams) { + if !p.IsSetSchemaTableParams() { + return TFetchSchemaTableDataRequest_SchemaTableParams_DEFAULT + } + return p.SchemaTableParams +} +func (p *TFetchSchemaTableDataRequest) SetClusterName(val *string) { + p.ClusterName = val +} +func (p *TFetchSchemaTableDataRequest) SetSchemaTableName(val *TSchemaTableName) { + p.SchemaTableName = val +} +func (p *TFetchSchemaTableDataRequest) SetMetadaTableParams(val *TMetadataTableRequestParams) { + p.MetadaTableParams = val +} +func (p *TFetchSchemaTableDataRequest) SetSchemaTableParams(val *TSchemaTableRequestParams) { + p.SchemaTableParams = val +} + +var fieldIDToName_TFetchSchemaTableDataRequest = map[int16]string{ + 1: "cluster_name", + 2: "schema_table_name", + 3: "metada_table_params", + 4: "schema_table_params", +} + +func (p *TFetchSchemaTableDataRequest) IsSetClusterName() bool { + return p.ClusterName != nil +} + +func (p *TFetchSchemaTableDataRequest) IsSetSchemaTableName() bool { + return p.SchemaTableName != nil +} + +func (p *TFetchSchemaTableDataRequest) IsSetMetadaTableParams() bool { + return p.MetadaTableParams != nil +} + +func (p *TFetchSchemaTableDataRequest) IsSetSchemaTableParams() bool { + return p.SchemaTableParams != nil +} + +func (p *TFetchSchemaTableDataRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + 
} + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ClusterName = _field + return nil +} +func (p *TFetchSchemaTableDataRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *TSchemaTableName + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TSchemaTableName(v) + _field = &tmp + } + p.SchemaTableName = _field + return nil +} +func (p *TFetchSchemaTableDataRequest) ReadField3(iprot thrift.TProtocol) error { + _field := NewTMetadataTableRequestParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MetadaTableParams = _field + return nil +} +func (p *TFetchSchemaTableDataRequest) ReadField4(iprot thrift.TProtocol) error { + _field := NewTSchemaTableRequestParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.SchemaTableParams = _field + return nil +} + +func (p *TFetchSchemaTableDataRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFetchSchemaTableDataRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetClusterName() { + if err = oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ClusterName); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaTableName() { + if err = oprot.WriteFieldBegin("schema_table_name", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.SchemaTableName)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMetadaTableParams() { + if err = oprot.WriteFieldBegin("metada_table_params", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.MetadaTableParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaTableParams() { + if err = oprot.WriteFieldBegin("schema_table_params", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.SchemaTableParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFetchSchemaTableDataRequest(%+v)", *p) + +} + +func (p *TFetchSchemaTableDataRequest) DeepEqual(ano *TFetchSchemaTableDataRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ClusterName) { + return false + } + if !p.Field2DeepEqual(ano.SchemaTableName) { + return false + } + if !p.Field3DeepEqual(ano.MetadaTableParams) { + return false + } + if !p.Field4DeepEqual(ano.SchemaTableParams) { + return false + } + return true +} + +func (p *TFetchSchemaTableDataRequest) Field1DeepEqual(src *string) bool { + + if p.ClusterName == src { + return true + } else if p.ClusterName == nil || src == nil { + return false + } + if strings.Compare(*p.ClusterName, *src) != 0 { + return false + } + return true +} +func (p *TFetchSchemaTableDataRequest) Field2DeepEqual(src *TSchemaTableName) bool { + + if p.SchemaTableName == src { + return true + } else if p.SchemaTableName == nil || src == nil { + return false + } + if *p.SchemaTableName != *src { + return false + } + return true +} +func (p *TFetchSchemaTableDataRequest) Field3DeepEqual(src *TMetadataTableRequestParams) bool { + + if !p.MetadaTableParams.DeepEqual(src) { + return false + } + return true +} +func (p 
*TFetchSchemaTableDataRequest) Field4DeepEqual(src *TSchemaTableRequestParams) bool { + + if !p.SchemaTableParams.DeepEqual(src) { + return false + } + return true +} + +type TFetchSchemaTableDataResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + DataBatch []*data.TRow `thrift:"data_batch,2,optional" frugal:"2,optional,list" json:"data_batch,omitempty"` +} + +func NewTFetchSchemaTableDataResult_() *TFetchSchemaTableDataResult_ { + return &TFetchSchemaTableDataResult_{} +} + +func (p *TFetchSchemaTableDataResult_) InitDefault() { +} + +var TFetchSchemaTableDataResult__Status_DEFAULT *status.TStatus + +func (p *TFetchSchemaTableDataResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TFetchSchemaTableDataResult__Status_DEFAULT + } + return p.Status +} + +var TFetchSchemaTableDataResult__DataBatch_DEFAULT []*data.TRow + +func (p *TFetchSchemaTableDataResult_) GetDataBatch() (v []*data.TRow) { + if !p.IsSetDataBatch() { + return TFetchSchemaTableDataResult__DataBatch_DEFAULT + } + return p.DataBatch +} +func (p *TFetchSchemaTableDataResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TFetchSchemaTableDataResult_) SetDataBatch(val []*data.TRow) { + p.DataBatch = val +} + +var fieldIDToName_TFetchSchemaTableDataResult_ = map[int16]string{ + 1: "status", + 2: "data_batch", +} + +func (p *TFetchSchemaTableDataResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TFetchSchemaTableDataResult_) IsSetDataBatch() bool { + return p.DataBatch != nil +} + +func (p *TFetchSchemaTableDataResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchSchemaTableDataResult_[fieldId])) +} + +func (p *TFetchSchemaTableDataResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TFetchSchemaTableDataResult_) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*data.TRow, 0, size) + values := make([]data.TRow, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DataBatch = _field + return nil +} + +func (p *TFetchSchemaTableDataResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFetchSchemaTableDataResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFetchSchemaTableDataResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDataBatch() { + if err = oprot.WriteFieldBegin("data_batch", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DataBatch)); err != nil { + return err + } + for _, v := range p.DataBatch { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFetchSchemaTableDataResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFetchSchemaTableDataResult_(%+v)", *p) + +} + +func (p *TFetchSchemaTableDataResult_) DeepEqual(ano *TFetchSchemaTableDataResult_) bool { + if p == ano { + return true + } else if p == nil || 
ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.DataBatch) { + return false + } + return true +} + +func (p *TFetchSchemaTableDataResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TFetchSchemaTableDataResult_) Field2DeepEqual(src []*data.TRow) bool { + + if len(p.DataBatch) != len(src) { + return false + } + for i, v := range p.DataBatch { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TMySqlLoadAcquireTokenResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` +} + +func NewTMySqlLoadAcquireTokenResult_() *TMySqlLoadAcquireTokenResult_ { + return &TMySqlLoadAcquireTokenResult_{} +} + +func (p *TMySqlLoadAcquireTokenResult_) InitDefault() { +} + +var TMySqlLoadAcquireTokenResult__Status_DEFAULT *status.TStatus + +func (p *TMySqlLoadAcquireTokenResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TMySqlLoadAcquireTokenResult__Status_DEFAULT + } + return p.Status +} + +var TMySqlLoadAcquireTokenResult__Token_DEFAULT string + +func (p *TMySqlLoadAcquireTokenResult_) GetToken() (v string) { + if !p.IsSetToken() { + return TMySqlLoadAcquireTokenResult__Token_DEFAULT + } + return *p.Token +} +func (p *TMySqlLoadAcquireTokenResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TMySqlLoadAcquireTokenResult_) SetToken(val *string) { + p.Token = val +} + +var fieldIDToName_TMySqlLoadAcquireTokenResult_ = map[int16]string{ + 1: "status", + 2: "token", +} + +func (p *TMySqlLoadAcquireTokenResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TMySqlLoadAcquireTokenResult_) IsSetToken() bool { + return p.Token != nil +} + +func (p *TMySqlLoadAcquireTokenResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMySqlLoadAcquireTokenResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + 
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TMySqlLoadAcquireTokenResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TMySqlLoadAcquireTokenResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} + +func (p *TMySqlLoadAcquireTokenResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TMySqlLoadAcquireTokenResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TMySqlLoadAcquireTokenResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TMySqlLoadAcquireTokenResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TMySqlLoadAcquireTokenResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TMySqlLoadAcquireTokenResult_(%+v)", *p) + +} + +func (p *TMySqlLoadAcquireTokenResult_) DeepEqual(ano *TMySqlLoadAcquireTokenResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Token) { + return false + } + return true +} + +func (p *TMySqlLoadAcquireTokenResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TMySqlLoadAcquireTokenResult_) Field2DeepEqual(src *string) bool { + + if p.Token == 
src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} + +type TTabletCooldownInfo struct { + TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"` + CooldownReplicaId *types.TReplicaId `thrift:"cooldown_replica_id,2,optional" frugal:"2,optional,i64" json:"cooldown_replica_id,omitempty"` + CooldownMetaId *types.TUniqueId `thrift:"cooldown_meta_id,3,optional" frugal:"3,optional,types.TUniqueId" json:"cooldown_meta_id,omitempty"` +} + +func NewTTabletCooldownInfo() *TTabletCooldownInfo { + return &TTabletCooldownInfo{} +} + +func (p *TTabletCooldownInfo) InitDefault() { +} + +var TTabletCooldownInfo_TabletId_DEFAULT types.TTabletId + +func (p *TTabletCooldownInfo) GetTabletId() (v types.TTabletId) { + if !p.IsSetTabletId() { + return TTabletCooldownInfo_TabletId_DEFAULT + } + return *p.TabletId +} + +var TTabletCooldownInfo_CooldownReplicaId_DEFAULT types.TReplicaId + +func (p *TTabletCooldownInfo) GetCooldownReplicaId() (v types.TReplicaId) { + if !p.IsSetCooldownReplicaId() { + return TTabletCooldownInfo_CooldownReplicaId_DEFAULT + } + return *p.CooldownReplicaId +} + +var TTabletCooldownInfo_CooldownMetaId_DEFAULT *types.TUniqueId + +func (p *TTabletCooldownInfo) GetCooldownMetaId() (v *types.TUniqueId) { + if !p.IsSetCooldownMetaId() { + return TTabletCooldownInfo_CooldownMetaId_DEFAULT + } + return p.CooldownMetaId +} +func (p *TTabletCooldownInfo) SetTabletId(val *types.TTabletId) { + p.TabletId = val +} +func (p *TTabletCooldownInfo) SetCooldownReplicaId(val *types.TReplicaId) { + p.CooldownReplicaId = val +} +func (p *TTabletCooldownInfo) SetCooldownMetaId(val *types.TUniqueId) { + p.CooldownMetaId = val +} + +var fieldIDToName_TTabletCooldownInfo = map[int16]string{ + 1: "tablet_id", + 2: "cooldown_replica_id", + 3: "cooldown_meta_id", +} + +func (p *TTabletCooldownInfo) IsSetTabletId() bool { + return p.TabletId != nil +} + +func (p *TTabletCooldownInfo) IsSetCooldownReplicaId() bool { + return p.CooldownReplicaId != nil +} + +func (p *TTabletCooldownInfo) IsSetCooldownMetaId() bool { + return p.CooldownMetaId != nil +} + +func (p *TTabletCooldownInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct 
begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletCooldownInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTabletCooldownInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TabletId = _field + return nil +} +func (p *TTabletCooldownInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TReplicaId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CooldownReplicaId = _field + return nil +} +func (p *TTabletCooldownInfo) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.CooldownMetaId = _field + return nil +} + +func (p *TTabletCooldownInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTabletCooldownInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTabletCooldownInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletId() { + if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTabletCooldownInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCooldownReplicaId() { + if err = oprot.WriteFieldBegin("cooldown_replica_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CooldownReplicaId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTabletCooldownInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCooldownMetaId() { + if err = oprot.WriteFieldBegin("cooldown_meta_id", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.CooldownMetaId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTabletCooldownInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTabletCooldownInfo(%+v)", *p) + +} + +func (p *TTabletCooldownInfo) DeepEqual(ano *TTabletCooldownInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TabletId) { + return false + } + if !p.Field2DeepEqual(ano.CooldownReplicaId) { + return false + } + if !p.Field3DeepEqual(ano.CooldownMetaId) { + return false + } + return true +} + +func (p *TTabletCooldownInfo) Field1DeepEqual(src *types.TTabletId) bool { + + if p.TabletId == src { + return true + } else if p.TabletId == nil || src == nil { + return false + } + if *p.TabletId != *src { + return false + } + return true +} +func (p *TTabletCooldownInfo) Field2DeepEqual(src *types.TReplicaId) bool { + + if p.CooldownReplicaId == src { + return true + } else if p.CooldownReplicaId == nil || src == nil { + return false + } + if *p.CooldownReplicaId != *src { + return false + } + return true +} +func (p *TTabletCooldownInfo) Field3DeepEqual(src *types.TUniqueId) bool { + + if !p.CooldownMetaId.DeepEqual(src) { + return false + } + return true +} + +type TConfirmUnusedRemoteFilesRequest struct { + ConfirmList []*TTabletCooldownInfo `thrift:"confirm_list,1,optional" frugal:"1,optional,list" json:"confirm_list,omitempty"` +} + +func NewTConfirmUnusedRemoteFilesRequest() *TConfirmUnusedRemoteFilesRequest { + return &TConfirmUnusedRemoteFilesRequest{} +} + +func (p *TConfirmUnusedRemoteFilesRequest) InitDefault() { +} + +var TConfirmUnusedRemoteFilesRequest_ConfirmList_DEFAULT []*TTabletCooldownInfo + +func (p *TConfirmUnusedRemoteFilesRequest) GetConfirmList() (v []*TTabletCooldownInfo) { + if !p.IsSetConfirmList() { + return TConfirmUnusedRemoteFilesRequest_ConfirmList_DEFAULT + } + return p.ConfirmList +} +func (p *TConfirmUnusedRemoteFilesRequest) SetConfirmList(val []*TTabletCooldownInfo) { + p.ConfirmList = val +} + +var fieldIDToName_TConfirmUnusedRemoteFilesRequest = map[int16]string{ + 1: "confirm_list", +} + +func (p *TConfirmUnusedRemoteFilesRequest) IsSetConfirmList() bool { + return p.ConfirmList != nil +} + +func (p *TConfirmUnusedRemoteFilesRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } 
+ } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTabletCooldownInfo, 0, size) + values := make([]TTabletCooldownInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ConfirmList = _field + return nil +} + +func (p *TConfirmUnusedRemoteFilesRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TConfirmUnusedRemoteFilesRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetConfirmList() { + if err = oprot.WriteFieldBegin("confirm_list", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ConfirmList)); err != nil { + return err + } + for _, v := range p.ConfirmList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TConfirmUnusedRemoteFilesRequest(%+v)", *p) + +} + +func (p *TConfirmUnusedRemoteFilesRequest) DeepEqual(ano *TConfirmUnusedRemoteFilesRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ConfirmList) 
{ + return false + } + return true +} + +func (p *TConfirmUnusedRemoteFilesRequest) Field1DeepEqual(src []*TTabletCooldownInfo) bool { + + if len(p.ConfirmList) != len(src) { + return false + } + for i, v := range p.ConfirmList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TConfirmUnusedRemoteFilesResult_ struct { + ConfirmedTablets []types.TTabletId `thrift:"confirmed_tablets,1,optional" frugal:"1,optional,list" json:"confirmed_tablets,omitempty"` +} + +func NewTConfirmUnusedRemoteFilesResult_() *TConfirmUnusedRemoteFilesResult_ { + return &TConfirmUnusedRemoteFilesResult_{} +} + +func (p *TConfirmUnusedRemoteFilesResult_) InitDefault() { +} + +var TConfirmUnusedRemoteFilesResult__ConfirmedTablets_DEFAULT []types.TTabletId + +func (p *TConfirmUnusedRemoteFilesResult_) GetConfirmedTablets() (v []types.TTabletId) { + if !p.IsSetConfirmedTablets() { + return TConfirmUnusedRemoteFilesResult__ConfirmedTablets_DEFAULT + } + return p.ConfirmedTablets +} +func (p *TConfirmUnusedRemoteFilesResult_) SetConfirmedTablets(val []types.TTabletId) { + p.ConfirmedTablets = val +} + +var fieldIDToName_TConfirmUnusedRemoteFilesResult_ = map[int16]string{ + 1: "confirmed_tablets", +} + +func (p *TConfirmUnusedRemoteFilesResult_) IsSetConfirmedTablets() bool { + return p.ConfirmedTablets != nil +} + +func (p *TConfirmUnusedRemoteFilesResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesResult_) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { + + var _elem types.TTabletId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ConfirmedTablets = _field + return nil +} + +func (p *TConfirmUnusedRemoteFilesResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = 
oprot.WriteStructBegin("TConfirmUnusedRemoteFilesResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetConfirmedTablets() { + if err = oprot.WriteFieldBegin("confirmed_tablets", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.ConfirmedTablets)); err != nil { + return err + } + for _, v := range p.ConfirmedTablets { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TConfirmUnusedRemoteFilesResult_(%+v)", *p) + +} + +func (p *TConfirmUnusedRemoteFilesResult_) DeepEqual(ano *TConfirmUnusedRemoteFilesResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ConfirmedTablets) { + return false + } + return true +} + +func (p *TConfirmUnusedRemoteFilesResult_) Field1DeepEqual(src []types.TTabletId) bool { + + if len(p.ConfirmedTablets) != len(src) { + return false + } + for i, v := range p.ConfirmedTablets { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TPrivilegeCtrl struct { + PrivHier TPrivilegeHier `thrift:"priv_hier,1,required" frugal:"1,required,TPrivilegeHier" json:"priv_hier"` + Ctl *string `thrift:"ctl,2,optional" frugal:"2,optional,string" json:"ctl,omitempty"` + Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` + Tbl *string `thrift:"tbl,4,optional" frugal:"4,optional,string" json:"tbl,omitempty"` + Cols []string `thrift:"cols,5,optional" frugal:"5,optional,set" json:"cols,omitempty"` + Res *string `thrift:"res,6,optional" frugal:"6,optional,string" json:"res,omitempty"` +} + +func NewTPrivilegeCtrl() *TPrivilegeCtrl { + return &TPrivilegeCtrl{} +} + +func (p *TPrivilegeCtrl) InitDefault() { +} + +func (p *TPrivilegeCtrl) GetPrivHier() (v TPrivilegeHier) { + return p.PrivHier +} + +var TPrivilegeCtrl_Ctl_DEFAULT string + +func (p *TPrivilegeCtrl) GetCtl() (v string) { + if !p.IsSetCtl() { + return TPrivilegeCtrl_Ctl_DEFAULT + } + return *p.Ctl +} + +var TPrivilegeCtrl_Db_DEFAULT string + +func (p *TPrivilegeCtrl) GetDb() (v string) { + if !p.IsSetDb() { + return TPrivilegeCtrl_Db_DEFAULT + } + return *p.Db +} + +var TPrivilegeCtrl_Tbl_DEFAULT string + +func (p *TPrivilegeCtrl) GetTbl() (v 
string) { + if !p.IsSetTbl() { + return TPrivilegeCtrl_Tbl_DEFAULT + } + return *p.Tbl +} + +var TPrivilegeCtrl_Cols_DEFAULT []string + +func (p *TPrivilegeCtrl) GetCols() (v []string) { + if !p.IsSetCols() { + return TPrivilegeCtrl_Cols_DEFAULT + } + return p.Cols +} + +var TPrivilegeCtrl_Res_DEFAULT string + +func (p *TPrivilegeCtrl) GetRes() (v string) { + if !p.IsSetRes() { + return TPrivilegeCtrl_Res_DEFAULT + } + return *p.Res +} +func (p *TPrivilegeCtrl) SetPrivHier(val TPrivilegeHier) { + p.PrivHier = val +} +func (p *TPrivilegeCtrl) SetCtl(val *string) { + p.Ctl = val +} +func (p *TPrivilegeCtrl) SetDb(val *string) { + p.Db = val +} +func (p *TPrivilegeCtrl) SetTbl(val *string) { + p.Tbl = val +} +func (p *TPrivilegeCtrl) SetCols(val []string) { + p.Cols = val +} +func (p *TPrivilegeCtrl) SetRes(val *string) { + p.Res = val +} + +var fieldIDToName_TPrivilegeCtrl = map[int16]string{ + 1: "priv_hier", + 2: "ctl", + 3: "db", + 4: "tbl", + 5: "cols", + 6: "res", +} + +func (p *TPrivilegeCtrl) IsSetCtl() bool { + return p.Ctl != nil +} + +func (p *TPrivilegeCtrl) IsSetDb() bool { + return p.Db != nil +} + +func (p *TPrivilegeCtrl) IsSetTbl() bool { + return p.Tbl != nil +} + +func (p *TPrivilegeCtrl) IsSetCols() bool { + return p.Cols != nil +} + +func (p *TPrivilegeCtrl) IsSetRes() bool { + return p.Res != nil +} + +func (p *TPrivilegeCtrl) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetPrivHier bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetPrivHier = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.SET { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetPrivHier { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPrivilegeCtrl[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPrivilegeCtrl[fieldId])) +} + +func (p *TPrivilegeCtrl) ReadField1(iprot thrift.TProtocol) error { + + var _field TPrivilegeHier + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TPrivilegeHier(v) + } + p.PrivHier = _field + return nil +} +func (p *TPrivilegeCtrl) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Ctl = _field + return nil +} +func (p *TPrivilegeCtrl) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TPrivilegeCtrl) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Tbl = _field + return nil +} +func (p *TPrivilegeCtrl) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadSetEnd(); err != nil { + return err + } + p.Cols = _field + return nil +} +func (p *TPrivilegeCtrl) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Res = _field + return nil +} + +func (p *TPrivilegeCtrl) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPrivilegeCtrl"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField1(oprot 
thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("priv_hier", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.PrivHier)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCtl() { + if err = oprot.WriteFieldBegin("ctl", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Ctl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTbl() { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCols() { + if err = oprot.WriteFieldBegin("cols", thrift.SET, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteSetBegin(thrift.STRING, len(p.Cols)); err != nil { + return err + } + for i := 0; i < len(p.Cols); i++ { + for j := i + 1; j < len(p.Cols); j++ { + if func(tgt, src string) bool { + if strings.Compare(tgt, src) != 0 { + return false + } + return true + }(p.Cols[i], p.Cols[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) + } + } + } + for _, v := range p.Cols { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteSetEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetRes() { + if err = oprot.WriteFieldBegin("res", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteString(*p.Res); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPrivilegeCtrl) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPrivilegeCtrl(%+v)", *p) + +} + +func (p *TPrivilegeCtrl) DeepEqual(ano *TPrivilegeCtrl) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PrivHier) { + return false + } + if !p.Field2DeepEqual(ano.Ctl) { + return false + } + if !p.Field3DeepEqual(ano.Db) { + return false + } + if !p.Field4DeepEqual(ano.Tbl) { + return false + } + if !p.Field5DeepEqual(ano.Cols) { + return false + } + if !p.Field6DeepEqual(ano.Res) { + return false + } + return true +} + +func (p *TPrivilegeCtrl) Field1DeepEqual(src TPrivilegeHier) bool { + + if p.PrivHier != src { + return false + } + return true +} +func (p *TPrivilegeCtrl) Field2DeepEqual(src *string) bool { + + if p.Ctl == src { + return true + } else if p.Ctl == nil || src == nil { + return false + } + if strings.Compare(*p.Ctl, *src) != 0 { + return false + } + return true +} +func (p *TPrivilegeCtrl) Field3DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TPrivilegeCtrl) Field4DeepEqual(src *string) bool { + + if p.Tbl == src { + return true + } else if p.Tbl == nil || src == nil { + return false + } + if strings.Compare(*p.Tbl, *src) != 0 { + return false + } + return true +} +func (p *TPrivilegeCtrl) Field5DeepEqual(src []string) bool { + + if len(p.Cols) != len(src) { + return false + } + for i, v := range p.Cols { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TPrivilegeCtrl) Field6DeepEqual(src *string) bool { + + if p.Res == src { + return true + } else if p.Res == nil || src == nil { + return false + } + if strings.Compare(*p.Res, *src) != 0 { + return false + } + return true +} + +type TCheckAuthRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` + Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` + UserIp *string `thrift:"user_ip,4,optional" frugal:"4,optional,string" json:"user_ip,omitempty"` + PrivCtrl *TPrivilegeCtrl `thrift:"priv_ctrl,5,optional" frugal:"5,optional,TPrivilegeCtrl" json:"priv_ctrl,omitempty"` + PrivType *TPrivilegeType `thrift:"priv_type,6,optional" frugal:"6,optional,TPrivilegeType" json:"priv_type,omitempty"` + ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,7,optional" frugal:"7,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` +} + +func NewTCheckAuthRequest() *TCheckAuthRequest { + return &TCheckAuthRequest{} +} + +func (p *TCheckAuthRequest) InitDefault() { +} + +var TCheckAuthRequest_Cluster_DEFAULT string + +func (p *TCheckAuthRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TCheckAuthRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +func (p *TCheckAuthRequest) GetUser() (v string) { + return p.User +} + +func (p *TCheckAuthRequest) GetPasswd() (v string) { + 
return p.Passwd +} + +var TCheckAuthRequest_UserIp_DEFAULT string + +func (p *TCheckAuthRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TCheckAuthRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +var TCheckAuthRequest_PrivCtrl_DEFAULT *TPrivilegeCtrl + +func (p *TCheckAuthRequest) GetPrivCtrl() (v *TPrivilegeCtrl) { + if !p.IsSetPrivCtrl() { + return TCheckAuthRequest_PrivCtrl_DEFAULT + } + return p.PrivCtrl +} + +var TCheckAuthRequest_PrivType_DEFAULT TPrivilegeType + +func (p *TCheckAuthRequest) GetPrivType() (v TPrivilegeType) { + if !p.IsSetPrivType() { + return TCheckAuthRequest_PrivType_DEFAULT + } + return *p.PrivType +} + +var TCheckAuthRequest_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TCheckAuthRequest) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TCheckAuthRequest_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs +} +func (p *TCheckAuthRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TCheckAuthRequest) SetUser(val string) { + p.User = val +} +func (p *TCheckAuthRequest) SetPasswd(val string) { + p.Passwd = val +} +func (p *TCheckAuthRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TCheckAuthRequest) SetPrivCtrl(val *TPrivilegeCtrl) { + p.PrivCtrl = val +} +func (p *TCheckAuthRequest) SetPrivType(val *TPrivilegeType) { + p.PrivType = val +} +func (p *TCheckAuthRequest) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} + +var fieldIDToName_TCheckAuthRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "user_ip", + 5: "priv_ctrl", + 6: "priv_type", + 7: "thrift_rpc_timeout_ms", +} + +func (p *TCheckAuthRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TCheckAuthRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TCheckAuthRequest) IsSetPrivCtrl() bool { + return p.PrivCtrl != nil +} + +func (p *TCheckAuthRequest) IsSetPrivType() bool { + return p.PrivType != nil +} + +func (p *TCheckAuthRequest) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil +} + +func (p *TCheckAuthRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetUser = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPasswd = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil 
{ + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthRequest[fieldId])) +} + +func (p *TCheckAuthRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TCheckAuthRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.User = _field + return nil +} +func (p *TCheckAuthRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Passwd = _field + return nil +} +func (p *TCheckAuthRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TCheckAuthRequest) ReadField5(iprot thrift.TProtocol) error { + _field := NewTPrivilegeCtrl() + if err := _field.Read(iprot); err != nil { + return err + } + p.PrivCtrl = _field + return nil +} +func (p *TCheckAuthRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *TPrivilegeType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TPrivilegeType(v) + _field = &tmp + } + p.PrivType = _field + return nil +} +func (p *TCheckAuthRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil +} + +func (p *TCheckAuthRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCheckAuthRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto 
WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetPrivCtrl() { + if 
err = oprot.WriteFieldBegin("priv_ctrl", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.PrivCtrl.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPrivType() { + if err = oprot.WriteFieldBegin("priv_type", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.PrivType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TCheckAuthRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TCheckAuthRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCheckAuthRequest(%+v)", *p) + +} + +func (p *TCheckAuthRequest) DeepEqual(ano *TCheckAuthRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.UserIp) { + return false + } + if !p.Field5DeepEqual(ano.PrivCtrl) { + return false + } + if !p.Field6DeepEqual(ano.PrivType) { + return false + } + if !p.Field7DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + return true +} + +func (p *TCheckAuthRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TCheckAuthRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.User, src) != 0 { + return false + } + return true +} +func (p *TCheckAuthRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.Passwd, src) != 0 { + return false + } + return true +} +func (p *TCheckAuthRequest) Field4DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TCheckAuthRequest) Field5DeepEqual(src *TPrivilegeCtrl) bool { + + if !p.PrivCtrl.DeepEqual(src) { + return false + } + return true +} +func (p *TCheckAuthRequest) Field6DeepEqual(src *TPrivilegeType) bool { + + if p.PrivType == src { + return true + } else if p.PrivType == nil || src == nil { + return false + } + if 
*p.PrivType != *src { + return false + } + return true +} +func (p *TCheckAuthRequest) Field7DeepEqual(src *int64) bool { + + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false + } + if *p.ThriftRpcTimeoutMs != *src { + return false + } + return true +} + +type TCheckAuthResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +} + +func NewTCheckAuthResult_() *TCheckAuthResult_ { + return &TCheckAuthResult_{} +} + +func (p *TCheckAuthResult_) InitDefault() { +} + +var TCheckAuthResult__Status_DEFAULT *status.TStatus + +func (p *TCheckAuthResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TCheckAuthResult__Status_DEFAULT + } + return p.Status +} +func (p *TCheckAuthResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TCheckAuthResult_ = map[int16]string{ + 1: "status", +} + +func (p *TCheckAuthResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCheckAuthResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthResult_[fieldId])) +} + +func (p *TCheckAuthResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TCheckAuthResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCheckAuthResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto 
WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TCheckAuthResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TCheckAuthResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCheckAuthResult_(%+v)", *p) + +} + +func (p *TCheckAuthResult_) DeepEqual(ano *TCheckAuthResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TCheckAuthResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TGetQueryStatsRequest struct { + Type *TQueryStatsType `thrift:"type,1,optional" frugal:"1,optional,TQueryStatsType" json:"type,omitempty"` + Catalog *string `thrift:"catalog,2,optional" frugal:"2,optional,string" json:"catalog,omitempty"` + Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` + Tbl *string `thrift:"tbl,4,optional" frugal:"4,optional,string" json:"tbl,omitempty"` + ReplicaId *int64 `thrift:"replica_id,5,optional" frugal:"5,optional,i64" json:"replica_id,omitempty"` + ReplicaIds []int64 `thrift:"replica_ids,6,optional" frugal:"6,optional,list" json:"replica_ids,omitempty"` +} + +func NewTGetQueryStatsRequest() *TGetQueryStatsRequest { + return &TGetQueryStatsRequest{} +} + +func (p *TGetQueryStatsRequest) InitDefault() { +} + +var TGetQueryStatsRequest_Type_DEFAULT TQueryStatsType + +func (p *TGetQueryStatsRequest) GetType() (v TQueryStatsType) { + if !p.IsSetType() { + return TGetQueryStatsRequest_Type_DEFAULT + } + return *p.Type +} + +var TGetQueryStatsRequest_Catalog_DEFAULT string + +func (p *TGetQueryStatsRequest) GetCatalog() (v string) { + if !p.IsSetCatalog() { + return TGetQueryStatsRequest_Catalog_DEFAULT + } + return *p.Catalog +} + +var TGetQueryStatsRequest_Db_DEFAULT string + +func (p *TGetQueryStatsRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TGetQueryStatsRequest_Db_DEFAULT + } + return *p.Db +} + +var TGetQueryStatsRequest_Tbl_DEFAULT string + +func (p *TGetQueryStatsRequest) GetTbl() (v string) { + if !p.IsSetTbl() { + return TGetQueryStatsRequest_Tbl_DEFAULT + } + return *p.Tbl +} + +var TGetQueryStatsRequest_ReplicaId_DEFAULT int64 + +func (p *TGetQueryStatsRequest) GetReplicaId() (v int64) { + if !p.IsSetReplicaId() { + return TGetQueryStatsRequest_ReplicaId_DEFAULT + } + return *p.ReplicaId +} + +var TGetQueryStatsRequest_ReplicaIds_DEFAULT []int64 + +func (p *TGetQueryStatsRequest) GetReplicaIds() (v []int64) { + if !p.IsSetReplicaIds() { + return TGetQueryStatsRequest_ReplicaIds_DEFAULT + } + return 
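// Reviewer sketch (assumed usage, not part of the patch): building the
// TCheckAuthRequest and inspecting the TCheckAuthResult_ generated above. User and
// Passwd are required plain strings; UserIp is optional and therefore a pointer.
// Interpreting the returned status code is left to status.TStatus, defined elsewhere.
func buildCheckAuthRequest(user, passwd, ip string) *TCheckAuthRequest {
	return &TCheckAuthRequest{User: user, Passwd: passwd, UserIp: &ip}
}

func checkAuthStatusPresent(res *TCheckAuthResult_) bool {
	return res != nil && res.IsSetStatus() // status is a required field in the result
}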
p.ReplicaIds +} +func (p *TGetQueryStatsRequest) SetType(val *TQueryStatsType) { + p.Type = val +} +func (p *TGetQueryStatsRequest) SetCatalog(val *string) { + p.Catalog = val +} +func (p *TGetQueryStatsRequest) SetDb(val *string) { + p.Db = val +} +func (p *TGetQueryStatsRequest) SetTbl(val *string) { + p.Tbl = val +} +func (p *TGetQueryStatsRequest) SetReplicaId(val *int64) { + p.ReplicaId = val +} +func (p *TGetQueryStatsRequest) SetReplicaIds(val []int64) { + p.ReplicaIds = val +} + +var fieldIDToName_TGetQueryStatsRequest = map[int16]string{ + 1: "type", + 2: "catalog", + 3: "db", + 4: "tbl", + 5: "replica_id", + 6: "replica_ids", +} + +func (p *TGetQueryStatsRequest) IsSetType() bool { + return p.Type != nil +} + +func (p *TGetQueryStatsRequest) IsSetCatalog() bool { + return p.Catalog != nil +} + +func (p *TGetQueryStatsRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TGetQueryStatsRequest) IsSetTbl() bool { + return p.Tbl != nil +} + +func (p *TGetQueryStatsRequest) IsSetReplicaId() bool { + return p.ReplicaId != nil +} + +func (p *TGetQueryStatsRequest) IsSetReplicaIds() bool { + return p.ReplicaIds != nil +} + +func (p *TGetQueryStatsRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetQueryStatsRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *TQueryStatsType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TQueryStatsType(v) + _field = &tmp + } + p.Type = _field + return nil +} +func (p *TGetQueryStatsRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Catalog = _field + return nil +} +func (p *TGetQueryStatsRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TGetQueryStatsRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Tbl = _field + return nil +} +func (p *TGetQueryStatsRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReplicaId = _field + return nil +} +func (p *TGetQueryStatsRequest) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ReplicaIds = _field + return nil +} + +func (p *TGetQueryStatsRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetQueryStatsRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err = oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Type)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalog() { + if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Catalog); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTbl() { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetReplicaId() { + if err = oprot.WriteFieldBegin("replica_id", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReplicaId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetReplicaIds() { + if err = oprot.WriteFieldBegin("replica_ids", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.ReplicaIds)); err != nil { + return err + } + for _, v := range p.ReplicaIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetQueryStatsRequest(%+v)", *p) + +} + +func (p *TGetQueryStatsRequest) DeepEqual(ano *TGetQueryStatsRequest) bool 
{ + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Type) { + return false + } + if !p.Field2DeepEqual(ano.Catalog) { + return false + } + if !p.Field3DeepEqual(ano.Db) { + return false + } + if !p.Field4DeepEqual(ano.Tbl) { + return false + } + if !p.Field5DeepEqual(ano.ReplicaId) { + return false + } + if !p.Field6DeepEqual(ano.ReplicaIds) { + return false + } + return true +} + +func (p *TGetQueryStatsRequest) Field1DeepEqual(src *TQueryStatsType) bool { + + if p.Type == src { + return true + } else if p.Type == nil || src == nil { + return false + } + if *p.Type != *src { + return false + } + return true +} +func (p *TGetQueryStatsRequest) Field2DeepEqual(src *string) bool { + + if p.Catalog == src { + return true + } else if p.Catalog == nil || src == nil { + return false + } + if strings.Compare(*p.Catalog, *src) != 0 { + return false + } + return true +} +func (p *TGetQueryStatsRequest) Field3DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TGetQueryStatsRequest) Field4DeepEqual(src *string) bool { + + if p.Tbl == src { + return true + } else if p.Tbl == nil || src == nil { + return false + } + if strings.Compare(*p.Tbl, *src) != 0 { + return false + } + return true +} +func (p *TGetQueryStatsRequest) Field5DeepEqual(src *int64) bool { + + if p.ReplicaId == src { + return true + } else if p.ReplicaId == nil || src == nil { + return false + } + if *p.ReplicaId != *src { + return false + } + return true +} +func (p *TGetQueryStatsRequest) Field6DeepEqual(src []int64) bool { + + if len(p.ReplicaIds) != len(src) { + return false + } + for i, v := range p.ReplicaIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TTableQueryStats struct { + Field *string `thrift:"field,1,optional" frugal:"1,optional,string" json:"field,omitempty"` + QueryStats *int64 `thrift:"query_stats,2,optional" frugal:"2,optional,i64" json:"query_stats,omitempty"` + FilterStats *int64 `thrift:"filter_stats,3,optional" frugal:"3,optional,i64" json:"filter_stats,omitempty"` +} + +func NewTTableQueryStats() *TTableQueryStats { + return &TTableQueryStats{} +} + +func (p *TTableQueryStats) InitDefault() { +} + +var TTableQueryStats_Field_DEFAULT string + +func (p *TTableQueryStats) GetField() (v string) { + if !p.IsSetField() { + return TTableQueryStats_Field_DEFAULT + } + return *p.Field +} + +var TTableQueryStats_QueryStats_DEFAULT int64 + +func (p *TTableQueryStats) GetQueryStats() (v int64) { + if !p.IsSetQueryStats() { + return TTableQueryStats_QueryStats_DEFAULT + } + return *p.QueryStats +} + +var TTableQueryStats_FilterStats_DEFAULT int64 + +func (p *TTableQueryStats) GetFilterStats() (v int64) { + if !p.IsSetFilterStats() { + return TTableQueryStats_FilterStats_DEFAULT + } + return *p.FilterStats +} +func (p *TTableQueryStats) SetField(val *string) { + p.Field = val +} +func (p *TTableQueryStats) SetQueryStats(val *int64) { + p.QueryStats = val +} +func (p *TTableQueryStats) SetFilterStats(val *int64) { + p.FilterStats = val +} + +var fieldIDToName_TTableQueryStats = map[int16]string{ + 1: "field", + 2: "query_stats", + 3: "filter_stats", +} + +func (p *TTableQueryStats) IsSetField() bool { + return p.Field != nil +} + +func (p *TTableQueryStats) IsSetQueryStats() bool { + return p.QueryStats != nil +} + +func (p *TTableQueryStats) IsSetFilterStats() bool 
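// Reviewer sketch (assumed usage, not part of the patch): constructing the
// TGetQueryStatsRequest generated above with its pointer-typed setters; every
// field is optional, so unset fields are simply left nil.
func newDbQueryStatsRequest(catalog, db string) *TGetQueryStatsRequest {
	req := NewTGetQueryStatsRequest()
	req.SetCatalog(&catalog)
	req.SetDb(&db)
	return req
}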
{ + return p.FilterStats != nil +} + +func (p *TTableQueryStats) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableQueryStats[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableQueryStats) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Field = _field + return nil +} +func (p *TTableQueryStats) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.QueryStats = _field + return nil +} +func (p *TTableQueryStats) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FilterStats = _field + return nil +} + +func (p *TTableQueryStats) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTableQueryStats"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return 
thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTableQueryStats) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetField() { + if err = oprot.WriteFieldBegin("field", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Field); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTableQueryStats) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryStats() { + if err = oprot.WriteFieldBegin("query_stats", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.QueryStats); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTableQueryStats) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFilterStats() { + if err = oprot.WriteFieldBegin("filter_stats", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FilterStats); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTableQueryStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTableQueryStats(%+v)", *p) + +} + +func (p *TTableQueryStats) DeepEqual(ano *TTableQueryStats) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Field) { + return false + } + if !p.Field2DeepEqual(ano.QueryStats) { + return false + } + if !p.Field3DeepEqual(ano.FilterStats) { + return false + } + return true +} + +func (p *TTableQueryStats) Field1DeepEqual(src *string) bool { + + if p.Field == src { + return true + } else if p.Field == nil || src == nil { + return false + } + if strings.Compare(*p.Field, *src) != 0 { + return false + } + return true +} +func (p *TTableQueryStats) Field2DeepEqual(src *int64) bool { + + if p.QueryStats == src { + return true + } else if p.QueryStats == nil || src == nil { + return false + } + if *p.QueryStats != *src { + return false + } + return true +} +func (p *TTableQueryStats) Field3DeepEqual(src *int64) bool { + + if p.FilterStats == src { + return true + } else if p.FilterStats == nil || src == nil { + return false + } + if *p.FilterStats != *src { + return false + } + return true +} + +type TTableIndexQueryStats struct { + IndexName *string `thrift:"index_name,1,optional" frugal:"1,optional,string" json:"index_name,omitempty"` + TableStats []*TTableQueryStats `thrift:"table_stats,2,optional" frugal:"2,optional,list" json:"table_stats,omitempty"` +} + +func NewTTableIndexQueryStats() *TTableIndexQueryStats { + return &TTableIndexQueryStats{} +} + +func (p 
*TTableIndexQueryStats) InitDefault() { +} + +var TTableIndexQueryStats_IndexName_DEFAULT string + +func (p *TTableIndexQueryStats) GetIndexName() (v string) { + if !p.IsSetIndexName() { + return TTableIndexQueryStats_IndexName_DEFAULT + } + return *p.IndexName +} + +var TTableIndexQueryStats_TableStats_DEFAULT []*TTableQueryStats + +func (p *TTableIndexQueryStats) GetTableStats() (v []*TTableQueryStats) { + if !p.IsSetTableStats() { + return TTableIndexQueryStats_TableStats_DEFAULT + } + return p.TableStats +} +func (p *TTableIndexQueryStats) SetIndexName(val *string) { + p.IndexName = val +} +func (p *TTableIndexQueryStats) SetTableStats(val []*TTableQueryStats) { + p.TableStats = val +} + +var fieldIDToName_TTableIndexQueryStats = map[int16]string{ + 1: "index_name", + 2: "table_stats", +} + +func (p *TTableIndexQueryStats) IsSetIndexName() bool { + return p.IndexName != nil +} + +func (p *TTableIndexQueryStats) IsSetTableStats() bool { + return p.TableStats != nil +} + +func (p *TTableIndexQueryStats) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableIndexQueryStats[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableIndexQueryStats) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.IndexName = _field + return nil +} +func (p *TTableIndexQueryStats) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTableQueryStats, 0, size) + values := make([]TTableQueryStats, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableStats = _field + return nil +} + +func (p *TTableIndexQueryStats) Write(oprot 
thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTableIndexQueryStats"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTableIndexQueryStats) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetIndexName() { + if err = oprot.WriteFieldBegin("index_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.IndexName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTableIndexQueryStats) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableStats() { + if err = oprot.WriteFieldBegin("table_stats", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableStats)); err != nil { + return err + } + for _, v := range p.TableStats { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTableIndexQueryStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTableIndexQueryStats(%+v)", *p) + +} + +func (p *TTableIndexQueryStats) DeepEqual(ano *TTableIndexQueryStats) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.IndexName) { + return false + } + if !p.Field2DeepEqual(ano.TableStats) { + return false + } + return true +} + +func (p *TTableIndexQueryStats) Field1DeepEqual(src *string) bool { + + if p.IndexName == src { + return true + } else if p.IndexName == nil || src == nil { + return false + } + if strings.Compare(*p.IndexName, *src) != 0 { + return false + } + return true +} +func (p *TTableIndexQueryStats) Field2DeepEqual(src []*TTableQueryStats) bool { + + if len(p.TableStats) != len(src) { + return false + } + for i, v := range p.TableStats { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TQueryStatsResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + SimpleResult_ map[string]int64 `thrift:"simple_result,2,optional" frugal:"2,optional,map" 
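// Reviewer sketch (assumed usage, not part of the patch): grouping the per-index
// column statistics of TTableIndexQueryStats, generated above, by index name.
func statsByIndexName(indexes []*TTableIndexQueryStats) map[string][]*TTableQueryStats {
	out := make(map[string][]*TTableQueryStats, len(indexes))
	for _, idx := range indexes {
		out[idx.GetIndexName()] = idx.GetTableStats()
	}
	return out
}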
json:"simple_result,omitempty"` + TableStats []*TTableQueryStats `thrift:"table_stats,3,optional" frugal:"3,optional,list" json:"table_stats,omitempty"` + TableVerbosStats []*TTableIndexQueryStats `thrift:"table_verbos_stats,4,optional" frugal:"4,optional,list" json:"table_verbos_stats,omitempty"` + TabletStats map[int64]int64 `thrift:"tablet_stats,5,optional" frugal:"5,optional,map" json:"tablet_stats,omitempty"` +} + +func NewTQueryStatsResult_() *TQueryStatsResult_ { + return &TQueryStatsResult_{} +} + +func (p *TQueryStatsResult_) InitDefault() { +} + +var TQueryStatsResult__Status_DEFAULT *status.TStatus + +func (p *TQueryStatsResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TQueryStatsResult__Status_DEFAULT + } + return p.Status +} + +var TQueryStatsResult__SimpleResult__DEFAULT map[string]int64 + +func (p *TQueryStatsResult_) GetSimpleResult_() (v map[string]int64) { + if !p.IsSetSimpleResult_() { + return TQueryStatsResult__SimpleResult__DEFAULT + } + return p.SimpleResult_ +} + +var TQueryStatsResult__TableStats_DEFAULT []*TTableQueryStats + +func (p *TQueryStatsResult_) GetTableStats() (v []*TTableQueryStats) { + if !p.IsSetTableStats() { + return TQueryStatsResult__TableStats_DEFAULT + } + return p.TableStats +} + +var TQueryStatsResult__TableVerbosStats_DEFAULT []*TTableIndexQueryStats + +func (p *TQueryStatsResult_) GetTableVerbosStats() (v []*TTableIndexQueryStats) { + if !p.IsSetTableVerbosStats() { + return TQueryStatsResult__TableVerbosStats_DEFAULT + } + return p.TableVerbosStats +} + +var TQueryStatsResult__TabletStats_DEFAULT map[int64]int64 + +func (p *TQueryStatsResult_) GetTabletStats() (v map[int64]int64) { + if !p.IsSetTabletStats() { + return TQueryStatsResult__TabletStats_DEFAULT + } + return p.TabletStats +} +func (p *TQueryStatsResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TQueryStatsResult_) SetSimpleResult_(val map[string]int64) { + p.SimpleResult_ = val +} +func (p *TQueryStatsResult_) SetTableStats(val []*TTableQueryStats) { + p.TableStats = val +} +func (p *TQueryStatsResult_) SetTableVerbosStats(val []*TTableIndexQueryStats) { + p.TableVerbosStats = val +} +func (p *TQueryStatsResult_) SetTabletStats(val map[int64]int64) { + p.TabletStats = val +} + +var fieldIDToName_TQueryStatsResult_ = map[int16]string{ + 1: "status", + 2: "simple_result", + 3: "table_stats", + 4: "table_verbos_stats", + 5: "tablet_stats", +} + +func (p *TQueryStatsResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TQueryStatsResult_) IsSetSimpleResult_() bool { + return p.SimpleResult_ != nil +} + +func (p *TQueryStatsResult_) IsSetTableStats() bool { + return p.TableStats != nil +} + +func (p *TQueryStatsResult_) IsSetTableVerbosStats() bool { + return p.TableVerbosStats != nil +} + +func (p *TQueryStatsResult_) IsSetTabletStats() bool { + return p.TabletStats != nil +} + +func (p *TQueryStatsResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.MAP { + if err = 
p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.MAP { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatsResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryStatsResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TQueryStatsResult_) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]int64, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.SimpleResult_ = _field + return nil +} +func (p *TQueryStatsResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTableQueryStats, 0, size) + values := make([]TTableQueryStats, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableStats = _field + return nil +} +func (p *TQueryStatsResult_) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTableIndexQueryStats, 0, size) + values := make([]TTableIndexQueryStats, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableVerbosStats = _field + return nil +} +func (p *TQueryStatsResult_) 
ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int64]int64, size) + for i := 0; i < size; i++ { + var _key int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + + var _val int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TabletStats = _field + return nil +} + +func (p *TQueryStatsResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TQueryStatsResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TQueryStatsResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TQueryStatsResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSimpleResult_() { + if err = oprot.WriteFieldBegin("simple_result", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.I64, len(p.SimpleResult_)); err != nil { + return err + } + for k, v := range p.SimpleResult_ { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueryStatsResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableStats() { + if err = oprot.WriteFieldBegin("table_stats", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableStats)); err != nil { + return err + } + for _, v 
:= range p.TableStats { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueryStatsResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTableVerbosStats() { + if err = oprot.WriteFieldBegin("table_verbos_stats", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableVerbosStats)); err != nil { + return err + } + for _, v := range p.TableVerbosStats { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryStatsResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletStats() { + if err = oprot.WriteFieldBegin("tablet_stats", thrift.MAP, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.TabletStats)); err != nil { + return err + } + for k, v := range p.TabletStats { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TQueryStatsResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryStatsResult_(%+v)", *p) + +} + +func (p *TQueryStatsResult_) DeepEqual(ano *TQueryStatsResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.SimpleResult_) { + return false + } + if !p.Field3DeepEqual(ano.TableStats) { + return false + } + if !p.Field4DeepEqual(ano.TableVerbosStats) { + return false + } + if !p.Field5DeepEqual(ano.TabletStats) { + return false + } + return true +} + +func (p *TQueryStatsResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TQueryStatsResult_) Field2DeepEqual(src map[string]int64) bool { + + if len(p.SimpleResult_) != len(src) { + return false + } + for k, v := range p.SimpleResult_ { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TQueryStatsResult_) Field3DeepEqual(src []*TTableQueryStats) bool { + + if len(p.TableStats) != len(src) { + return false + } + for i, v := range p.TableStats { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TQueryStatsResult_) Field4DeepEqual(src []*TTableIndexQueryStats) bool { + + if len(p.TableVerbosStats) != len(src) { + return false + } + for 
i, v := range p.TableVerbosStats { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TQueryStatsResult_) Field5DeepEqual(src map[int64]int64) bool { + + if len(p.TabletStats) != len(src) { + return false + } + for k, v := range p.TabletStats { + _src := src[k] + if v != _src { + return false + } + } + return true +} + +type TGetBinlogRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` + TableId *int64 `thrift:"table_id,6,optional" frugal:"6,optional,i64" json:"table_id,omitempty"` + UserIp *string `thrift:"user_ip,7,optional" frugal:"7,optional,string" json:"user_ip,omitempty"` + Token *string `thrift:"token,8,optional" frugal:"8,optional,string" json:"token,omitempty"` + PrevCommitSeq *int64 `thrift:"prev_commit_seq,9,optional" frugal:"9,optional,i64" json:"prev_commit_seq,omitempty"` +} + +func NewTGetBinlogRequest() *TGetBinlogRequest { + return &TGetBinlogRequest{} +} + +func (p *TGetBinlogRequest) InitDefault() { +} + +var TGetBinlogRequest_Cluster_DEFAULT string + +func (p *TGetBinlogRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGetBinlogRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TGetBinlogRequest_User_DEFAULT string + +func (p *TGetBinlogRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TGetBinlogRequest_User_DEFAULT + } + return *p.User +} + +var TGetBinlogRequest_Passwd_DEFAULT string + +func (p *TGetBinlogRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TGetBinlogRequest_Passwd_DEFAULT + } + return *p.Passwd +} + +var TGetBinlogRequest_Db_DEFAULT string + +func (p *TGetBinlogRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TGetBinlogRequest_Db_DEFAULT + } + return *p.Db +} + +var TGetBinlogRequest_Table_DEFAULT string + +func (p *TGetBinlogRequest) GetTable() (v string) { + if !p.IsSetTable() { + return TGetBinlogRequest_Table_DEFAULT + } + return *p.Table +} + +var TGetBinlogRequest_TableId_DEFAULT int64 + +func (p *TGetBinlogRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TGetBinlogRequest_TableId_DEFAULT + } + return *p.TableId +} + +var TGetBinlogRequest_UserIp_DEFAULT string + +func (p *TGetBinlogRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TGetBinlogRequest_UserIp_DEFAULT + } + return *p.UserIp +} + +var TGetBinlogRequest_Token_DEFAULT string + +func (p *TGetBinlogRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TGetBinlogRequest_Token_DEFAULT + } + return *p.Token +} + +var TGetBinlogRequest_PrevCommitSeq_DEFAULT int64 + +func (p *TGetBinlogRequest) GetPrevCommitSeq() (v int64) { + if !p.IsSetPrevCommitSeq() { + return TGetBinlogRequest_PrevCommitSeq_DEFAULT + } + return *p.PrevCommitSeq +} +func (p *TGetBinlogRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TGetBinlogRequest) SetUser(val *string) { + p.User = val +} +func (p *TGetBinlogRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TGetBinlogRequest) SetDb(val *string) { + p.Db = val +} +func (p *TGetBinlogRequest) SetTable(val *string) { + p.Table = val +} +func (p *TGetBinlogRequest) 
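// Reviewer sketch (assumed usage, not part of the patch): unpacking the
// TQueryStatsResult_ generated above. Only the representation the FE chose to
// populate (simple map, per-table list, verbose per-index list, or per-tablet map)
// is set; the untouched fields stay nil.
func extractSimpleAndTabletStats(res *TQueryStatsResult_) (map[string]int64, map[int64]int64) {
	if res == nil || !res.IsSetStatus() {
		return nil, nil // no usable result without a status
	}
	return res.GetSimpleResult_(), res.GetTabletStats()
}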
SetTableId(val *int64) { + p.TableId = val +} +func (p *TGetBinlogRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TGetBinlogRequest) SetToken(val *string) { + p.Token = val +} +func (p *TGetBinlogRequest) SetPrevCommitSeq(val *int64) { + p.PrevCommitSeq = val +} + +var fieldIDToName_TGetBinlogRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "table", + 6: "table_id", + 7: "user_ip", + 8: "token", + 9: "prev_commit_seq", +} + +func (p *TGetBinlogRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TGetBinlogRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TGetBinlogRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TGetBinlogRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TGetBinlogRequest) IsSetTable() bool { + return p.Table != nil +} + +func (p *TGetBinlogRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TGetBinlogRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TGetBinlogRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TGetBinlogRequest) IsSetPrevCommitSeq() bool { + return p.PrevCommitSeq != nil +} + +func (p *TGetBinlogRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + 
if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBinlogRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TGetBinlogRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p *TGetBinlogRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Passwd = _field + return nil +} +func (p *TGetBinlogRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TGetBinlogRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} +func (p *TGetBinlogRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TGetBinlogRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TGetBinlogRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TGetBinlogRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.PrevCommitSeq = _field + return nil +} + +func (p *TGetBinlogRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetBinlogRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { 
+ fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TGetBinlogRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetPrevCommitSeq() { + if err = oprot.WriteFieldBegin("prev_commit_seq", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.PrevCommitSeq); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TGetBinlogRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetBinlogRequest(%+v)", *p) + +} + +func (p *TGetBinlogRequest) DeepEqual(ano *TGetBinlogRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Table) { + return false + } + if !p.Field6DeepEqual(ano.TableId) { + return false + } + if !p.Field7DeepEqual(ano.UserIp) { + return false + } + if !p.Field8DeepEqual(ano.Token) { + return false + } + if !p.Field9DeepEqual(ano.PrevCommitSeq) { + return false + } + return true +} + +func (p *TGetBinlogRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return 
false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field3DeepEqual(src *string) bool { + + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { + return false + } + if strings.Compare(*p.Passwd, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field5DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field6DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} +func (p *TGetBinlogRequest) Field7DeepEqual(src *string) bool { + + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field8DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogRequest) Field9DeepEqual(src *int64) bool { + + if p.PrevCommitSeq == src { + return true + } else if p.PrevCommitSeq == nil || src == nil { + return false + } + if *p.PrevCommitSeq != *src { + return false + } + return true +} + +type TBinlog struct { + CommitSeq *int64 `thrift:"commit_seq,1,optional" frugal:"1,optional,i64" json:"commit_seq,omitempty"` + Timestamp *int64 `thrift:"timestamp,2,optional" frugal:"2,optional,i64" json:"timestamp,omitempty"` + Type *TBinlogType `thrift:"type,3,optional" frugal:"3,optional,TBinlogType" json:"type,omitempty"` + DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` + TableIds []int64 `thrift:"table_ids,5,optional" frugal:"5,optional,list<i64>" json:"table_ids,omitempty"` + Data *string `thrift:"data,6,optional" frugal:"6,optional,string" json:"data,omitempty"` + Belong *int64 `thrift:"belong,7,optional" frugal:"7,optional,i64" json:"belong,omitempty"` + TableRef *int64 `thrift:"table_ref,8,optional" frugal:"8,optional,i64" json:"table_ref,omitempty"` + RemoveEnableCache *bool `thrift:"remove_enable_cache,9,optional" frugal:"9,optional,bool" json:"remove_enable_cache,omitempty"` +} + +func NewTBinlog() *TBinlog { + return &TBinlog{} +} + +func (p *TBinlog) InitDefault() { +} + +var TBinlog_CommitSeq_DEFAULT int64 + +func (p *TBinlog) GetCommitSeq() (v int64) { + if !p.IsSetCommitSeq() { + return TBinlog_CommitSeq_DEFAULT + } + return *p.CommitSeq +} + +var TBinlog_Timestamp_DEFAULT int64 + +func (p *TBinlog) GetTimestamp() (v int64) { + if !p.IsSetTimestamp() { + return TBinlog_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var TBinlog_Type_DEFAULT TBinlogType + +func (p *TBinlog) GetType() (v TBinlogType) { + if !p.IsSetType() { + return 
TBinlog_Type_DEFAULT + } + return *p.Type +} + +var TBinlog_DbId_DEFAULT int64 + +func (p *TBinlog) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TBinlog_DbId_DEFAULT + } + return *p.DbId +} + +var TBinlog_TableIds_DEFAULT []int64 + +func (p *TBinlog) GetTableIds() (v []int64) { + if !p.IsSetTableIds() { + return TBinlog_TableIds_DEFAULT + } + return p.TableIds +} + +var TBinlog_Data_DEFAULT string + +func (p *TBinlog) GetData() (v string) { + if !p.IsSetData() { + return TBinlog_Data_DEFAULT + } + return *p.Data +} + +var TBinlog_Belong_DEFAULT int64 + +func (p *TBinlog) GetBelong() (v int64) { + if !p.IsSetBelong() { + return TBinlog_Belong_DEFAULT + } + return *p.Belong +} + +var TBinlog_TableRef_DEFAULT int64 + +func (p *TBinlog) GetTableRef() (v int64) { + if !p.IsSetTableRef() { + return TBinlog_TableRef_DEFAULT + } + return *p.TableRef +} + +var TBinlog_RemoveEnableCache_DEFAULT bool + +func (p *TBinlog) GetRemoveEnableCache() (v bool) { + if !p.IsSetRemoveEnableCache() { + return TBinlog_RemoveEnableCache_DEFAULT + } + return *p.RemoveEnableCache +} +func (p *TBinlog) SetCommitSeq(val *int64) { + p.CommitSeq = val +} +func (p *TBinlog) SetTimestamp(val *int64) { + p.Timestamp = val +} +func (p *TBinlog) SetType(val *TBinlogType) { + p.Type = val +} +func (p *TBinlog) SetDbId(val *int64) { + p.DbId = val +} +func (p *TBinlog) SetTableIds(val []int64) { + p.TableIds = val +} +func (p *TBinlog) SetData(val *string) { + p.Data = val +} +func (p *TBinlog) SetBelong(val *int64) { + p.Belong = val +} +func (p *TBinlog) SetTableRef(val *int64) { + p.TableRef = val +} +func (p *TBinlog) SetRemoveEnableCache(val *bool) { + p.RemoveEnableCache = val +} + +var fieldIDToName_TBinlog = map[int16]string{ + 1: "commit_seq", + 2: "timestamp", + 3: "type", + 4: "db_id", + 5: "table_ids", + 6: "data", + 7: "belong", + 8: "table_ref", + 9: "remove_enable_cache", +} + +func (p *TBinlog) IsSetCommitSeq() bool { + return p.CommitSeq != nil +} + +func (p *TBinlog) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *TBinlog) IsSetType() bool { + return p.Type != nil +} + +func (p *TBinlog) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TBinlog) IsSetTableIds() bool { + return p.TableIds != nil +} + +func (p *TBinlog) IsSetData() bool { + return p.Data != nil +} + +func (p *TBinlog) IsSetBelong() bool { + return p.Belong != nil +} + +func (p *TBinlog) IsSetTableRef() bool { + return p.TableRef != nil +} + +func (p *TBinlog) IsSetRemoveEnableCache() bool { + return p.RemoveEnableCache != nil +} + +func (p *TBinlog) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId 
== thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.LIST { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlog[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBinlog) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CommitSeq = _field + return nil +} +func (p *TBinlog) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Timestamp = _field + return nil +} +func (p *TBinlog) ReadField3(iprot thrift.TProtocol) error { + + var _field *TBinlogType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TBinlogType(v) + _field = &tmp + } + p.Type = _field + return nil +} +func (p *TBinlog) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TBinlog) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableIds = _field + return nil +} +func (p *TBinlog) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + 
return err + } else { + _field = &v + } + p.Data = _field + return nil +} +func (p *TBinlog) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Belong = _field + return nil +} +func (p *TBinlog) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableRef = _field + return nil +} +func (p *TBinlog) ReadField9(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.RemoveEnableCache = _field + return nil +} + +func (p *TBinlog) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBinlog"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TBinlog) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCommitSeq() { + if err = oprot.WriteFieldBegin("commit_seq", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CommitSeq); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TBinlog) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err = oprot.WriteFieldBegin("timestamp", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Timestamp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TBinlog) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err = 
oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Type)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TBinlog) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TBinlog) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTableIds() { + if err = oprot.WriteFieldBegin("table_ids", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TableIds)); err != nil { + return err + } + for _, v := range p.TableIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TBinlog) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetData() { + if err = oprot.WriteFieldBegin("data", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Data); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TBinlog) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetBelong() { + if err = oprot.WriteFieldBegin("belong", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Belong); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TBinlog) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTableRef() { + if err = oprot.WriteFieldBegin("table_ref", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableRef); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p 
*TBinlog) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoveEnableCache() { + if err = oprot.WriteFieldBegin("remove_enable_cache", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.RemoveEnableCache); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TBinlog) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TBinlog(%+v)", *p) + +} + +func (p *TBinlog) DeepEqual(ano *TBinlog) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.CommitSeq) { + return false + } + if !p.Field2DeepEqual(ano.Timestamp) { + return false + } + if !p.Field3DeepEqual(ano.Type) { + return false + } + if !p.Field4DeepEqual(ano.DbId) { + return false + } + if !p.Field5DeepEqual(ano.TableIds) { + return false + } + if !p.Field6DeepEqual(ano.Data) { + return false + } + if !p.Field7DeepEqual(ano.Belong) { + return false + } + if !p.Field8DeepEqual(ano.TableRef) { + return false + } + if !p.Field9DeepEqual(ano.RemoveEnableCache) { + return false + } + return true +} + +func (p *TBinlog) Field1DeepEqual(src *int64) bool { + + if p.CommitSeq == src { + return true + } else if p.CommitSeq == nil || src == nil { + return false + } + if *p.CommitSeq != *src { + return false + } + return true +} +func (p *TBinlog) Field2DeepEqual(src *int64) bool { + + if p.Timestamp == src { + return true + } else if p.Timestamp == nil || src == nil { + return false + } + if *p.Timestamp != *src { + return false + } + return true +} +func (p *TBinlog) Field3DeepEqual(src *TBinlogType) bool { + + if p.Type == src { + return true + } else if p.Type == nil || src == nil { + return false + } + if *p.Type != *src { + return false + } + return true +} +func (p *TBinlog) Field4DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TBinlog) Field5DeepEqual(src []int64) bool { + + if len(p.TableIds) != len(src) { + return false + } + for i, v := range p.TableIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TBinlog) Field6DeepEqual(src *string) bool { + + if p.Data == src { + return true + } else if p.Data == nil || src == nil { + return false + } + if strings.Compare(*p.Data, *src) != 0 { + return false + } + return true +} +func (p *TBinlog) Field7DeepEqual(src *int64) bool { + + if p.Belong == src { + return true + } else if p.Belong == nil || src == nil { + return false + } + if *p.Belong != *src { + return false + } + return true +} +func (p *TBinlog) Field8DeepEqual(src *int64) bool { + + if p.TableRef == src { + return true + } else if p.TableRef == nil || src == nil { + return false + } + if *p.TableRef != *src { + return false + } + return true +} +func (p *TBinlog) Field9DeepEqual(src *bool) bool { + + if p.RemoveEnableCache == src { + return true + } else if p.RemoveEnableCache == nil || src == nil { + return false + } + if *p.RemoveEnableCache != *src { + return false + } + return true +} + +type TGetBinlogResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" 
json:"status,omitempty"` + NextCommitSeq *int64 `thrift:"next_commit_seq,2,optional" frugal:"2,optional,i64" json:"next_commit_seq,omitempty"` + Binlogs []*TBinlog `thrift:"binlogs,3,optional" frugal:"3,optional,list<TBinlog>" json:"binlogs,omitempty"` + FeVersion *string `thrift:"fe_version,4,optional" frugal:"4,optional,string" json:"fe_version,omitempty"` + FeMetaVersion *int64 `thrift:"fe_meta_version,5,optional" frugal:"5,optional,i64" json:"fe_meta_version,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,6,optional" frugal:"6,optional,types.TNetworkAddress" json:"master_address,omitempty"` +} + +func NewTGetBinlogResult_() *TGetBinlogResult_ { + return &TGetBinlogResult_{} +} + +func (p *TGetBinlogResult_) InitDefault() { +} + +var TGetBinlogResult__Status_DEFAULT *status.TStatus + +func (p *TGetBinlogResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetBinlogResult__Status_DEFAULT + } + return p.Status +} + +var TGetBinlogResult__NextCommitSeq_DEFAULT int64 + +func (p *TGetBinlogResult_) GetNextCommitSeq() (v int64) { + if !p.IsSetNextCommitSeq() { + return TGetBinlogResult__NextCommitSeq_DEFAULT + } + return *p.NextCommitSeq +} + +var TGetBinlogResult__Binlogs_DEFAULT []*TBinlog + +func (p *TGetBinlogResult_) GetBinlogs() (v []*TBinlog) { + if !p.IsSetBinlogs() { + return TGetBinlogResult__Binlogs_DEFAULT + } + return p.Binlogs +} + +var TGetBinlogResult__FeVersion_DEFAULT string + +func (p *TGetBinlogResult_) GetFeVersion() (v string) { + if !p.IsSetFeVersion() { + return TGetBinlogResult__FeVersion_DEFAULT + } + return *p.FeVersion +} + +var TGetBinlogResult__FeMetaVersion_DEFAULT int64 + +func (p *TGetBinlogResult_) GetFeMetaVersion() (v int64) { + if !p.IsSetFeMetaVersion() { + return TGetBinlogResult__FeMetaVersion_DEFAULT + } + return *p.FeMetaVersion +} + +var TGetBinlogResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TGetBinlogResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetBinlogResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TGetBinlogResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetBinlogResult_) SetNextCommitSeq(val *int64) { + p.NextCommitSeq = val +} +func (p *TGetBinlogResult_) SetBinlogs(val []*TBinlog) { + p.Binlogs = val +} +func (p *TGetBinlogResult_) SetFeVersion(val *string) { + p.FeVersion = val +} +func (p *TGetBinlogResult_) SetFeMetaVersion(val *int64) { + p.FeMetaVersion = val +} +func (p *TGetBinlogResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TGetBinlogResult_ = map[int16]string{ + 1: "status", + 2: "next_commit_seq", + 3: "binlogs", + 4: "fe_version", + 5: "fe_meta_version", + 6: "master_address", +} + +func (p *TGetBinlogResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetBinlogResult_) IsSetNextCommitSeq() bool { + return p.NextCommitSeq != nil +} + +func (p *TGetBinlogResult_) IsSetBinlogs() bool { + return p.Binlogs != nil +} + +func (p *TGetBinlogResult_) IsSetFeVersion() bool { + return p.FeVersion != nil +} + +func (p *TGetBinlogResult_) IsSetFeMetaVersion() bool { + return p.FeMetaVersion != nil +} + +func (p *TGetBinlogResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TGetBinlogResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + 
for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBinlogResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetBinlogResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.NextCommitSeq = _field + return nil +} +func (p *TGetBinlogResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TBinlog, 0, size) + values := make([]TBinlog, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Binlogs = _field + return nil +} +func (p *TGetBinlogResult_) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FeVersion = _field + return nil +} +func (p *TGetBinlogResult_) ReadField5(iprot thrift.TProtocol) error { + + var 
_field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FeMetaVersion = _field + return nil +} +func (p *TGetBinlogResult_) ReadField6(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TGetBinlogResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetBinlogResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetNextCommitSeq() { + if err = oprot.WriteFieldBegin("next_commit_seq", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.NextCommitSeq); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBinlogs() { + if err = oprot.WriteFieldBegin("binlogs", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Binlogs)); err != nil { + return err + } + for _, v := range p.Binlogs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFeVersion() { + if err = oprot.WriteFieldBegin("fe_version", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FeVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFeMetaVersion() { + if err = oprot.WriteFieldBegin("fe_meta_version", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FeMetaVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetBinlogResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetBinlogResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetBinlogResult_(%+v)", *p) + +} + +func (p *TGetBinlogResult_) DeepEqual(ano *TGetBinlogResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.NextCommitSeq) { + return false + } + if !p.Field3DeepEqual(ano.Binlogs) { + return false + } + if !p.Field4DeepEqual(ano.FeVersion) { + return false + } + if !p.Field5DeepEqual(ano.FeMetaVersion) { + return false + } + if !p.Field6DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TGetBinlogResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetBinlogResult_) Field2DeepEqual(src *int64) bool { + + if p.NextCommitSeq == src { + return true + } else if p.NextCommitSeq == nil || src == nil { + return false + } + if *p.NextCommitSeq != *src { + return false + } + return true +} +func (p *TGetBinlogResult_) Field3DeepEqual(src []*TBinlog) bool { + + if len(p.Binlogs) != len(src) { + return false + } + for i, v := range p.Binlogs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TGetBinlogResult_) Field4DeepEqual(src *string) bool { + + if p.FeVersion == src { + return true + } else if p.FeVersion == nil || src == nil { + return false + } + if strings.Compare(*p.FeVersion, *src) != 0 { + return false + } + return true +} +func (p *TGetBinlogResult_) Field5DeepEqual(src *int64) bool { + + 
if p.FeMetaVersion == src { + return true + } else if p.FeMetaVersion == nil || src == nil { + return false + } + if *p.FeMetaVersion != *src { + return false + } + return true +} +func (p *TGetBinlogResult_) Field6DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TGetTabletReplicaInfosRequest struct { + TabletIds []int64 `thrift:"tablet_ids,1,required" frugal:"1,required,list<i64>" json:"tablet_ids"` +} + +func NewTGetTabletReplicaInfosRequest() *TGetTabletReplicaInfosRequest { + return &TGetTabletReplicaInfosRequest{} +} + +func (p *TGetTabletReplicaInfosRequest) InitDefault() { +} + +func (p *TGetTabletReplicaInfosRequest) GetTabletIds() (v []int64) { + return p.TabletIds +} +func (p *TGetTabletReplicaInfosRequest) SetTabletIds(val []int64) { + p.TabletIds = val +} + +var fieldIDToName_TGetTabletReplicaInfosRequest = map[int16]string{ + 1: "tablet_ids", +} + +func (p *TGetTabletReplicaInfosRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTabletIds bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTabletIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTabletIds { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTabletReplicaInfosRequest[fieldId])) +} + +func (p *TGetTabletReplicaInfosRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TabletIds = _field + return nil +} + +func (p *TGetTabletReplicaInfosRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetTabletReplicaInfosRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { 
+ if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetTabletReplicaInfosRequest(%+v)", *p) + +} + +func (p *TGetTabletReplicaInfosRequest) DeepEqual(ano *TGetTabletReplicaInfosRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TabletIds) { + return false + } + return true +} + +func (p *TGetTabletReplicaInfosRequest) Field1DeepEqual(src []int64) bool { + + if len(p.TabletIds) != len(src) { + return false + } + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} + +type TGetTabletReplicaInfosResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + TabletReplicaInfos map[int64][]*types.TReplicaInfo `thrift:"tablet_replica_infos,2,optional" frugal:"2,optional,map<i64:list<types.TReplicaInfo>>" json:"tablet_replica_infos,omitempty"` + Token *string `thrift:"token,3,optional" frugal:"3,optional,string" json:"token,omitempty"` +} + +func NewTGetTabletReplicaInfosResult_() *TGetTabletReplicaInfosResult_ { + return &TGetTabletReplicaInfosResult_{} +} + +func (p *TGetTabletReplicaInfosResult_) InitDefault() { +} + +var TGetTabletReplicaInfosResult__Status_DEFAULT *status.TStatus + +func (p *TGetTabletReplicaInfosResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetTabletReplicaInfosResult__Status_DEFAULT + } + return p.Status +} + +var TGetTabletReplicaInfosResult__TabletReplicaInfos_DEFAULT map[int64][]*types.TReplicaInfo + +func (p *TGetTabletReplicaInfosResult_) GetTabletReplicaInfos() (v map[int64][]*types.TReplicaInfo) { + if !p.IsSetTabletReplicaInfos() { + return TGetTabletReplicaInfosResult__TabletReplicaInfos_DEFAULT + } + return p.TabletReplicaInfos +} + +var TGetTabletReplicaInfosResult__Token_DEFAULT string + +func (p *TGetTabletReplicaInfosResult_) GetToken() (v string) { + if !p.IsSetToken() { + return TGetTabletReplicaInfosResult__Token_DEFAULT + } + return *p.Token +} +func (p 
*TGetTabletReplicaInfosResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetTabletReplicaInfosResult_) SetTabletReplicaInfos(val map[int64][]*types.TReplicaInfo) { + p.TabletReplicaInfos = val +} +func (p *TGetTabletReplicaInfosResult_) SetToken(val *string) { + p.Token = val +} + +var fieldIDToName_TGetTabletReplicaInfosResult_ = map[int16]string{ + 1: "status", + 2: "tablet_replica_infos", + 3: "token", +} + +func (p *TGetTabletReplicaInfosResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetTabletReplicaInfosResult_) IsSetTabletReplicaInfos() bool { + return p.TabletReplicaInfos != nil +} + +func (p *TGetTabletReplicaInfosResult_) IsSetToken() bool { + return p.Token != nil +} + +func (p *TGetTabletReplicaInfosResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.MAP { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetTabletReplicaInfosResult_) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int64][]*types.TReplicaInfo, size) + for i := 0; i < size; i++ { + var _key int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*types.TReplicaInfo, 0, size) + values := make([]types.TReplicaInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil 
{ + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TabletReplicaInfos = _field + return nil +} +func (p *TGetTabletReplicaInfosResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} + +func (p *TGetTabletReplicaInfosResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetTabletReplicaInfosResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletReplicaInfos() { + if err = oprot.WriteFieldBegin("tablet_replica_infos", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.LIST, len(p.TabletReplicaInfos)); err != nil { + return err + } + for k, v := range p.TabletReplicaInfos { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetTabletReplicaInfosResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetTabletReplicaInfosResult_(%+v)", *p) + +} + +func (p *TGetTabletReplicaInfosResult_) DeepEqual(ano *TGetTabletReplicaInfosResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.TabletReplicaInfos) { + return false + } + if !p.Field3DeepEqual(ano.Token) { + return false + } + return true +} + +func (p *TGetTabletReplicaInfosResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetTabletReplicaInfosResult_) Field2DeepEqual(src map[int64][]*types.TReplicaInfo) bool { + + if len(p.TabletReplicaInfos) != len(src) { + return false + } + for k, v := range p.TabletReplicaInfos { + _src := src[k] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true +} +func (p *TGetTabletReplicaInfosResult_) Field3DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} + +type TGetSnapshotRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` + Token *string `thrift:"token,6,optional" frugal:"6,optional,string" json:"token,omitempty"` + LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` + SnapshotName *string `thrift:"snapshot_name,8,optional" frugal:"8,optional,string" json:"snapshot_name,omitempty"` + SnapshotType *TSnapshotType `thrift:"snapshot_type,9,optional" frugal:"9,optional,TSnapshotType" json:"snapshot_type,omitempty"` + EnableCompress *bool `thrift:"enable_compress,10,optional" frugal:"10,optional,bool" json:"enable_compress,omitempty"` +} + +func NewTGetSnapshotRequest() *TGetSnapshotRequest { + return &TGetSnapshotRequest{} +} + +func (p *TGetSnapshotRequest) InitDefault() { +} + +var TGetSnapshotRequest_Cluster_DEFAULT string + +func (p *TGetSnapshotRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGetSnapshotRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TGetSnapshotRequest_User_DEFAULT string + +func (p *TGetSnapshotRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TGetSnapshotRequest_User_DEFAULT + } + return *p.User +} + +var TGetSnapshotRequest_Passwd_DEFAULT string + +func (p *TGetSnapshotRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TGetSnapshotRequest_Passwd_DEFAULT + } + return *p.Passwd +} + +var TGetSnapshotRequest_Db_DEFAULT string + +func (p *TGetSnapshotRequest) GetDb() (v string) 
{ + if !p.IsSetDb() { + return TGetSnapshotRequest_Db_DEFAULT + } + return *p.Db +} + +var TGetSnapshotRequest_Table_DEFAULT string + +func (p *TGetSnapshotRequest) GetTable() (v string) { + if !p.IsSetTable() { + return TGetSnapshotRequest_Table_DEFAULT + } + return *p.Table +} + +var TGetSnapshotRequest_Token_DEFAULT string + +func (p *TGetSnapshotRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TGetSnapshotRequest_Token_DEFAULT + } + return *p.Token +} + +var TGetSnapshotRequest_LabelName_DEFAULT string + +func (p *TGetSnapshotRequest) GetLabelName() (v string) { + if !p.IsSetLabelName() { + return TGetSnapshotRequest_LabelName_DEFAULT + } + return *p.LabelName +} + +var TGetSnapshotRequest_SnapshotName_DEFAULT string + +func (p *TGetSnapshotRequest) GetSnapshotName() (v string) { + if !p.IsSetSnapshotName() { + return TGetSnapshotRequest_SnapshotName_DEFAULT + } + return *p.SnapshotName +} + +var TGetSnapshotRequest_SnapshotType_DEFAULT TSnapshotType + +func (p *TGetSnapshotRequest) GetSnapshotType() (v TSnapshotType) { + if !p.IsSetSnapshotType() { + return TGetSnapshotRequest_SnapshotType_DEFAULT + } + return *p.SnapshotType +} + +var TGetSnapshotRequest_EnableCompress_DEFAULT bool + +func (p *TGetSnapshotRequest) GetEnableCompress() (v bool) { + if !p.IsSetEnableCompress() { + return TGetSnapshotRequest_EnableCompress_DEFAULT + } + return *p.EnableCompress +} +func (p *TGetSnapshotRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TGetSnapshotRequest) SetUser(val *string) { + p.User = val +} +func (p *TGetSnapshotRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TGetSnapshotRequest) SetDb(val *string) { + p.Db = val +} +func (p *TGetSnapshotRequest) SetTable(val *string) { + p.Table = val +} +func (p *TGetSnapshotRequest) SetToken(val *string) { + p.Token = val +} +func (p *TGetSnapshotRequest) SetLabelName(val *string) { + p.LabelName = val +} +func (p *TGetSnapshotRequest) SetSnapshotName(val *string) { + p.SnapshotName = val +} +func (p *TGetSnapshotRequest) SetSnapshotType(val *TSnapshotType) { + p.SnapshotType = val +} +func (p *TGetSnapshotRequest) SetEnableCompress(val *bool) { + p.EnableCompress = val +} + +var fieldIDToName_TGetSnapshotRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "table", + 6: "token", + 7: "label_name", + 8: "snapshot_name", + 9: "snapshot_type", + 10: "enable_compress", +} + +func (p *TGetSnapshotRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TGetSnapshotRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TGetSnapshotRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TGetSnapshotRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TGetSnapshotRequest) IsSetTable() bool { + return p.Table != nil +} + +func (p *TGetSnapshotRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TGetSnapshotRequest) IsSetLabelName() bool { + return p.LabelName != nil +} + +func (p *TGetSnapshotRequest) IsSetSnapshotName() bool { + return p.SnapshotName != nil +} + +func (p *TGetSnapshotRequest) IsSetSnapshotType() bool { + return p.SnapshotType != nil +} + +func (p *TGetSnapshotRequest) IsSetEnableCompress() bool { + return p.EnableCompress != nil +} + +func (p *TGetSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = 
iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p 
*TGetSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Passwd = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LabelName = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SnapshotName = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *TSnapshotType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TSnapshotType(v) + _field = &tmp + } + p.SnapshotType = _field + return nil +} +func (p *TGetSnapshotRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableCompress = _field + return nil +} + +func (p *TGetSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetSnapshotRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetSnapshotRequest) 
writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField7(oprot 
thrift.TProtocol) (err error) { + if p.IsSetLabelName() { + if err = oprot.WriteFieldBegin("label_name", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.LabelName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetSnapshotName() { + if err = oprot.WriteFieldBegin("snapshot_name", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SnapshotName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetSnapshotType() { + if err = oprot.WriteFieldBegin("snapshot_type", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.SnapshotType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableCompress() { + if err = oprot.WriteFieldBegin("enable_compress", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableCompress); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TGetSnapshotRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetSnapshotRequest(%+v)", *p) + +} + +func (p *TGetSnapshotRequest) DeepEqual(ano *TGetSnapshotRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Table) { + return false + } + if !p.Field6DeepEqual(ano.Token) { + return false + } + if !p.Field7DeepEqual(ano.LabelName) { + return false + } + if !p.Field8DeepEqual(ano.SnapshotName) { + return false + } + if !p.Field9DeepEqual(ano.SnapshotType) { + return false + } + if !p.Field10DeepEqual(ano.EnableCompress) { + return false + } + return true +} + +func (p *TGetSnapshotRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} 
+func (p *TGetSnapshotRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field3DeepEqual(src *string) bool { + + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { + return false + } + if strings.Compare(*p.Passwd, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field5DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field6DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field7DeepEqual(src *string) bool { + + if p.LabelName == src { + return true + } else if p.LabelName == nil || src == nil { + return false + } + if strings.Compare(*p.LabelName, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field8DeepEqual(src *string) bool { + + if p.SnapshotName == src { + return true + } else if p.SnapshotName == nil || src == nil { + return false + } + if strings.Compare(*p.SnapshotName, *src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field9DeepEqual(src *TSnapshotType) bool { + + if p.SnapshotType == src { + return true + } else if p.SnapshotType == nil || src == nil { + return false + } + if *p.SnapshotType != *src { + return false + } + return true +} +func (p *TGetSnapshotRequest) Field10DeepEqual(src *bool) bool { + + if p.EnableCompress == src { + return true + } else if p.EnableCompress == nil || src == nil { + return false + } + if *p.EnableCompress != *src { + return false + } + return true +} + +type TGetSnapshotResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Meta []byte `thrift:"meta,2,optional" frugal:"2,optional,binary" json:"meta,omitempty"` + JobInfo []byte `thrift:"job_info,3,optional" frugal:"3,optional,binary" json:"job_info,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,4,optional" frugal:"4,optional,types.TNetworkAddress" json:"master_address,omitempty"` + Compressed *bool `thrift:"compressed,5,optional" frugal:"5,optional,bool" json:"compressed,omitempty"` + ExpiredAt *int64 `thrift:"expiredAt,6,optional" frugal:"6,optional,i64" json:"expiredAt,omitempty"` + CommitSeq *int64 `thrift:"commit_seq,7,optional" frugal:"7,optional,i64" json:"commit_seq,omitempty"` +} + +func NewTGetSnapshotResult_() *TGetSnapshotResult_ { + return &TGetSnapshotResult_{} +} + +func (p *TGetSnapshotResult_) InitDefault() { +} + +var TGetSnapshotResult__Status_DEFAULT *status.TStatus + +func (p *TGetSnapshotResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetSnapshotResult__Status_DEFAULT + } + return p.Status +} + +var TGetSnapshotResult__Meta_DEFAULT []byte + +func (p *TGetSnapshotResult_) GetMeta() (v []byte) { + if !p.IsSetMeta() { + 
return TGetSnapshotResult__Meta_DEFAULT + } + return p.Meta +} + +var TGetSnapshotResult__JobInfo_DEFAULT []byte + +func (p *TGetSnapshotResult_) GetJobInfo() (v []byte) { + if !p.IsSetJobInfo() { + return TGetSnapshotResult__JobInfo_DEFAULT + } + return p.JobInfo +} + +var TGetSnapshotResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TGetSnapshotResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetSnapshotResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} + +var TGetSnapshotResult__Compressed_DEFAULT bool + +func (p *TGetSnapshotResult_) GetCompressed() (v bool) { + if !p.IsSetCompressed() { + return TGetSnapshotResult__Compressed_DEFAULT + } + return *p.Compressed +} + +var TGetSnapshotResult__ExpiredAt_DEFAULT int64 + +func (p *TGetSnapshotResult_) GetExpiredAt() (v int64) { + if !p.IsSetExpiredAt() { + return TGetSnapshotResult__ExpiredAt_DEFAULT + } + return *p.ExpiredAt +} + +var TGetSnapshotResult__CommitSeq_DEFAULT int64 + +func (p *TGetSnapshotResult_) GetCommitSeq() (v int64) { + if !p.IsSetCommitSeq() { + return TGetSnapshotResult__CommitSeq_DEFAULT + } + return *p.CommitSeq +} +func (p *TGetSnapshotResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetSnapshotResult_) SetMeta(val []byte) { + p.Meta = val +} +func (p *TGetSnapshotResult_) SetJobInfo(val []byte) { + p.JobInfo = val +} +func (p *TGetSnapshotResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} +func (p *TGetSnapshotResult_) SetCompressed(val *bool) { + p.Compressed = val +} +func (p *TGetSnapshotResult_) SetExpiredAt(val *int64) { + p.ExpiredAt = val +} +func (p *TGetSnapshotResult_) SetCommitSeq(val *int64) { + p.CommitSeq = val +} + +var fieldIDToName_TGetSnapshotResult_ = map[int16]string{ + 1: "status", + 2: "meta", + 3: "job_info", + 4: "master_address", + 5: "compressed", + 6: "expiredAt", + 7: "commit_seq", +} + +func (p *TGetSnapshotResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetSnapshotResult_) IsSetMeta() bool { + return p.Meta != nil +} + +func (p *TGetSnapshotResult_) IsSetJobInfo() bool { + return p.JobInfo != nil +} + +func (p *TGetSnapshotResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TGetSnapshotResult_) IsSetCompressed() bool { + return p.Compressed != nil +} + +func (p *TGetSnapshotResult_) IsSetExpiredAt() bool { + return p.ExpiredAt != nil +} + +func (p *TGetSnapshotResult_) IsSetCommitSeq() bool { + return p.CommitSeq != nil +} + +func (p *TGetSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + 
case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetSnapshotResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _field = []byte(v) + } + p.Meta = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField3(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _field = []byte(v) + } + p.JobInfo = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Compressed = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ExpiredAt = _field + return nil +} +func (p *TGetSnapshotResult_) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CommitSeq = _field + return nil +} + +func (p *TGetSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetSnapshotResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != 
nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMeta() { + if err = oprot.WriteFieldBegin("meta", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.Meta)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetJobInfo() { + if err = oprot.WriteFieldBegin("job_info", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.JobInfo)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} 
+ +func (p *TGetSnapshotResult_) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressed() { + if err = oprot.WriteFieldBegin("compressed", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Compressed); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetExpiredAt() { + if err = oprot.WriteFieldBegin("expiredAt", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ExpiredAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetCommitSeq() { + if err = oprot.WriteFieldBegin("commit_seq", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CommitSeq); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TGetSnapshotResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetSnapshotResult_(%+v)", *p) + +} + +func (p *TGetSnapshotResult_) DeepEqual(ano *TGetSnapshotResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Meta) { + return false + } + if !p.Field3DeepEqual(ano.JobInfo) { + return false + } + if !p.Field4DeepEqual(ano.MasterAddress) { + return false + } + if !p.Field5DeepEqual(ano.Compressed) { + return false + } + if !p.Field6DeepEqual(ano.ExpiredAt) { + return false + } + if !p.Field7DeepEqual(ano.CommitSeq) { + return false + } + return true +} + +func (p *TGetSnapshotResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field2DeepEqual(src []byte) bool { + + if bytes.Compare(p.Meta, src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field3DeepEqual(src []byte) bool { + + if bytes.Compare(p.JobInfo, src) != 0 { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field4DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field5DeepEqual(src *bool) bool { + + if p.Compressed == src { + return true + } else if p.Compressed == nil || src == nil { + return false + } + if *p.Compressed != *src { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field6DeepEqual(src *int64) bool { + + if p.ExpiredAt == src { + return true + } else if p.ExpiredAt == nil || src == nil { + return false + } + if *p.ExpiredAt 
!= *src { + return false + } + return true +} +func (p *TGetSnapshotResult_) Field7DeepEqual(src *int64) bool { + + if p.CommitSeq == src { + return true + } else if p.CommitSeq == nil || src == nil { + return false + } + if *p.CommitSeq != *src { + return false + } + return true +} + +type TTableRef struct { + Table *string `thrift:"table,1,optional" frugal:"1,optional,string" json:"table,omitempty"` + AliasName *string `thrift:"alias_name,3,optional" frugal:"3,optional,string" json:"alias_name,omitempty"` +} + +func NewTTableRef() *TTableRef { + return &TTableRef{} +} + +func (p *TTableRef) InitDefault() { +} + +var TTableRef_Table_DEFAULT string + +func (p *TTableRef) GetTable() (v string) { + if !p.IsSetTable() { + return TTableRef_Table_DEFAULT + } + return *p.Table +} + +var TTableRef_AliasName_DEFAULT string + +func (p *TTableRef) GetAliasName() (v string) { + if !p.IsSetAliasName() { + return TTableRef_AliasName_DEFAULT + } + return *p.AliasName +} +func (p *TTableRef) SetTable(val *string) { + p.Table = val +} +func (p *TTableRef) SetAliasName(val *string) { + p.AliasName = val +} + +var fieldIDToName_TTableRef = map[int16]string{ + 1: "table", + 3: "alias_name", +} + +func (p *TTableRef) IsSetTable() bool { + return p.Table != nil +} + +func (p *TTableRef) IsSetAliasName() bool { + return p.AliasName != nil +} + +func (p *TTableRef) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableRef[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableRef) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} +func (p *TTableRef) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AliasName = _field + return nil +} + +func (p *TTableRef) Write(oprot 
thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTableRef"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTableRef) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTableRef) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetAliasName() { + if err = oprot.WriteFieldBegin("alias_name", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AliasName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTableRef) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTableRef(%+v)", *p) + +} + +func (p *TTableRef) DeepEqual(ano *TTableRef) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Table) { + return false + } + if !p.Field3DeepEqual(ano.AliasName) { + return false + } + return true +} + +func (p *TTableRef) Field1DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true +} +func (p *TTableRef) Field3DeepEqual(src *string) bool { + + if p.AliasName == src { + return true + } else if p.AliasName == nil || src == nil { + return false + } + if strings.Compare(*p.AliasName, *src) != 0 { + return false + } + return true +} + +type TRestoreSnapshotRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` + Token *string `thrift:"token,6,optional" 
frugal:"6,optional,string" json:"token,omitempty"` + LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` + RepoName *string `thrift:"repo_name,8,optional" frugal:"8,optional,string" json:"repo_name,omitempty"` + TableRefs []*TTableRef `thrift:"table_refs,9,optional" frugal:"9,optional,list" json:"table_refs,omitempty"` + Properties map[string]string `thrift:"properties,10,optional" frugal:"10,optional,map" json:"properties,omitempty"` + Meta []byte `thrift:"meta,11,optional" frugal:"11,optional,binary" json:"meta,omitempty"` + JobInfo []byte `thrift:"job_info,12,optional" frugal:"12,optional,binary" json:"job_info,omitempty"` + CleanTables *bool `thrift:"clean_tables,13,optional" frugal:"13,optional,bool" json:"clean_tables,omitempty"` + CleanPartitions *bool `thrift:"clean_partitions,14,optional" frugal:"14,optional,bool" json:"clean_partitions,omitempty"` + AtomicRestore *bool `thrift:"atomic_restore,15,optional" frugal:"15,optional,bool" json:"atomic_restore,omitempty"` + Compressed *bool `thrift:"compressed,16,optional" frugal:"16,optional,bool" json:"compressed,omitempty"` +} + +func NewTRestoreSnapshotRequest() *TRestoreSnapshotRequest { + return &TRestoreSnapshotRequest{} +} + +func (p *TRestoreSnapshotRequest) InitDefault() { +} + +var TRestoreSnapshotRequest_Cluster_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TRestoreSnapshotRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TRestoreSnapshotRequest_User_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TRestoreSnapshotRequest_User_DEFAULT + } + return *p.User +} + +var TRestoreSnapshotRequest_Passwd_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TRestoreSnapshotRequest_Passwd_DEFAULT + } + return *p.Passwd +} + +var TRestoreSnapshotRequest_Db_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetDb() (v string) { + if !p.IsSetDb() { + return TRestoreSnapshotRequest_Db_DEFAULT + } + return *p.Db +} + +var TRestoreSnapshotRequest_Table_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetTable() (v string) { + if !p.IsSetTable() { + return TRestoreSnapshotRequest_Table_DEFAULT + } + return *p.Table +} + +var TRestoreSnapshotRequest_Token_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TRestoreSnapshotRequest_Token_DEFAULT + } + return *p.Token +} + +var TRestoreSnapshotRequest_LabelName_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetLabelName() (v string) { + if !p.IsSetLabelName() { + return TRestoreSnapshotRequest_LabelName_DEFAULT + } + return *p.LabelName +} + +var TRestoreSnapshotRequest_RepoName_DEFAULT string + +func (p *TRestoreSnapshotRequest) GetRepoName() (v string) { + if !p.IsSetRepoName() { + return TRestoreSnapshotRequest_RepoName_DEFAULT + } + return *p.RepoName +} + +var TRestoreSnapshotRequest_TableRefs_DEFAULT []*TTableRef + +func (p *TRestoreSnapshotRequest) GetTableRefs() (v []*TTableRef) { + if !p.IsSetTableRefs() { + return TRestoreSnapshotRequest_TableRefs_DEFAULT + } + return p.TableRefs +} + +var TRestoreSnapshotRequest_Properties_DEFAULT map[string]string + +func (p *TRestoreSnapshotRequest) GetProperties() (v map[string]string) { + if !p.IsSetProperties() { + return TRestoreSnapshotRequest_Properties_DEFAULT + } + return p.Properties +} + +var TRestoreSnapshotRequest_Meta_DEFAULT []byte + 
+func (p *TRestoreSnapshotRequest) GetMeta() (v []byte) { + if !p.IsSetMeta() { + return TRestoreSnapshotRequest_Meta_DEFAULT + } + return p.Meta +} + +var TRestoreSnapshotRequest_JobInfo_DEFAULT []byte + +func (p *TRestoreSnapshotRequest) GetJobInfo() (v []byte) { + if !p.IsSetJobInfo() { + return TRestoreSnapshotRequest_JobInfo_DEFAULT + } + return p.JobInfo +} + +var TRestoreSnapshotRequest_CleanTables_DEFAULT bool + +func (p *TRestoreSnapshotRequest) GetCleanTables() (v bool) { + if !p.IsSetCleanTables() { + return TRestoreSnapshotRequest_CleanTables_DEFAULT + } + return *p.CleanTables +} + +var TRestoreSnapshotRequest_CleanPartitions_DEFAULT bool + +func (p *TRestoreSnapshotRequest) GetCleanPartitions() (v bool) { + if !p.IsSetCleanPartitions() { + return TRestoreSnapshotRequest_CleanPartitions_DEFAULT + } + return *p.CleanPartitions +} + +var TRestoreSnapshotRequest_AtomicRestore_DEFAULT bool + +func (p *TRestoreSnapshotRequest) GetAtomicRestore() (v bool) { + if !p.IsSetAtomicRestore() { + return TRestoreSnapshotRequest_AtomicRestore_DEFAULT + } + return *p.AtomicRestore +} + +var TRestoreSnapshotRequest_Compressed_DEFAULT bool + +func (p *TRestoreSnapshotRequest) GetCompressed() (v bool) { + if !p.IsSetCompressed() { + return TRestoreSnapshotRequest_Compressed_DEFAULT + } + return *p.Compressed +} +func (p *TRestoreSnapshotRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TRestoreSnapshotRequest) SetUser(val *string) { + p.User = val +} +func (p *TRestoreSnapshotRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TRestoreSnapshotRequest) SetDb(val *string) { + p.Db = val +} +func (p *TRestoreSnapshotRequest) SetTable(val *string) { + p.Table = val +} +func (p *TRestoreSnapshotRequest) SetToken(val *string) { + p.Token = val +} +func (p *TRestoreSnapshotRequest) SetLabelName(val *string) { + p.LabelName = val +} +func (p *TRestoreSnapshotRequest) SetRepoName(val *string) { + p.RepoName = val +} +func (p *TRestoreSnapshotRequest) SetTableRefs(val []*TTableRef) { + p.TableRefs = val +} +func (p *TRestoreSnapshotRequest) SetProperties(val map[string]string) { + p.Properties = val +} +func (p *TRestoreSnapshotRequest) SetMeta(val []byte) { + p.Meta = val +} +func (p *TRestoreSnapshotRequest) SetJobInfo(val []byte) { + p.JobInfo = val +} +func (p *TRestoreSnapshotRequest) SetCleanTables(val *bool) { + p.CleanTables = val +} +func (p *TRestoreSnapshotRequest) SetCleanPartitions(val *bool) { + p.CleanPartitions = val +} +func (p *TRestoreSnapshotRequest) SetAtomicRestore(val *bool) { + p.AtomicRestore = val +} +func (p *TRestoreSnapshotRequest) SetCompressed(val *bool) { + p.Compressed = val +} + +var fieldIDToName_TRestoreSnapshotRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "db", + 5: "table", + 6: "token", + 7: "label_name", + 8: "repo_name", + 9: "table_refs", + 10: "properties", + 11: "meta", + 12: "job_info", + 13: "clean_tables", + 14: "clean_partitions", + 15: "atomic_restore", + 16: "compressed", +} + +func (p *TRestoreSnapshotRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TRestoreSnapshotRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TRestoreSnapshotRequest) IsSetPasswd() bool { + return p.Passwd != nil +} + +func (p *TRestoreSnapshotRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TRestoreSnapshotRequest) IsSetTable() bool { + return p.Table != nil +} + +func (p *TRestoreSnapshotRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p 
*TRestoreSnapshotRequest) IsSetLabelName() bool { + return p.LabelName != nil +} + +func (p *TRestoreSnapshotRequest) IsSetRepoName() bool { + return p.RepoName != nil +} + +func (p *TRestoreSnapshotRequest) IsSetTableRefs() bool { + return p.TableRefs != nil +} + +func (p *TRestoreSnapshotRequest) IsSetProperties() bool { + return p.Properties != nil +} + +func (p *TRestoreSnapshotRequest) IsSetMeta() bool { + return p.Meta != nil +} + +func (p *TRestoreSnapshotRequest) IsSetJobInfo() bool { + return p.JobInfo != nil +} + +func (p *TRestoreSnapshotRequest) IsSetCleanTables() bool { + return p.CleanTables != nil +} + +func (p *TRestoreSnapshotRequest) IsSetCleanPartitions() bool { + return p.CleanPartitions != nil +} + +func (p *TRestoreSnapshotRequest) IsSetAtomicRestore() bool { + return p.AtomicRestore != nil +} + +func (p *TRestoreSnapshotRequest) IsSetCompressed() bool { + return p.Compressed != nil +} + +func (p *TRestoreSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.MAP { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING 
{ + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Passwd = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LabelName = _field + return nil +} +func (p *TRestoreSnapshotRequest) 
ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.RepoName = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTableRef, 0, size) + values := make([]TTableRef, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TableRefs = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField10(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.Properties = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _field = []byte(v) + } + p.Meta = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _field = []byte(v) + } + p.JobInfo = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.CleanTables = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.CleanPartitions = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.AtomicRestore = _field + return nil +} +func (p *TRestoreSnapshotRequest) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Compressed = _field + return nil +} + +func (p *TRestoreSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRestoreSnapshotRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + 
goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { fieldId = 9 goto WriteFieldError } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil 
+WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetLabelName() { + if err = oprot.WriteFieldBegin("label_name", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.LabelName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetRepoName() { + if err = oprot.WriteFieldBegin("repo_name", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.RepoName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTableRefs() { + if err = oprot.WriteFieldBegin("table_refs", thrift.LIST, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableRefs)); err != nil { + return err + } + for _, v := range p.TableRefs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err = 
oprot.WriteFieldBegin("properties", thrift.MAP, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return err + } + for k, v := range p.Properties { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetMeta() { + if err = oprot.WriteFieldBegin("meta", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.Meta)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetJobInfo() { + if err = oprot.WriteFieldBegin("job_info", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.JobInfo)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetCleanTables() { + if err = oprot.WriteFieldBegin("clean_tables", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.CleanTables); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetCleanPartitions() { + if err = oprot.WriteFieldBegin("clean_partitions", thrift.BOOL, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.CleanPartitions); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetAtomicRestore() { + if err = oprot.WriteFieldBegin("atomic_restore", thrift.BOOL, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.AtomicRestore); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + 
goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressed() { + if err = oprot.WriteFieldBegin("compressed", thrift.BOOL, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Compressed); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TRestoreSnapshotRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRestoreSnapshotRequest(%+v)", *p) + +} + +func (p *TRestoreSnapshotRequest) DeepEqual(ano *TRestoreSnapshotRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Table) { + return false + } + if !p.Field6DeepEqual(ano.Token) { + return false + } + if !p.Field7DeepEqual(ano.LabelName) { + return false + } + if !p.Field8DeepEqual(ano.RepoName) { + return false + } + if !p.Field9DeepEqual(ano.TableRefs) { + return false + } + if !p.Field10DeepEqual(ano.Properties) { + return false + } + if !p.Field11DeepEqual(ano.Meta) { + return false + } + if !p.Field12DeepEqual(ano.JobInfo) { + return false + } + if !p.Field13DeepEqual(ano.CleanTables) { + return false + } + if !p.Field14DeepEqual(ano.CleanPartitions) { + return false + } + if !p.Field15DeepEqual(ano.AtomicRestore) { + return false + } + if !p.Field16DeepEqual(ano.Compressed) { + return false + } + return true +} + +func (p *TRestoreSnapshotRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field3DeepEqual(src *string) bool { + + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { + return false + } + if strings.Compare(*p.Passwd, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field4DeepEqual(src *string) bool { + + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false + } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field5DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field6DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == 
nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field7DeepEqual(src *string) bool { + + if p.LabelName == src { + return true + } else if p.LabelName == nil || src == nil { + return false + } + if strings.Compare(*p.LabelName, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field8DeepEqual(src *string) bool { + + if p.RepoName == src { + return true + } else if p.RepoName == nil || src == nil { + return false + } + if strings.Compare(*p.RepoName, *src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field9DeepEqual(src []*TTableRef) bool { + + if len(p.TableRefs) != len(src) { + return false + } + for i, v := range p.TableRefs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TRestoreSnapshotRequest) Field10DeepEqual(src map[string]string) bool { + + if len(p.Properties) != len(src) { + return false + } + for k, v := range p.Properties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TRestoreSnapshotRequest) Field11DeepEqual(src []byte) bool { + + if bytes.Compare(p.Meta, src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field12DeepEqual(src []byte) bool { + + if bytes.Compare(p.JobInfo, src) != 0 { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field13DeepEqual(src *bool) bool { + + if p.CleanTables == src { + return true + } else if p.CleanTables == nil || src == nil { + return false + } + if *p.CleanTables != *src { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field14DeepEqual(src *bool) bool { + + if p.CleanPartitions == src { + return true + } else if p.CleanPartitions == nil || src == nil { + return false + } + if *p.CleanPartitions != *src { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field15DeepEqual(src *bool) bool { + + if p.AtomicRestore == src { + return true + } else if p.AtomicRestore == nil || src == nil { + return false + } + if *p.AtomicRestore != *src { + return false + } + return true +} +func (p *TRestoreSnapshotRequest) Field16DeepEqual(src *bool) bool { + + if p.Compressed == src { + return true + } else if p.Compressed == nil || src == nil { + return false + } + if *p.Compressed != *src { + return false + } + return true +} + +type TRestoreSnapshotResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,2,optional" frugal:"2,optional,types.TNetworkAddress" json:"master_address,omitempty"` +} + +func NewTRestoreSnapshotResult_() *TRestoreSnapshotResult_ { + return &TRestoreSnapshotResult_{} +} + +func (p *TRestoreSnapshotResult_) InitDefault() { +} + +var TRestoreSnapshotResult__Status_DEFAULT *status.TStatus + +func (p *TRestoreSnapshotResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TRestoreSnapshotResult__Status_DEFAULT + } + return p.Status +} + +var TRestoreSnapshotResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TRestoreSnapshotResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TRestoreSnapshotResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TRestoreSnapshotResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TRestoreSnapshotResult_) 
SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TRestoreSnapshotResult_ = map[int16]string{ + 1: "status", + 2: "master_address", +} + +func (p *TRestoreSnapshotResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TRestoreSnapshotResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TRestoreSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRestoreSnapshotResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TRestoreSnapshotResult_) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TRestoreSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRestoreSnapshotResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) 
+} + +func (p *TRestoreSnapshotResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TRestoreSnapshotResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRestoreSnapshotResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRestoreSnapshotResult_(%+v)", *p) + +} + +func (p *TRestoreSnapshotResult_) DeepEqual(ano *TRestoreSnapshotResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TRestoreSnapshotResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TRestoreSnapshotResult_) Field2DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TPlsqlStoredProcedure struct { + Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"` + CatalogId *int64 `thrift:"catalogId,2,optional" frugal:"2,optional,i64" json:"catalogId,omitempty"` + DbId *int64 `thrift:"dbId,3,optional" frugal:"3,optional,i64" json:"dbId,omitempty"` + PackageName *string `thrift:"packageName,4,optional" frugal:"4,optional,string" json:"packageName,omitempty"` + OwnerName *string `thrift:"ownerName,5,optional" frugal:"5,optional,string" json:"ownerName,omitempty"` + Source *string `thrift:"source,6,optional" frugal:"6,optional,string" json:"source,omitempty"` + CreateTime *string `thrift:"createTime,7,optional" frugal:"7,optional,string" json:"createTime,omitempty"` + ModifyTime *string `thrift:"modifyTime,8,optional" frugal:"8,optional,string" json:"modifyTime,omitempty"` +} + +func NewTPlsqlStoredProcedure() *TPlsqlStoredProcedure { + return &TPlsqlStoredProcedure{} +} + +func (p *TPlsqlStoredProcedure) InitDefault() { +} + +var TPlsqlStoredProcedure_Name_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetName() (v string) { + if !p.IsSetName() { + return TPlsqlStoredProcedure_Name_DEFAULT + } + return *p.Name +} + +var TPlsqlStoredProcedure_CatalogId_DEFAULT int64 + +func (p *TPlsqlStoredProcedure) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TPlsqlStoredProcedure_CatalogId_DEFAULT + } + return *p.CatalogId +} + +var TPlsqlStoredProcedure_DbId_DEFAULT int64 + +func (p *TPlsqlStoredProcedure) GetDbId() (v int64) { + if !p.IsSetDbId() { + return 
TPlsqlStoredProcedure_DbId_DEFAULT + } + return *p.DbId +} + +var TPlsqlStoredProcedure_PackageName_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetPackageName() (v string) { + if !p.IsSetPackageName() { + return TPlsqlStoredProcedure_PackageName_DEFAULT + } + return *p.PackageName +} + +var TPlsqlStoredProcedure_OwnerName_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetOwnerName() (v string) { + if !p.IsSetOwnerName() { + return TPlsqlStoredProcedure_OwnerName_DEFAULT + } + return *p.OwnerName +} + +var TPlsqlStoredProcedure_Source_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetSource() (v string) { + if !p.IsSetSource() { + return TPlsqlStoredProcedure_Source_DEFAULT + } + return *p.Source +} + +var TPlsqlStoredProcedure_CreateTime_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetCreateTime() (v string) { + if !p.IsSetCreateTime() { + return TPlsqlStoredProcedure_CreateTime_DEFAULT + } + return *p.CreateTime +} + +var TPlsqlStoredProcedure_ModifyTime_DEFAULT string + +func (p *TPlsqlStoredProcedure) GetModifyTime() (v string) { + if !p.IsSetModifyTime() { + return TPlsqlStoredProcedure_ModifyTime_DEFAULT + } + return *p.ModifyTime +} +func (p *TPlsqlStoredProcedure) SetName(val *string) { + p.Name = val +} +func (p *TPlsqlStoredProcedure) SetCatalogId(val *int64) { + p.CatalogId = val +} +func (p *TPlsqlStoredProcedure) SetDbId(val *int64) { + p.DbId = val +} +func (p *TPlsqlStoredProcedure) SetPackageName(val *string) { + p.PackageName = val +} +func (p *TPlsqlStoredProcedure) SetOwnerName(val *string) { + p.OwnerName = val +} +func (p *TPlsqlStoredProcedure) SetSource(val *string) { + p.Source = val +} +func (p *TPlsqlStoredProcedure) SetCreateTime(val *string) { + p.CreateTime = val +} +func (p *TPlsqlStoredProcedure) SetModifyTime(val *string) { + p.ModifyTime = val +} + +var fieldIDToName_TPlsqlStoredProcedure = map[int16]string{ + 1: "name", + 2: "catalogId", + 3: "dbId", + 4: "packageName", + 5: "ownerName", + 6: "source", + 7: "createTime", + 8: "modifyTime", +} + +func (p *TPlsqlStoredProcedure) IsSetName() bool { + return p.Name != nil +} + +func (p *TPlsqlStoredProcedure) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TPlsqlStoredProcedure) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TPlsqlStoredProcedure) IsSetPackageName() bool { + return p.PackageName != nil +} + +func (p *TPlsqlStoredProcedure) IsSetOwnerName() bool { + return p.OwnerName != nil +} + +func (p *TPlsqlStoredProcedure) IsSetSource() bool { + return p.Source != nil +} + +func (p *TPlsqlStoredProcedure) IsSetCreateTime() bool { + return p.CreateTime != nil +} + +func (p *TPlsqlStoredProcedure) IsSetModifyTime() bool { + return p.ModifyTime != nil +} + +func (p *TPlsqlStoredProcedure) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == 
thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlStoredProcedure[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CatalogId = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PackageName = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OwnerName = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Source = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField7(iprot thrift.TProtocol) error { + 
+ var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CreateTime = _field + return nil +} +func (p *TPlsqlStoredProcedure) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ModifyTime = _field + return nil +} + +func (p *TPlsqlStoredProcedure) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPlsqlStoredProcedure"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalogId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CatalogId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPackageName() { + if err = oprot.WriteFieldBegin("packageName", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PackageName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetOwnerName() { + if err = oprot.WriteFieldBegin("ownerName", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OwnerName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSource() { + if err = oprot.WriteFieldBegin("source", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Source); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetCreateTime() { + if err = oprot.WriteFieldBegin("createTime", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CreateTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetModifyTime() { + if err = oprot.WriteFieldBegin("modifyTime", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ModifyTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedure) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPlsqlStoredProcedure(%+v)", *p) + +} + +func (p *TPlsqlStoredProcedure) DeepEqual(ano *TPlsqlStoredProcedure) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Name) { + return false + } + if !p.Field2DeepEqual(ano.CatalogId) { + return false + } + if 
!p.Field3DeepEqual(ano.DbId) { + return false + } + if !p.Field4DeepEqual(ano.PackageName) { + return false + } + if !p.Field5DeepEqual(ano.OwnerName) { + return false + } + if !p.Field6DeepEqual(ano.Source) { + return false + } + if !p.Field7DeepEqual(ano.CreateTime) { + return false + } + if !p.Field8DeepEqual(ano.ModifyTime) { + return false + } + return true +} + +func (p *TPlsqlStoredProcedure) Field1DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field2DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field3DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field4DeepEqual(src *string) bool { + + if p.PackageName == src { + return true + } else if p.PackageName == nil || src == nil { + return false + } + if strings.Compare(*p.PackageName, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field5DeepEqual(src *string) bool { + + if p.OwnerName == src { + return true + } else if p.OwnerName == nil || src == nil { + return false + } + if strings.Compare(*p.OwnerName, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field6DeepEqual(src *string) bool { + + if p.Source == src { + return true + } else if p.Source == nil || src == nil { + return false + } + if strings.Compare(*p.Source, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field7DeepEqual(src *string) bool { + + if p.CreateTime == src { + return true + } else if p.CreateTime == nil || src == nil { + return false + } + if strings.Compare(*p.CreateTime, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlStoredProcedure) Field8DeepEqual(src *string) bool { + + if p.ModifyTime == src { + return true + } else if p.ModifyTime == nil || src == nil { + return false + } + if strings.Compare(*p.ModifyTime, *src) != 0 { + return false + } + return true +} + +type TPlsqlPackage struct { + Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"` + CatalogId *int64 `thrift:"catalogId,2,optional" frugal:"2,optional,i64" json:"catalogId,omitempty"` + DbId *int64 `thrift:"dbId,3,optional" frugal:"3,optional,i64" json:"dbId,omitempty"` + OwnerName *string `thrift:"ownerName,4,optional" frugal:"4,optional,string" json:"ownerName,omitempty"` + Header *string `thrift:"header,5,optional" frugal:"5,optional,string" json:"header,omitempty"` + Body *string `thrift:"body,6,optional" frugal:"6,optional,string" json:"body,omitempty"` +} + +func NewTPlsqlPackage() *TPlsqlPackage { + return &TPlsqlPackage{} +} + +func (p *TPlsqlPackage) InitDefault() { +} + +var TPlsqlPackage_Name_DEFAULT string + +func (p *TPlsqlPackage) GetName() (v string) { + if !p.IsSetName() { + return TPlsqlPackage_Name_DEFAULT + } + return *p.Name +} + +var TPlsqlPackage_CatalogId_DEFAULT int64 + +func (p *TPlsqlPackage) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TPlsqlPackage_CatalogId_DEFAULT + } + return *p.CatalogId +} + +var TPlsqlPackage_DbId_DEFAULT int64 + +func (p *TPlsqlPackage) 
GetDbId() (v int64) { + if !p.IsSetDbId() { + return TPlsqlPackage_DbId_DEFAULT + } + return *p.DbId +} + +var TPlsqlPackage_OwnerName_DEFAULT string + +func (p *TPlsqlPackage) GetOwnerName() (v string) { + if !p.IsSetOwnerName() { + return TPlsqlPackage_OwnerName_DEFAULT + } + return *p.OwnerName +} + +var TPlsqlPackage_Header_DEFAULT string + +func (p *TPlsqlPackage) GetHeader() (v string) { + if !p.IsSetHeader() { + return TPlsqlPackage_Header_DEFAULT + } + return *p.Header +} + +var TPlsqlPackage_Body_DEFAULT string + +func (p *TPlsqlPackage) GetBody() (v string) { + if !p.IsSetBody() { + return TPlsqlPackage_Body_DEFAULT + } + return *p.Body +} +func (p *TPlsqlPackage) SetName(val *string) { + p.Name = val +} +func (p *TPlsqlPackage) SetCatalogId(val *int64) { + p.CatalogId = val +} +func (p *TPlsqlPackage) SetDbId(val *int64) { + p.DbId = val +} +func (p *TPlsqlPackage) SetOwnerName(val *string) { + p.OwnerName = val +} +func (p *TPlsqlPackage) SetHeader(val *string) { + p.Header = val +} +func (p *TPlsqlPackage) SetBody(val *string) { + p.Body = val +} + +var fieldIDToName_TPlsqlPackage = map[int16]string{ + 1: "name", + 2: "catalogId", + 3: "dbId", + 4: "ownerName", + 5: "header", + 6: "body", +} + +func (p *TPlsqlPackage) IsSetName() bool { + return p.Name != nil +} + +func (p *TPlsqlPackage) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TPlsqlPackage) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TPlsqlPackage) IsSetOwnerName() bool { + return p.OwnerName != nil +} + +func (p *TPlsqlPackage) IsSetHeader() bool { + return p.Header != nil +} + +func (p *TPlsqlPackage) IsSetBody() bool { + return p.Body != nil +} + +func (p *TPlsqlPackage) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlPackage[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPlsqlPackage) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TPlsqlPackage) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CatalogId = _field + return nil +} +func (p *TPlsqlPackage) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TPlsqlPackage) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OwnerName = _field + return nil +} +func (p *TPlsqlPackage) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Header = _field + return nil +} +func (p *TPlsqlPackage) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Body = _field + return nil +} + +func (p *TPlsqlPackage) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPlsqlPackage"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + 
} + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalogId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CatalogId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetOwnerName() { + if err = oprot.WriteFieldBegin("ownerName", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OwnerName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHeader() { + if err = oprot.WriteFieldBegin("header", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Header); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPlsqlPackage) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetBody() { + if err = oprot.WriteFieldBegin("body", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Body); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPlsqlPackage) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPlsqlPackage(%+v)", *p) + +} + +func (p *TPlsqlPackage) DeepEqual(ano *TPlsqlPackage) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Name) { + 
return false + } + if !p.Field2DeepEqual(ano.CatalogId) { + return false + } + if !p.Field3DeepEqual(ano.DbId) { + return false + } + if !p.Field4DeepEqual(ano.OwnerName) { + return false + } + if !p.Field5DeepEqual(ano.Header) { + return false + } + if !p.Field6DeepEqual(ano.Body) { + return false + } + return true +} + +func (p *TPlsqlPackage) Field1DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlPackage) Field2DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} +func (p *TPlsqlPackage) Field3DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} +func (p *TPlsqlPackage) Field4DeepEqual(src *string) bool { + + if p.OwnerName == src { + return true + } else if p.OwnerName == nil || src == nil { + return false + } + if strings.Compare(*p.OwnerName, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlPackage) Field5DeepEqual(src *string) bool { + + if p.Header == src { + return true + } else if p.Header == nil || src == nil { + return false + } + if strings.Compare(*p.Header, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlPackage) Field6DeepEqual(src *string) bool { + + if p.Body == src { + return true + } else if p.Body == nil || src == nil { + return false + } + if strings.Compare(*p.Body, *src) != 0 { + return false + } + return true +} + +type TPlsqlProcedureKey struct { + Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"` + CatalogId *int64 `thrift:"catalogId,2,optional" frugal:"2,optional,i64" json:"catalogId,omitempty"` + DbId *int64 `thrift:"dbId,3,optional" frugal:"3,optional,i64" json:"dbId,omitempty"` +} + +func NewTPlsqlProcedureKey() *TPlsqlProcedureKey { + return &TPlsqlProcedureKey{} +} + +func (p *TPlsqlProcedureKey) InitDefault() { +} + +var TPlsqlProcedureKey_Name_DEFAULT string + +func (p *TPlsqlProcedureKey) GetName() (v string) { + if !p.IsSetName() { + return TPlsqlProcedureKey_Name_DEFAULT + } + return *p.Name +} + +var TPlsqlProcedureKey_CatalogId_DEFAULT int64 + +func (p *TPlsqlProcedureKey) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TPlsqlProcedureKey_CatalogId_DEFAULT + } + return *p.CatalogId +} + +var TPlsqlProcedureKey_DbId_DEFAULT int64 + +func (p *TPlsqlProcedureKey) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TPlsqlProcedureKey_DbId_DEFAULT + } + return *p.DbId +} +func (p *TPlsqlProcedureKey) SetName(val *string) { + p.Name = val +} +func (p *TPlsqlProcedureKey) SetCatalogId(val *int64) { + p.CatalogId = val +} +func (p *TPlsqlProcedureKey) SetDbId(val *int64) { + p.DbId = val +} + +var fieldIDToName_TPlsqlProcedureKey = map[int16]string{ + 1: "name", + 2: "catalogId", + 3: "dbId", +} + +func (p *TPlsqlProcedureKey) IsSetName() bool { + return p.Name != nil +} + +func (p *TPlsqlProcedureKey) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TPlsqlProcedureKey) IsSetDbId() bool { + return p.DbId != nil +} + +func (p *TPlsqlProcedureKey) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto 
ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlProcedureKey[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPlsqlProcedureKey) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TPlsqlProcedureKey) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CatalogId = _field + return nil +} +func (p *TPlsqlProcedureKey) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} + +func (p *TPlsqlProcedureKey) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPlsqlProcedureKey"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPlsqlProcedureKey) writeField1(oprot 
thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlsqlProcedureKey) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalogId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CatalogId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPlsqlProcedureKey) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPlsqlProcedureKey) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPlsqlProcedureKey(%+v)", *p) + +} + +func (p *TPlsqlProcedureKey) DeepEqual(ano *TPlsqlProcedureKey) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Name) { + return false + } + if !p.Field2DeepEqual(ano.CatalogId) { + return false + } + if !p.Field3DeepEqual(ano.DbId) { + return false + } + return true +} + +func (p *TPlsqlProcedureKey) Field1DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TPlsqlProcedureKey) Field2DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} +func (p *TPlsqlProcedureKey) Field3DeepEqual(src *int64) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { + return false + } + return true +} + +type TAddPlsqlStoredProcedureRequest struct { + PlsqlStoredProcedure *TPlsqlStoredProcedure `thrift:"plsqlStoredProcedure,1,optional" frugal:"1,optional,TPlsqlStoredProcedure" json:"plsqlStoredProcedure,omitempty"` + IsForce *bool `thrift:"isForce,2,optional" frugal:"2,optional,bool" json:"isForce,omitempty"` +} + +func NewTAddPlsqlStoredProcedureRequest() *TAddPlsqlStoredProcedureRequest { + return &TAddPlsqlStoredProcedureRequest{} +} + +func (p *TAddPlsqlStoredProcedureRequest) InitDefault() { +} + +var TAddPlsqlStoredProcedureRequest_PlsqlStoredProcedure_DEFAULT *TPlsqlStoredProcedure + +func (p *TAddPlsqlStoredProcedureRequest) 
GetPlsqlStoredProcedure() (v *TPlsqlStoredProcedure) { + if !p.IsSetPlsqlStoredProcedure() { + return TAddPlsqlStoredProcedureRequest_PlsqlStoredProcedure_DEFAULT + } + return p.PlsqlStoredProcedure +} + +var TAddPlsqlStoredProcedureRequest_IsForce_DEFAULT bool + +func (p *TAddPlsqlStoredProcedureRequest) GetIsForce() (v bool) { + if !p.IsSetIsForce() { + return TAddPlsqlStoredProcedureRequest_IsForce_DEFAULT + } + return *p.IsForce +} +func (p *TAddPlsqlStoredProcedureRequest) SetPlsqlStoredProcedure(val *TPlsqlStoredProcedure) { + p.PlsqlStoredProcedure = val +} +func (p *TAddPlsqlStoredProcedureRequest) SetIsForce(val *bool) { + p.IsForce = val +} + +var fieldIDToName_TAddPlsqlStoredProcedureRequest = map[int16]string{ + 1: "plsqlStoredProcedure", + 2: "isForce", +} + +func (p *TAddPlsqlStoredProcedureRequest) IsSetPlsqlStoredProcedure() bool { + return p.PlsqlStoredProcedure != nil +} + +func (p *TAddPlsqlStoredProcedureRequest) IsSetIsForce() bool { + return p.IsForce != nil +} + +func (p *TAddPlsqlStoredProcedureRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddPlsqlStoredProcedureRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TAddPlsqlStoredProcedureRequest) ReadField1(iprot thrift.TProtocol) error { + _field := NewTPlsqlStoredProcedure() + if err := _field.Read(iprot); err != nil { + return err + } + p.PlsqlStoredProcedure = _field + return nil +} +func (p *TAddPlsqlStoredProcedureRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsForce = _field + return nil +} + +func (p *TAddPlsqlStoredProcedureRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TAddPlsqlStoredProcedureRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + 
goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TAddPlsqlStoredProcedureRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPlsqlStoredProcedure() { + if err = oprot.WriteFieldBegin("plsqlStoredProcedure", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.PlsqlStoredProcedure.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TAddPlsqlStoredProcedureRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIsForce() { + if err = oprot.WriteFieldBegin("isForce", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsForce); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TAddPlsqlStoredProcedureRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TAddPlsqlStoredProcedureRequest(%+v)", *p) + +} + +func (p *TAddPlsqlStoredProcedureRequest) DeepEqual(ano *TAddPlsqlStoredProcedureRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PlsqlStoredProcedure) { + return false + } + if !p.Field2DeepEqual(ano.IsForce) { + return false + } + return true +} + +func (p *TAddPlsqlStoredProcedureRequest) Field1DeepEqual(src *TPlsqlStoredProcedure) bool { + + if !p.PlsqlStoredProcedure.DeepEqual(src) { + return false + } + return true +} +func (p *TAddPlsqlStoredProcedureRequest) Field2DeepEqual(src *bool) bool { + + if p.IsForce == src { + return true + } else if p.IsForce == nil || src == nil { + return false + } + if *p.IsForce != *src { + return false + } + return true +} + +type TDropPlsqlStoredProcedureRequest struct { + PlsqlProcedureKey *TPlsqlProcedureKey `thrift:"plsqlProcedureKey,1,optional" frugal:"1,optional,TPlsqlProcedureKey" json:"plsqlProcedureKey,omitempty"` +} + +func NewTDropPlsqlStoredProcedureRequest() *TDropPlsqlStoredProcedureRequest { + return &TDropPlsqlStoredProcedureRequest{} +} + +func (p *TDropPlsqlStoredProcedureRequest) InitDefault() { +} + +var TDropPlsqlStoredProcedureRequest_PlsqlProcedureKey_DEFAULT *TPlsqlProcedureKey + +func (p *TDropPlsqlStoredProcedureRequest) GetPlsqlProcedureKey() (v *TPlsqlProcedureKey) { + if !p.IsSetPlsqlProcedureKey() { + return 
TDropPlsqlStoredProcedureRequest_PlsqlProcedureKey_DEFAULT + } + return p.PlsqlProcedureKey +} +func (p *TDropPlsqlStoredProcedureRequest) SetPlsqlProcedureKey(val *TPlsqlProcedureKey) { + p.PlsqlProcedureKey = val +} + +var fieldIDToName_TDropPlsqlStoredProcedureRequest = map[int16]string{ + 1: "plsqlProcedureKey", +} + +func (p *TDropPlsqlStoredProcedureRequest) IsSetPlsqlProcedureKey() bool { + return p.PlsqlProcedureKey != nil +} + +func (p *TDropPlsqlStoredProcedureRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDropPlsqlStoredProcedureRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TDropPlsqlStoredProcedureRequest) ReadField1(iprot thrift.TProtocol) error { + _field := NewTPlsqlProcedureKey() + if err := _field.Read(iprot); err != nil { + return err + } + p.PlsqlProcedureKey = _field + return nil +} + +func (p *TDropPlsqlStoredProcedureRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TDropPlsqlStoredProcedureRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TDropPlsqlStoredProcedureRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPlsqlProcedureKey() { + if err = oprot.WriteFieldBegin("plsqlProcedureKey", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.PlsqlProcedureKey.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { 
+ goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TDropPlsqlStoredProcedureRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDropPlsqlStoredProcedureRequest(%+v)", *p) + +} + +func (p *TDropPlsqlStoredProcedureRequest) DeepEqual(ano *TDropPlsqlStoredProcedureRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PlsqlProcedureKey) { + return false + } + return true +} + +func (p *TDropPlsqlStoredProcedureRequest) Field1DeepEqual(src *TPlsqlProcedureKey) bool { + + if !p.PlsqlProcedureKey.DeepEqual(src) { + return false + } + return true +} + +type TPlsqlStoredProcedureResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +} + +func NewTPlsqlStoredProcedureResult_() *TPlsqlStoredProcedureResult_ { + return &TPlsqlStoredProcedureResult_{} +} + +func (p *TPlsqlStoredProcedureResult_) InitDefault() { +} + +var TPlsqlStoredProcedureResult__Status_DEFAULT *status.TStatus + +func (p *TPlsqlStoredProcedureResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TPlsqlStoredProcedureResult__Status_DEFAULT + } + return p.Status +} +func (p *TPlsqlStoredProcedureResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TPlsqlStoredProcedureResult_ = map[int16]string{ + 1: "status", +} + +func (p *TPlsqlStoredProcedureResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TPlsqlStoredProcedureResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlStoredProcedureResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPlsqlStoredProcedureResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p 
*TPlsqlStoredProcedureResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPlsqlStoredProcedureResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPlsqlStoredProcedureResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlsqlStoredProcedureResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPlsqlStoredProcedureResult_(%+v)", *p) + +} + +func (p *TPlsqlStoredProcedureResult_) DeepEqual(ano *TPlsqlStoredProcedureResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TPlsqlStoredProcedureResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TAddPlsqlPackageRequest struct { + PlsqlPackage *TPlsqlPackage `thrift:"plsqlPackage,1,optional" frugal:"1,optional,TPlsqlPackage" json:"plsqlPackage,omitempty"` + IsForce *bool `thrift:"isForce,2,optional" frugal:"2,optional,bool" json:"isForce,omitempty"` +} + +func NewTAddPlsqlPackageRequest() *TAddPlsqlPackageRequest { + return &TAddPlsqlPackageRequest{} +} + +func (p *TAddPlsqlPackageRequest) InitDefault() { +} + +var TAddPlsqlPackageRequest_PlsqlPackage_DEFAULT *TPlsqlPackage + +func (p *TAddPlsqlPackageRequest) GetPlsqlPackage() (v *TPlsqlPackage) { + if !p.IsSetPlsqlPackage() { + return TAddPlsqlPackageRequest_PlsqlPackage_DEFAULT + } + return p.PlsqlPackage +} + +var TAddPlsqlPackageRequest_IsForce_DEFAULT bool + +func (p *TAddPlsqlPackageRequest) GetIsForce() (v bool) { + if !p.IsSetIsForce() { + return TAddPlsqlPackageRequest_IsForce_DEFAULT + } + return *p.IsForce +} +func (p *TAddPlsqlPackageRequest) SetPlsqlPackage(val *TPlsqlPackage) { + p.PlsqlPackage = val +} +func (p *TAddPlsqlPackageRequest) SetIsForce(val *bool) { + p.IsForce = val +} + +var fieldIDToName_TAddPlsqlPackageRequest = map[int16]string{ + 1: "plsqlPackage", + 2: "isForce", +} + +func (p *TAddPlsqlPackageRequest) IsSetPlsqlPackage() bool { + return p.PlsqlPackage != nil +} + +func (p *TAddPlsqlPackageRequest) IsSetIsForce() bool { + return p.IsForce != nil +} + +func (p *TAddPlsqlPackageRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + 
var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddPlsqlPackageRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TAddPlsqlPackageRequest) ReadField1(iprot thrift.TProtocol) error { + _field := NewTPlsqlPackage() + if err := _field.Read(iprot); err != nil { + return err + } + p.PlsqlPackage = _field + return nil +} +func (p *TAddPlsqlPackageRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsForce = _field + return nil +} + +func (p *TAddPlsqlPackageRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TAddPlsqlPackageRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TAddPlsqlPackageRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPlsqlPackage() { + if err = oprot.WriteFieldBegin("plsqlPackage", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.PlsqlPackage.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write 
field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TAddPlsqlPackageRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIsForce() { + if err = oprot.WriteFieldBegin("isForce", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsForce); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TAddPlsqlPackageRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TAddPlsqlPackageRequest(%+v)", *p) + +} + +func (p *TAddPlsqlPackageRequest) DeepEqual(ano *TAddPlsqlPackageRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PlsqlPackage) { + return false + } + if !p.Field2DeepEqual(ano.IsForce) { + return false + } + return true +} + +func (p *TAddPlsqlPackageRequest) Field1DeepEqual(src *TPlsqlPackage) bool { + + if !p.PlsqlPackage.DeepEqual(src) { + return false + } + return true +} +func (p *TAddPlsqlPackageRequest) Field2DeepEqual(src *bool) bool { + + if p.IsForce == src { + return true + } else if p.IsForce == nil || src == nil { + return false + } + if *p.IsForce != *src { + return false + } + return true +} + +type TDropPlsqlPackageRequest struct { + PlsqlProcedureKey *TPlsqlProcedureKey `thrift:"plsqlProcedureKey,1,optional" frugal:"1,optional,TPlsqlProcedureKey" json:"plsqlProcedureKey,omitempty"` +} + +func NewTDropPlsqlPackageRequest() *TDropPlsqlPackageRequest { + return &TDropPlsqlPackageRequest{} +} + +func (p *TDropPlsqlPackageRequest) InitDefault() { +} + +var TDropPlsqlPackageRequest_PlsqlProcedureKey_DEFAULT *TPlsqlProcedureKey + +func (p *TDropPlsqlPackageRequest) GetPlsqlProcedureKey() (v *TPlsqlProcedureKey) { + if !p.IsSetPlsqlProcedureKey() { + return TDropPlsqlPackageRequest_PlsqlProcedureKey_DEFAULT + } + return p.PlsqlProcedureKey +} +func (p *TDropPlsqlPackageRequest) SetPlsqlProcedureKey(val *TPlsqlProcedureKey) { + p.PlsqlProcedureKey = val +} + +var fieldIDToName_TDropPlsqlPackageRequest = map[int16]string{ + 1: "plsqlProcedureKey", +} + +func (p *TDropPlsqlPackageRequest) IsSetPlsqlProcedureKey() bool { + return p.PlsqlProcedureKey != nil +} + +func (p *TDropPlsqlPackageRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + 
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDropPlsqlPackageRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TDropPlsqlPackageRequest) ReadField1(iprot thrift.TProtocol) error { + _field := NewTPlsqlProcedureKey() + if err := _field.Read(iprot); err != nil { + return err + } + p.PlsqlProcedureKey = _field + return nil +} + +func (p *TDropPlsqlPackageRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TDropPlsqlPackageRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TDropPlsqlPackageRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPlsqlProcedureKey() { + if err = oprot.WriteFieldBegin("plsqlProcedureKey", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.PlsqlProcedureKey.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TDropPlsqlPackageRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDropPlsqlPackageRequest(%+v)", *p) + +} + +func (p *TDropPlsqlPackageRequest) DeepEqual(ano *TDropPlsqlPackageRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.PlsqlProcedureKey) { + return false + } + return true +} + +func (p *TDropPlsqlPackageRequest) Field1DeepEqual(src *TPlsqlProcedureKey) bool { + + if !p.PlsqlProcedureKey.DeepEqual(src) { + return false + } + return true +} + +type TPlsqlPackageResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +} + +func NewTPlsqlPackageResult_() *TPlsqlPackageResult_ { + return &TPlsqlPackageResult_{} +} + +func (p *TPlsqlPackageResult_) InitDefault() { +} + +var TPlsqlPackageResult__Status_DEFAULT *status.TStatus + +func (p *TPlsqlPackageResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TPlsqlPackageResult__Status_DEFAULT + } + return p.Status +} +func (p *TPlsqlPackageResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var 
fieldIDToName_TPlsqlPackageResult_ = map[int16]string{ + 1: "status", +} + +func (p *TPlsqlPackageResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TPlsqlPackageResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlPackageResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPlsqlPackageResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} + +func (p *TPlsqlPackageResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPlsqlPackageResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPlsqlPackageResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlsqlPackageResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPlsqlPackageResult_(%+v)", *p) + +} 
+ +func (p *TPlsqlPackageResult_) DeepEqual(ano *TPlsqlPackageResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *TPlsqlPackageResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} + +type TGetMasterTokenRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Password *string `thrift:"password,3,optional" frugal:"3,optional,string" json:"password,omitempty"` +} + +func NewTGetMasterTokenRequest() *TGetMasterTokenRequest { + return &TGetMasterTokenRequest{} +} + +func (p *TGetMasterTokenRequest) InitDefault() { +} + +var TGetMasterTokenRequest_Cluster_DEFAULT string + +func (p *TGetMasterTokenRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGetMasterTokenRequest_Cluster_DEFAULT + } + return *p.Cluster +} + +var TGetMasterTokenRequest_User_DEFAULT string + +func (p *TGetMasterTokenRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TGetMasterTokenRequest_User_DEFAULT + } + return *p.User +} + +var TGetMasterTokenRequest_Password_DEFAULT string + +func (p *TGetMasterTokenRequest) GetPassword() (v string) { + if !p.IsSetPassword() { + return TGetMasterTokenRequest_Password_DEFAULT + } + return *p.Password +} +func (p *TGetMasterTokenRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TGetMasterTokenRequest) SetUser(val *string) { + p.User = val +} +func (p *TGetMasterTokenRequest) SetPassword(val *string) { + p.Password = val +} + +var fieldIDToName_TGetMasterTokenRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "password", +} + +func (p *TGetMasterTokenRequest) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TGetMasterTokenRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TGetMasterTokenRequest) IsSetPassword() bool { + return p.Password != nil +} + +func (p *TGetMasterTokenRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T 
read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetMasterTokenRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TGetMasterTokenRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.User = _field + return nil +} +func (p *TGetMasterTokenRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Password = _field + return nil +} + +func (p *TGetMasterTokenRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetMasterTokenRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetMasterTokenRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetMasterTokenRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetMasterTokenRequest) writeField3(oprot thrift.TProtocol) 
(err error) { + if p.IsSetPassword() { + if err = oprot.WriteFieldBegin("password", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Password); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMasterTokenRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetMasterTokenRequest(%+v)", *p) + +} + +func (p *TGetMasterTokenRequest) DeepEqual(ano *TGetMasterTokenRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Cluster) { + return false + } + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Password) { + return false + } + return true +} + +func (p *TGetMasterTokenRequest) Field1DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TGetMasterTokenRequest) Field2DeepEqual(src *string) bool { + + if p.User == src { + return true + } else if p.User == nil || src == nil { + return false + } + if strings.Compare(*p.User, *src) != 0 { + return false + } + return true +} +func (p *TGetMasterTokenRequest) Field3DeepEqual(src *string) bool { + + if p.Password == src { + return true + } else if p.Password == nil || src == nil { + return false + } + if strings.Compare(*p.Password, *src) != 0 { + return false + } + return true +} + +type TGetMasterTokenResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,3,optional" frugal:"3,optional,types.TNetworkAddress" json:"master_address,omitempty"` +} + +func NewTGetMasterTokenResult_() *TGetMasterTokenResult_ { + return &TGetMasterTokenResult_{} +} + +func (p *TGetMasterTokenResult_) InitDefault() { +} + +var TGetMasterTokenResult__Status_DEFAULT *status.TStatus + +func (p *TGetMasterTokenResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetMasterTokenResult__Status_DEFAULT + } + return p.Status +} + +var TGetMasterTokenResult__Token_DEFAULT string + +func (p *TGetMasterTokenResult_) GetToken() (v string) { + if !p.IsSetToken() { + return TGetMasterTokenResult__Token_DEFAULT + } + return *p.Token +} + +var TGetMasterTokenResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TGetMasterTokenResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetMasterTokenResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TGetMasterTokenResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetMasterTokenResult_) SetToken(val *string) { + p.Token = val +} +func (p *TGetMasterTokenResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TGetMasterTokenResult_ = map[int16]string{ + 1: "status", + 2: "token", + 3: "master_address", +} + +func (p *TGetMasterTokenResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p 
*TGetMasterTokenResult_) IsSetToken() bool { + return p.Token != nil +} + +func (p *TGetMasterTokenResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TGetMasterTokenResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetMasterTokenResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetMasterTokenResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TGetMasterTokenResult_) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TGetMasterTokenResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetMasterTokenResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) 
+WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetMasterTokenResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetMasterTokenResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetMasterTokenResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMasterTokenResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetMasterTokenResult_(%+v)", *p) + +} + +func (p *TGetMasterTokenResult_) DeepEqual(ano *TGetMasterTokenResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Token) { + return false + } + if !p.Field3DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TGetMasterTokenResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetMasterTokenResult_) Field2DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TGetMasterTokenResult_) Field3DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TGetBinlogLagResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Lag *int64 `thrift:"lag,2,optional" frugal:"2,optional,i64" json:"lag,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,3,optional" frugal:"3,optional,types.TNetworkAddress" 
json:"master_address,omitempty"` +} + +func NewTGetBinlogLagResult_() *TGetBinlogLagResult_ { + return &TGetBinlogLagResult_{} +} + +func (p *TGetBinlogLagResult_) InitDefault() { +} + +var TGetBinlogLagResult__Status_DEFAULT *status.TStatus + +func (p *TGetBinlogLagResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetBinlogLagResult__Status_DEFAULT + } + return p.Status +} + +var TGetBinlogLagResult__Lag_DEFAULT int64 + +func (p *TGetBinlogLagResult_) GetLag() (v int64) { + if !p.IsSetLag() { + return TGetBinlogLagResult__Lag_DEFAULT + } + return *p.Lag +} + +var TGetBinlogLagResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TGetBinlogLagResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetBinlogLagResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TGetBinlogLagResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetBinlogLagResult_) SetLag(val *int64) { + p.Lag = val +} +func (p *TGetBinlogLagResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val +} + +var fieldIDToName_TGetBinlogLagResult_ = map[int16]string{ + 1: "status", + 2: "lag", + 3: "master_address", +} + +func (p *TGetBinlogLagResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetBinlogLagResult_) IsSetLag() bool { + return p.Lag != nil +} + +func (p *TGetBinlogLagResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TGetBinlogLagResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogLagResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBinlogLagResult_) ReadField1(iprot thrift.TProtocol) error { + _field := 
status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetBinlogLagResult_) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Lag = _field + return nil +} +func (p *TGetBinlogLagResult_) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterAddress = _field + return nil +} + +func (p *TGetBinlogLagResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGetBinlogLagResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 goto WriteFieldError } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 + if err = p.writeField2(oprot); err != nil { + fieldId = 2 goto WriteFieldError } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 + if err = p.writeField3(oprot); err != nil { + fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -31546,12 +62777,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCommitTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *TGetBinlogLagResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31565,12 +62796,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { +func (p *TGetBinlogLagResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetLag() { + if err = oprot.WriteFieldBegin("lag", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.User); err != nil { + if err := oprot.WriteI64(*p.Lag); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31584,12 +62815,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { +func (p *TGetBinlogLagResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Passwd); err != nil { + if err := p.MasterAddress.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31603,50 +62834,286 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - 
goto WriteFieldBeginError +func (p *TGetBinlogLagResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetBinlogLagResult_(%+v)", *p) + +} + +func (p *TGetBinlogLagResult_) DeepEqual(ano *TGetBinlogLagResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Lag) { + return false + } + if !p.Field3DeepEqual(ano.MasterAddress) { + return false + } + return true +} + +func (p *TGetBinlogLagResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *TGetBinlogLagResult_) Field2DeepEqual(src *int64) bool { + + if p.Lag == src { + return true + } else if p.Lag == nil || src == nil { + return false + } + if *p.Lag != *src { + return false + } + return true +} +func (p *TGetBinlogLagResult_) Field3DeepEqual(src *types.TNetworkAddress) bool { + + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true +} + +type TUpdateFollowerStatsCacheRequest struct { + Key *string `thrift:"key,1,optional" frugal:"1,optional,string" json:"key,omitempty"` + StatsRows []string `thrift:"statsRows,2,optional" frugal:"2,optional,list" json:"statsRows,omitempty"` + ColStatsData *string `thrift:"colStatsData,3,optional" frugal:"3,optional,string" json:"colStatsData,omitempty"` +} + +func NewTUpdateFollowerStatsCacheRequest() *TUpdateFollowerStatsCacheRequest { + return &TUpdateFollowerStatsCacheRequest{} +} + +func (p *TUpdateFollowerStatsCacheRequest) InitDefault() { +} + +var TUpdateFollowerStatsCacheRequest_Key_DEFAULT string + +func (p *TUpdateFollowerStatsCacheRequest) GetKey() (v string) { + if !p.IsSetKey() { + return TUpdateFollowerStatsCacheRequest_Key_DEFAULT + } + return *p.Key +} + +var TUpdateFollowerStatsCacheRequest_StatsRows_DEFAULT []string + +func (p *TUpdateFollowerStatsCacheRequest) GetStatsRows() (v []string) { + if !p.IsSetStatsRows() { + return TUpdateFollowerStatsCacheRequest_StatsRows_DEFAULT + } + return p.StatsRows +} + +var TUpdateFollowerStatsCacheRequest_ColStatsData_DEFAULT string + +func (p *TUpdateFollowerStatsCacheRequest) GetColStatsData() (v string) { + if !p.IsSetColStatsData() { + return TUpdateFollowerStatsCacheRequest_ColStatsData_DEFAULT + } + return *p.ColStatsData +} +func (p *TUpdateFollowerStatsCacheRequest) SetKey(val *string) { + p.Key = val +} +func (p *TUpdateFollowerStatsCacheRequest) SetStatsRows(val []string) { + p.StatsRows = val +} +func (p *TUpdateFollowerStatsCacheRequest) SetColStatsData(val *string) { + p.ColStatsData = val +} + +var fieldIDToName_TUpdateFollowerStatsCacheRequest = map[int16]string{ + 1: "key", + 2: "statsRows", + 3: "colStatsData", +} + +func (p *TUpdateFollowerStatsCacheRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *TUpdateFollowerStatsCacheRequest) IsSetStatsRows() bool { + return p.StatsRows != nil +} + +func (p *TUpdateFollowerStatsCacheRequest) IsSetColStatsData() bool { + return p.ColStatsData != nil +} + +func (p *TUpdateFollowerStatsCacheRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteString(*p.Db); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - if err = oprot.WriteFieldEnd(); err 
!= nil { - goto WriteFieldEndError + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateFollowerStatsCacheRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { +func (p *TUpdateFollowerStatsCacheRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Key = _field + return nil +} +func (p *TUpdateFollowerStatsCacheRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _elem = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.StatsRows = _field + return nil +} +func (p *TUpdateFollowerStatsCacheRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ColStatsData = _field + return nil +} + +func (p *TUpdateFollowerStatsCacheRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TUpdateFollowerStatsCacheRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = 
p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCommitTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 6); err != nil { +func (p *TUpdateFollowerStatsCacheRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TxnId); err != nil { + if err := oprot.WriteString(*p.Key); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31655,21 +63122,21 @@ func (p *TCommitTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetCommitInfos() { - if err = oprot.WriteFieldBegin("commit_infos", thrift.LIST, 7); err != nil { +func (p *TUpdateFollowerStatsCacheRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStatsRows() { + if err = oprot.WriteFieldBegin("statsRows", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommitInfos)); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, len(p.StatsRows)); err != nil { return err } - for _, v := range p.CommitInfos { - if err := v.Write(oprot); err != nil { + for _, v := range p.StatsRows { + if err := oprot.WriteString(v); err != nil { return err } } @@ -31682,17 +63149,17 @@ func (p *TCommitTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { +func (p *TUpdateFollowerStatsCacheRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetColStatsData() { + if err = oprot.WriteFieldBegin("colStatsData", thrift.STRING, 3); err != nil { 
goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.AuthCode); err != nil { + if err := oprot.WriteString(*p.ColStatsData); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31701,74 +63168,208 @@ func (p *TCommitTxnRequest) writeField8(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnCommitAttachment() { - if err = oprot.WriteFieldBegin("txn_commit_attachment", thrift.STRUCT, 9); err != nil { - goto WriteFieldBeginError +func (p *TUpdateFollowerStatsCacheRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TUpdateFollowerStatsCacheRequest(%+v)", *p) + +} + +func (p *TUpdateFollowerStatsCacheRequest) DeepEqual(ano *TUpdateFollowerStatsCacheRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Key) { + return false + } + if !p.Field2DeepEqual(ano.StatsRows) { + return false + } + if !p.Field3DeepEqual(ano.ColStatsData) { + return false + } + return true +} + +func (p *TUpdateFollowerStatsCacheRequest) Field1DeepEqual(src *string) bool { + + if p.Key == src { + return true + } else if p.Key == nil || src == nil { + return false + } + if strings.Compare(*p.Key, *src) != 0 { + return false + } + return true +} +func (p *TUpdateFollowerStatsCacheRequest) Field2DeepEqual(src []string) bool { + + if len(p.StatsRows) != len(src) { + return false + } + for i, v := range p.StatsRows { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false } - if err := p.TxnCommitAttachment.Write(oprot); err != nil { - return err + } + return true +} +func (p *TUpdateFollowerStatsCacheRequest) Field3DeepEqual(src *string) bool { + + if p.ColStatsData == src { + return true + } else if p.ColStatsData == nil || src == nil { + return false + } + if strings.Compare(*p.ColStatsData, *src) != 0 { + return false + } + return true +} + +type TInvalidateFollowerStatsCacheRequest struct { + Key *string `thrift:"key,1,optional" frugal:"1,optional,string" json:"key,omitempty"` +} + +func NewTInvalidateFollowerStatsCacheRequest() *TInvalidateFollowerStatsCacheRequest { + return &TInvalidateFollowerStatsCacheRequest{} +} + +func (p *TInvalidateFollowerStatsCacheRequest) InitDefault() { +} + +var TInvalidateFollowerStatsCacheRequest_Key_DEFAULT string + +func (p *TInvalidateFollowerStatsCacheRequest) GetKey() (v string) { + if !p.IsSetKey() { + return TInvalidateFollowerStatsCacheRequest_Key_DEFAULT + } + return *p.Key +} +func (p *TInvalidateFollowerStatsCacheRequest) SetKey(val *string) { + p.Key = val +} + +var fieldIDToName_TInvalidateFollowerStatsCacheRequest = map[int16]string{ + 1: "key", +} + +func (p *TInvalidateFollowerStatsCacheRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *TInvalidateFollowerStatsCacheRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInvalidateFollowerStatsCacheRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TInvalidateFollowerStatsCacheRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } + p.Key = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TCommitTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TInvalidateFollowerStatsCacheRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TInvalidateFollowerStatsCacheRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), 
err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCommitTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { +func (p *TInvalidateFollowerStatsCacheRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteString(*p.Key); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -31777,237 +63378,327 @@ func (p *TCommitTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCommitTxnRequest) String() string { +func (p *TInvalidateFollowerStatsCacheRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TCommitTxnRequest(%+v)", *p) + return fmt.Sprintf("TInvalidateFollowerStatsCacheRequest(%+v)", *p) + } -func (p *TCommitTxnRequest) DeepEqual(ano *TCommitTxnRequest) bool { +func (p *TInvalidateFollowerStatsCacheRequest) DeepEqual(ano *TInvalidateFollowerStatsCacheRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.UserIp) { - return false - } - if !p.Field6DeepEqual(ano.TxnId) { - return false - } - if !p.Field7DeepEqual(ano.CommitInfos) { - return false - } - if !p.Field8DeepEqual(ano.AuthCode) { - return false - } - if !p.Field9DeepEqual(ano.TxnCommitAttachment) { - return false - } - if !p.Field10DeepEqual(ano.ThriftRpcTimeoutMs) { - return false - } - if !p.Field11DeepEqual(ano.Token) { - return false - } - if !p.Field12DeepEqual(ano.DbId) { + if !p.Field1DeepEqual(ano.Key) { return false } return true } -func (p *TCommitTxnRequest) Field1DeepEqual(src *string) bool { +func (p *TInvalidateFollowerStatsCacheRequest) Field1DeepEqual(src *string) bool { - if p.Cluster == src { + if p.Key == src { return true - } else if p.Cluster == nil || src == nil { + } else if p.Key == nil || src == nil { return false } - if strings.Compare(*p.Cluster, *src) != 0 { + if strings.Compare(*p.Key, *src) != 0 { return false } return true } -func (p *TCommitTxnRequest) Field2DeepEqual(src *string) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { - return false - } - if strings.Compare(*p.User, *src) != 0 { - return false - } - return true +type TUpdateFollowerPartitionStatsCacheRequest struct { + Key *string `thrift:"key,1,optional" frugal:"1,optional,string" json:"key,omitempty"` } -func (p *TCommitTxnRequest) Field3DeepEqual(src *string) bool { - if p.Passwd == src { - return true - } 
else if p.Passwd == nil || src == nil { - return false - } - if strings.Compare(*p.Passwd, *src) != 0 { - return false - } - return true +func NewTUpdateFollowerPartitionStatsCacheRequest() *TUpdateFollowerPartitionStatsCacheRequest { + return &TUpdateFollowerPartitionStatsCacheRequest{} } -func (p *TCommitTxnRequest) Field4DeepEqual(src *string) bool { - if p.Db == src { - return true - } else if p.Db == nil || src == nil { - return false - } - if strings.Compare(*p.Db, *src) != 0 { - return false +func (p *TUpdateFollowerPartitionStatsCacheRequest) InitDefault() { +} + +var TUpdateFollowerPartitionStatsCacheRequest_Key_DEFAULT string + +func (p *TUpdateFollowerPartitionStatsCacheRequest) GetKey() (v string) { + if !p.IsSetKey() { + return TUpdateFollowerPartitionStatsCacheRequest_Key_DEFAULT } - return true + return *p.Key +} +func (p *TUpdateFollowerPartitionStatsCacheRequest) SetKey(val *string) { + p.Key = val } -func (p *TCommitTxnRequest) Field5DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false +var fieldIDToName_TUpdateFollowerPartitionStatsCacheRequest = map[int16]string{ + 1: "key", +} + +func (p *TUpdateFollowerPartitionStatsCacheRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *TUpdateFollowerPartitionStatsCacheRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if strings.Compare(*p.UserIp, *src) != 0 { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - return true + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateFollowerPartitionStatsCacheRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnRequest) Field6DeepEqual(src *int64) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false +func (p *TUpdateFollowerPartitionStatsCacheRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return true + p.Key = _field + return nil } -func (p *TCommitTxnRequest) Field7DeepEqual(src []*types.TTabletCommitInfo) bool { - if len(p.CommitInfos) != len(src) { - return false +func 
(p *TUpdateFollowerPartitionStatsCacheRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TUpdateFollowerPartitionStatsCacheRequest"); err != nil { + goto WriteStructBeginError } - for i, v := range p.CommitInfos { - _src := src[i] - if !v.DeepEqual(_src) { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } } - return true + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCommitTxnRequest) Field8DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false +func (p *TUpdateFollowerPartitionStatsCacheRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Key); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCommitTxnRequest) Field9DeepEqual(src *TTxnCommitAttachment) bool { - if !p.TxnCommitAttachment.DeepEqual(src) { - return false +func (p *TUpdateFollowerPartitionStatsCacheRequest) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TUpdateFollowerPartitionStatsCacheRequest(%+v)", *p) + } -func (p *TCommitTxnRequest) Field10DeepEqual(src *int64) bool { - if p.ThriftRpcTimeoutMs == src { +func (p *TUpdateFollowerPartitionStatsCacheRequest) DeepEqual(ano *TUpdateFollowerPartitionStatsCacheRequest) bool { + if p == ano { return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.ThriftRpcTimeoutMs != *src { + if !p.Field1DeepEqual(ano.Key) { return false } return true } -func (p *TCommitTxnRequest) Field11DeepEqual(src *string) bool { - if p.Token == src { +func (p *TUpdateFollowerPartitionStatsCacheRequest) Field1DeepEqual(src *string) bool { + + if p.Key == src { return true - } else if p.Token == nil || src == nil { + } else if p.Key == nil || src == nil { return false } - if strings.Compare(*p.Token, *src) != 0 { + if strings.Compare(*p.Key, *src) != 0 { return false } return true } -func (p *TCommitTxnRequest) Field12DeepEqual(src *int64) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false - } - if *p.DbId != *src { - return false +type TAutoIncrementRangeRequest struct { + DbId *int64 `thrift:"db_id,1,optional" frugal:"1,optional,i64" json:"db_id,omitempty"` + TableId *int64 `thrift:"table_id,2,optional" frugal:"2,optional,i64" json:"table_id,omitempty"` + ColumnId *int64 `thrift:"column_id,3,optional" 
frugal:"3,optional,i64" json:"column_id,omitempty"` + Length *int64 `thrift:"length,4,optional" frugal:"4,optional,i64" json:"length,omitempty"` + LowerBound *int64 `thrift:"lower_bound,5,optional" frugal:"5,optional,i64" json:"lower_bound,omitempty"` +} + +func NewTAutoIncrementRangeRequest() *TAutoIncrementRangeRequest { + return &TAutoIncrementRangeRequest{} +} + +func (p *TAutoIncrementRangeRequest) InitDefault() { +} + +var TAutoIncrementRangeRequest_DbId_DEFAULT int64 + +func (p *TAutoIncrementRangeRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TAutoIncrementRangeRequest_DbId_DEFAULT } - return true + return *p.DbId } -type TCommitTxnResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +var TAutoIncrementRangeRequest_TableId_DEFAULT int64 + +func (p *TAutoIncrementRangeRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TAutoIncrementRangeRequest_TableId_DEFAULT + } + return *p.TableId } -func NewTCommitTxnResult_() *TCommitTxnResult_ { - return &TCommitTxnResult_{} +var TAutoIncrementRangeRequest_ColumnId_DEFAULT int64 + +func (p *TAutoIncrementRangeRequest) GetColumnId() (v int64) { + if !p.IsSetColumnId() { + return TAutoIncrementRangeRequest_ColumnId_DEFAULT + } + return *p.ColumnId } -func (p *TCommitTxnResult_) InitDefault() { - *p = TCommitTxnResult_{} +var TAutoIncrementRangeRequest_Length_DEFAULT int64 + +func (p *TAutoIncrementRangeRequest) GetLength() (v int64) { + if !p.IsSetLength() { + return TAutoIncrementRangeRequest_Length_DEFAULT + } + return *p.Length } -var TCommitTxnResult__Status_DEFAULT *status.TStatus +var TAutoIncrementRangeRequest_LowerBound_DEFAULT int64 -func (p *TCommitTxnResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TCommitTxnResult__Status_DEFAULT +func (p *TAutoIncrementRangeRequest) GetLowerBound() (v int64) { + if !p.IsSetLowerBound() { + return TAutoIncrementRangeRequest_LowerBound_DEFAULT } - return p.Status + return *p.LowerBound } -func (p *TCommitTxnResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TAutoIncrementRangeRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TAutoIncrementRangeRequest) SetTableId(val *int64) { + p.TableId = val +} +func (p *TAutoIncrementRangeRequest) SetColumnId(val *int64) { + p.ColumnId = val +} +func (p *TAutoIncrementRangeRequest) SetLength(val *int64) { + p.Length = val +} +func (p *TAutoIncrementRangeRequest) SetLowerBound(val *int64) { + p.LowerBound = val } -var fieldIDToName_TCommitTxnResult_ = map[int16]string{ - 1: "status", +var fieldIDToName_TAutoIncrementRangeRequest = map[int16]string{ + 1: "db_id", + 2: "table_id", + 3: "column_id", + 4: "length", + 5: "lower_bound", } -func (p *TCommitTxnResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TAutoIncrementRangeRequest) IsSetDbId() bool { + return p.DbId != nil } -func (p *TCommitTxnResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TAutoIncrementRangeRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TAutoIncrementRangeRequest) IsSetColumnId() bool { + return p.ColumnId != nil +} + +func (p *TAutoIncrementRangeRequest) IsSetLength() bool { + return p.Length != nil +} + +func (p *TAutoIncrementRangeRequest) IsSetLowerBound() bool { + return p.LowerBound != nil +} + +func (p *TAutoIncrementRangeRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -32027,21 +63718,50 @@ func (p *TCommitTxnResult_) 
Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -32056,7 +63776,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -32066,17 +63786,65 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TAutoIncrementRangeRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TAutoIncrementRangeRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TAutoIncrementRangeRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ColumnId = _field + return nil +} +func (p *TAutoIncrementRangeRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Length = _field + return nil +} +func (p *TAutoIncrementRangeRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.LowerBound = _field return nil } -func (p *TCommitTxnResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TAutoIncrementRangeRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCommitTxnResult"); err != nil { + if err = 
oprot.WriteStructBegin("TAutoIncrementRangeRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -32084,7 +63852,22 @@ func (p *TCommitTxnResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -32103,12 +63886,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCommitTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TAutoIncrementRangeRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.DbId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -32122,233 +63905,264 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCommitTxnResult_) String() string { +func (p *TAutoIncrementRangeRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TAutoIncrementRangeRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnId() { + if err = oprot.WriteFieldBegin("column_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ColumnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TAutoIncrementRangeRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLength() { + if err = oprot.WriteFieldBegin("length", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Length); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TAutoIncrementRangeRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetLowerBound() { + if err = oprot.WriteFieldBegin("lower_bound", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteI64(*p.LowerBound); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TAutoIncrementRangeRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TCommitTxnResult_(%+v)", *p) + return fmt.Sprintf("TAutoIncrementRangeRequest(%+v)", *p) + } -func (p *TCommitTxnResult_) DeepEqual(ano *TCommitTxnResult_) bool { +func (p *TAutoIncrementRangeRequest) DeepEqual(ano *TAutoIncrementRangeRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.DbId) { + return false + } + if !p.Field2DeepEqual(ano.TableId) { + return false + } + if !p.Field3DeepEqual(ano.ColumnId) { + return false + } + if !p.Field4DeepEqual(ano.Length) { + return false + } + if !p.Field5DeepEqual(ano.LowerBound) { return false } return true } -func (p *TCommitTxnResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TAutoIncrementRangeRequest) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { return false } return true } +func (p *TAutoIncrementRangeRequest) Field2DeepEqual(src *int64) bool { -type TLoadTxn2PCRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" json:"user_ip,omitempty"` - TxnId *int64 `thrift:"txnId,6,optional" frugal:"6,optional,i64" json:"txnId,omitempty"` - Operation *string `thrift:"operation,7,optional" frugal:"7,optional,string" json:"operation,omitempty"` - AuthCode *int64 `thrift:"auth_code,8,optional" frugal:"8,optional,i64" json:"auth_code,omitempty"` - Token *string `thrift:"token,9,optional" frugal:"9,optional,string" json:"token,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,10,optional" frugal:"10,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` - Label *string `thrift:"label,11,optional" frugal:"11,optional,string" json:"label,omitempty"` -} - -func NewTLoadTxn2PCRequest() *TLoadTxn2PCRequest { - return &TLoadTxn2PCRequest{} -} - -func (p *TLoadTxn2PCRequest) InitDefault() { - *p = TLoadTxn2PCRequest{} -} - -var TLoadTxn2PCRequest_Cluster_DEFAULT string - -func (p *TLoadTxn2PCRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TLoadTxn2PCRequest_Cluster_DEFAULT + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false } - return *p.Cluster -} - -func (p *TLoadTxn2PCRequest) GetUser() (v string) { - return p.User + if *p.TableId != *src { + return false + } + return true } +func (p *TAutoIncrementRangeRequest) Field3DeepEqual(src *int64) bool { -func (p *TLoadTxn2PCRequest) GetPasswd() (v string) { - return p.Passwd + if p.ColumnId == src { + return true + } else if p.ColumnId == nil || src == nil { + return false + } + if *p.ColumnId != *src { + return false + } + return 
true } +func (p *TAutoIncrementRangeRequest) Field4DeepEqual(src *int64) bool { -var TLoadTxn2PCRequest_Db_DEFAULT string - -func (p *TLoadTxn2PCRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TLoadTxn2PCRequest_Db_DEFAULT + if p.Length == src { + return true + } else if p.Length == nil || src == nil { + return false } - return *p.Db + if *p.Length != *src { + return false + } + return true } +func (p *TAutoIncrementRangeRequest) Field5DeepEqual(src *int64) bool { -var TLoadTxn2PCRequest_UserIp_DEFAULT string - -func (p *TLoadTxn2PCRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TLoadTxn2PCRequest_UserIp_DEFAULT + if p.LowerBound == src { + return true + } else if p.LowerBound == nil || src == nil { + return false } - return *p.UserIp + if *p.LowerBound != *src { + return false + } + return true } -var TLoadTxn2PCRequest_TxnId_DEFAULT int64 - -func (p *TLoadTxn2PCRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TLoadTxn2PCRequest_TxnId_DEFAULT - } - return *p.TxnId +type TAutoIncrementRangeResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Start *int64 `thrift:"start,2,optional" frugal:"2,optional,i64" json:"start,omitempty"` + Length *int64 `thrift:"length,3,optional" frugal:"3,optional,i64" json:"length,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,4,optional" frugal:"4,optional,types.TNetworkAddress" json:"master_address,omitempty"` } -var TLoadTxn2PCRequest_Operation_DEFAULT string +func NewTAutoIncrementRangeResult_() *TAutoIncrementRangeResult_ { + return &TAutoIncrementRangeResult_{} +} -func (p *TLoadTxn2PCRequest) GetOperation() (v string) { - if !p.IsSetOperation() { - return TLoadTxn2PCRequest_Operation_DEFAULT - } - return *p.Operation +func (p *TAutoIncrementRangeResult_) InitDefault() { } -var TLoadTxn2PCRequest_AuthCode_DEFAULT int64 +var TAutoIncrementRangeResult__Status_DEFAULT *status.TStatus -func (p *TLoadTxn2PCRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TLoadTxn2PCRequest_AuthCode_DEFAULT +func (p *TAutoIncrementRangeResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TAutoIncrementRangeResult__Status_DEFAULT } - return *p.AuthCode + return p.Status } -var TLoadTxn2PCRequest_Token_DEFAULT string +var TAutoIncrementRangeResult__Start_DEFAULT int64 -func (p *TLoadTxn2PCRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TLoadTxn2PCRequest_Token_DEFAULT +func (p *TAutoIncrementRangeResult_) GetStart() (v int64) { + if !p.IsSetStart() { + return TAutoIncrementRangeResult__Start_DEFAULT } - return *p.Token + return *p.Start } -var TLoadTxn2PCRequest_ThriftRpcTimeoutMs_DEFAULT int64 +var TAutoIncrementRangeResult__Length_DEFAULT int64 -func (p *TLoadTxn2PCRequest) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TLoadTxn2PCRequest_ThriftRpcTimeoutMs_DEFAULT +func (p *TAutoIncrementRangeResult_) GetLength() (v int64) { + if !p.IsSetLength() { + return TAutoIncrementRangeResult__Length_DEFAULT } - return *p.ThriftRpcTimeoutMs + return *p.Length } -var TLoadTxn2PCRequest_Label_DEFAULT string +var TAutoIncrementRangeResult__MasterAddress_DEFAULT *types.TNetworkAddress -func (p *TLoadTxn2PCRequest) GetLabel() (v string) { - if !p.IsSetLabel() { - return TLoadTxn2PCRequest_Label_DEFAULT +func (p *TAutoIncrementRangeResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return 
TAutoIncrementRangeResult__MasterAddress_DEFAULT } - return *p.Label -} -func (p *TLoadTxn2PCRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TLoadTxn2PCRequest) SetUser(val string) { - p.User = val -} -func (p *TLoadTxn2PCRequest) SetPasswd(val string) { - p.Passwd = val -} -func (p *TLoadTxn2PCRequest) SetDb(val *string) { - p.Db = val -} -func (p *TLoadTxn2PCRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TLoadTxn2PCRequest) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TLoadTxn2PCRequest) SetOperation(val *string) { - p.Operation = val -} -func (p *TLoadTxn2PCRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TLoadTxn2PCRequest) SetToken(val *string) { - p.Token = val -} -func (p *TLoadTxn2PCRequest) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val -} -func (p *TLoadTxn2PCRequest) SetLabel(val *string) { - p.Label = val -} - -var fieldIDToName_TLoadTxn2PCRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "user_ip", - 6: "txnId", - 7: "operation", - 8: "auth_code", - 9: "token", - 10: "thrift_rpc_timeout_ms", - 11: "label", + return p.MasterAddress } - -func (p *TLoadTxn2PCRequest) IsSetCluster() bool { - return p.Cluster != nil +func (p *TAutoIncrementRangeResult_) SetStatus(val *status.TStatus) { + p.Status = val } - -func (p *TLoadTxn2PCRequest) IsSetDb() bool { - return p.Db != nil +func (p *TAutoIncrementRangeResult_) SetStart(val *int64) { + p.Start = val } - -func (p *TLoadTxn2PCRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TAutoIncrementRangeResult_) SetLength(val *int64) { + p.Length = val } - -func (p *TLoadTxn2PCRequest) IsSetTxnId() bool { - return p.TxnId != nil +func (p *TAutoIncrementRangeResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val } -func (p *TLoadTxn2PCRequest) IsSetOperation() bool { - return p.Operation != nil +var fieldIDToName_TAutoIncrementRangeResult_ = map[int16]string{ + 1: "status", + 2: "start", + 3: "length", + 4: "master_address", } -func (p *TLoadTxn2PCRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +func (p *TAutoIncrementRangeResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TLoadTxn2PCRequest) IsSetToken() bool { - return p.Token != nil +func (p *TAutoIncrementRangeResult_) IsSetStart() bool { + return p.Start != nil } -func (p *TLoadTxn2PCRequest) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil +func (p *TAutoIncrementRangeResult_) IsSetLength() bool { + return p.Length != nil } -func (p *TLoadTxn2PCRequest) IsSetLabel() bool { - return p.Label != nil +func (p *TAutoIncrementRangeResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil } -func (p *TLoadTxn2PCRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TAutoIncrementRangeResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -32365,123 +64179,42 @@ func (p *TLoadTxn2PCRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + 
if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRING { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -32490,22 +64223,13 @@ func (p *TLoadTxn2PCRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -32513,112 +64237,50 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) 
ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCRequest[fieldId])) -} - -func (p *TLoadTxn2PCRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v - } - return nil -} - -func (p *TLoadTxn2PCRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Operation = &v - } - return nil } -func (p *TLoadTxn2PCRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TAutoIncrementRangeResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err - } else { - p.AuthCode = &v } + p.Status = _field return nil } +func (p *TAutoIncrementRangeResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TLoadTxn2PCRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Token = &v + _field = &v } + p.Start = _field return nil } +func (p *TAutoIncrementRangeResult_) ReadField3(iprot thrift.TProtocol) error { -func (p *TLoadTxn2PCRequest) ReadField10(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThriftRpcTimeoutMs = &v + _field = &v } + p.Length = _field return nil } - -func (p *TLoadTxn2PCRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TAutoIncrementRangeResult_) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Label = &v } + p.MasterAddress = _field return nil } -func (p *TLoadTxn2PCRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TAutoIncrementRangeResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxn2PCRequest"); err != nil { + if err = oprot.WriteStructBegin("TAutoIncrementRangeResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -32638,35 +64300,6 @@ func (p *TLoadTxn2PCRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); 
err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -32685,12 +64318,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxn2PCRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *TAutoIncrementRangeResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -32704,141 +64337,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxn2PCRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: 
", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txnId", thrift.I64, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetOperation() { - if err = oprot.WriteFieldBegin("operation", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Operation); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.AuthCode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TLoadTxn2PCRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 9); err != nil { +func (p *TAutoIncrementRangeResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStart() { + if err = oprot.WriteFieldBegin("start", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Token); err != nil { + if err := oprot.WriteI64(*p.Start); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -32847,17 +64351,17 @@ func (p *TLoadTxn2PCRequest) writeField9(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TLoadTxn2PCRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 10); err != nil { +func (p *TAutoIncrementRangeResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLength() { + if err = oprot.WriteFieldBegin("length", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + if err := oprot.WriteI64(*p.Length); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -32866,17 +64370,17 @@ 
func (p *TLoadTxn2PCRequest) writeField10(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TLoadTxn2PCRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetLabel() { - if err = oprot.WriteFieldBegin("label", thrift.STRING, 11); err != nil { +func (p *TAutoIncrementRangeResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Label); err != nil { + if err := p.MasterAddress.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -32885,220 +64389,186 @@ func (p *TLoadTxn2PCRequest) writeField11(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TLoadTxn2PCRequest) String() string { +func (p *TAutoIncrementRangeResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxn2PCRequest(%+v)", *p) + return fmt.Sprintf("TAutoIncrementRangeResult_(%+v)", *p) + } -func (p *TLoadTxn2PCRequest) DeepEqual(ano *TLoadTxn2PCRequest) bool { +func (p *TAutoIncrementRangeResult_) DeepEqual(ano *TAutoIncrementRangeResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.UserIp) { - return false - } - if !p.Field6DeepEqual(ano.TxnId) { - return false - } - if !p.Field7DeepEqual(ano.Operation) { - return false - } - if !p.Field8DeepEqual(ano.AuthCode) { + if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field9DeepEqual(ano.Token) { + if !p.Field2DeepEqual(ano.Start) { return false } - if !p.Field10DeepEqual(ano.ThriftRpcTimeoutMs) { + if !p.Field3DeepEqual(ano.Length) { return false } - if !p.Field11DeepEqual(ano.Label) { + if !p.Field4DeepEqual(ano.MasterAddress) { return false } return true } -func (p *TLoadTxn2PCRequest) Field1DeepEqual(src *string) bool { +func (p *TAutoIncrementRangeResult_) Field1DeepEqual(src *status.TStatus) bool { - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TLoadTxn2PCRequest) Field2DeepEqual(src string) bool { +func (p *TAutoIncrementRangeResult_) Field2DeepEqual(src *int64) bool { - if strings.Compare(p.User, src) != 0 { + if p.Start == src { + return true + } else if p.Start == nil || src == nil { return false } - return true -} -func (p *TLoadTxn2PCRequest) Field3DeepEqual(src string) bool { - - if 
strings.Compare(p.Passwd, src) != 0 { + if *p.Start != *src { return false } return true } -func (p *TLoadTxn2PCRequest) Field4DeepEqual(src *string) bool { +func (p *TAutoIncrementRangeResult_) Field3DeepEqual(src *int64) bool { - if p.Db == src { + if p.Length == src { return true - } else if p.Db == nil || src == nil { + } else if p.Length == nil || src == nil { return false } - if strings.Compare(*p.Db, *src) != 0 { + if *p.Length != *src { return false } return true } -func (p *TLoadTxn2PCRequest) Field5DeepEqual(src *string) bool { +func (p *TAutoIncrementRangeResult_) Field4DeepEqual(src *types.TNetworkAddress) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { + if !p.MasterAddress.DeepEqual(src) { return false } return true } -func (p *TLoadTxn2PCRequest) Field6DeepEqual(src *int64) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false - } - return true +type TCreatePartitionRequest struct { + TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"` + DbId *int64 `thrift:"db_id,2,optional" frugal:"2,optional,i64" json:"db_id,omitempty"` + TableId *int64 `thrift:"table_id,3,optional" frugal:"3,optional,i64" json:"table_id,omitempty"` + PartitionValues [][]*exprs.TNullableStringLiteral `thrift:"partitionValues,4,optional" frugal:"4,optional,list>" json:"partitionValues,omitempty"` + BeEndpoint *string `thrift:"be_endpoint,5,optional" frugal:"5,optional,string" json:"be_endpoint,omitempty"` } -func (p *TLoadTxn2PCRequest) Field7DeepEqual(src *string) bool { - if p.Operation == src { - return true - } else if p.Operation == nil || src == nil { - return false - } - if strings.Compare(*p.Operation, *src) != 0 { - return false - } - return true +func NewTCreatePartitionRequest() *TCreatePartitionRequest { + return &TCreatePartitionRequest{} } -func (p *TLoadTxn2PCRequest) Field8DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false - } - return true +func (p *TCreatePartitionRequest) InitDefault() { } -func (p *TLoadTxn2PCRequest) Field9DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false +var TCreatePartitionRequest_TxnId_DEFAULT int64 + +func (p *TCreatePartitionRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TCreatePartitionRequest_TxnId_DEFAULT } - return true + return *p.TxnId } -func (p *TLoadTxn2PCRequest) Field10DeepEqual(src *int64) bool { - if p.ThriftRpcTimeoutMs == src { - return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { - return false - } - if *p.ThriftRpcTimeoutMs != *src { - return false +var TCreatePartitionRequest_DbId_DEFAULT int64 + +func (p *TCreatePartitionRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TCreatePartitionRequest_DbId_DEFAULT } - return true + return *p.DbId } -func (p *TLoadTxn2PCRequest) Field11DeepEqual(src *string) bool { - if p.Label == src { - return true - } else if p.Label == nil || src == nil { - return false - } - if strings.Compare(*p.Label, *src) != 0 { - return false +var TCreatePartitionRequest_TableId_DEFAULT int64 + +func (p *TCreatePartitionRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return 
TCreatePartitionRequest_TableId_DEFAULT } - return true + return *p.TableId } -type TLoadTxn2PCResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +var TCreatePartitionRequest_PartitionValues_DEFAULT [][]*exprs.TNullableStringLiteral + +func (p *TCreatePartitionRequest) GetPartitionValues() (v [][]*exprs.TNullableStringLiteral) { + if !p.IsSetPartitionValues() { + return TCreatePartitionRequest_PartitionValues_DEFAULT + } + return p.PartitionValues } -func NewTLoadTxn2PCResult_() *TLoadTxn2PCResult_ { - return &TLoadTxn2PCResult_{} +var TCreatePartitionRequest_BeEndpoint_DEFAULT string + +func (p *TCreatePartitionRequest) GetBeEndpoint() (v string) { + if !p.IsSetBeEndpoint() { + return TCreatePartitionRequest_BeEndpoint_DEFAULT + } + return *p.BeEndpoint +} +func (p *TCreatePartitionRequest) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TCreatePartitionRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TCreatePartitionRequest) SetTableId(val *int64) { + p.TableId = val +} +func (p *TCreatePartitionRequest) SetPartitionValues(val [][]*exprs.TNullableStringLiteral) { + p.PartitionValues = val +} +func (p *TCreatePartitionRequest) SetBeEndpoint(val *string) { + p.BeEndpoint = val } -func (p *TLoadTxn2PCResult_) InitDefault() { - *p = TLoadTxn2PCResult_{} +var fieldIDToName_TCreatePartitionRequest = map[int16]string{ + 1: "txn_id", + 2: "db_id", + 3: "table_id", + 4: "partitionValues", + 5: "be_endpoint", } -var TLoadTxn2PCResult__Status_DEFAULT *status.TStatus +func (p *TCreatePartitionRequest) IsSetTxnId() bool { + return p.TxnId != nil +} -func (p *TLoadTxn2PCResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TLoadTxn2PCResult__Status_DEFAULT - } - return p.Status +func (p *TCreatePartitionRequest) IsSetDbId() bool { + return p.DbId != nil } -func (p *TLoadTxn2PCResult_) SetStatus(val *status.TStatus) { - p.Status = val + +func (p *TCreatePartitionRequest) IsSetTableId() bool { + return p.TableId != nil } -var fieldIDToName_TLoadTxn2PCResult_ = map[int16]string{ - 1: "status", +func (p *TCreatePartitionRequest) IsSetPartitionValues() bool { + return p.PartitionValues != nil } -func (p *TLoadTxn2PCResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TCreatePartitionRequest) IsSetBeEndpoint() bool { + return p.BeEndpoint != nil } -func (p *TLoadTxn2PCResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TCreatePartitionRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -33115,63 +64585,157 @@ func (p *TLoadTxn2PCResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId 
== thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCreatePartitionRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TCreatePartitionRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TCreatePartitionRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TCreatePartitionRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([][]*exprs.TNullableStringLiteral, 0, size) + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _elem := make([]*exprs.TNullableStringLiteral, 0, size) + values := make([]exprs.TNullableStringLiteral, size) + for i := 0; i < size; i++ { + _elem1 := &values[i] + _elem1.InitDefault() + + if err := _elem1.Read(iprot); err != nil { + return err + } - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return err } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError + _field = append(_field, _elem) } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PartitionValues = _field return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCResult_[fieldId])) } +func (p *TCreatePartitionRequest) ReadField5(iprot thrift.TProtocol) error { -func (p *TLoadTxn2PCResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.BeEndpoint = _field return nil } -func (p *TLoadTxn2PCResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TCreatePartitionRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxn2PCResult"); err != nil { + if err = oprot.WriteStructBegin("TCreatePartitionRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -33179,7 +64743,22 @@ func (p *TLoadTxn2PCResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -33198,15 +64777,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxn2PCResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TCreatePartitionRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -33215,246 +64796,284 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxn2PCResult_) String() string { +func (p *TCreatePartitionRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TCreatePartitionRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TCreatePartitionRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionValues() { + if err = oprot.WriteFieldBegin("partitionValues", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.LIST, len(p.PartitionValues)); err != nil { + return err + } + for _, v := range p.PartitionValues { + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TCreatePartitionRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBeEndpoint() { + if err = oprot.WriteFieldBegin("be_endpoint", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BeEndpoint); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TCreatePartitionRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxn2PCResult_(%+v)", *p) + return fmt.Sprintf("TCreatePartitionRequest(%+v)", *p) + } -func (p *TLoadTxn2PCResult_) DeepEqual(ano *TLoadTxn2PCResult_) bool { +func (p *TCreatePartitionRequest) DeepEqual(ano *TCreatePartitionRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.TxnId) { + return false + } + if !p.Field2DeepEqual(ano.DbId) { + return false + } + if !p.Field3DeepEqual(ano.TableId) { + return false + } + if !p.Field4DeepEqual(ano.PartitionValues) { + return false + } + if !p.Field5DeepEqual(ano.BeEndpoint) { return false } return true } -func (p *TLoadTxn2PCResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TCreatePartitionRequest) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { return false } return true } +func (p *TCreatePartitionRequest) Field2DeepEqual(src *int64) bool { -type TRollbackTxnRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - UserIp *string `thrift:"user_ip,5,optional" frugal:"5,optional,string" 
json:"user_ip,omitempty"` - TxnId *int64 `thrift:"txn_id,6,optional" frugal:"6,optional,i64" json:"txn_id,omitempty"` - Reason *string `thrift:"reason,7,optional" frugal:"7,optional,string" json:"reason,omitempty"` - AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` - TxnCommitAttachment *TTxnCommitAttachment `thrift:"txn_commit_attachment,10,optional" frugal:"10,optional,TTxnCommitAttachment" json:"txn_commit_attachment,omitempty"` - Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` - DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` -} - -func NewTRollbackTxnRequest() *TRollbackTxnRequest { - return &TRollbackTxnRequest{} -} - -func (p *TRollbackTxnRequest) InitDefault() { - *p = TRollbackTxnRequest{} -} - -var TRollbackTxnRequest_Cluster_DEFAULT string - -func (p *TRollbackTxnRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TRollbackTxnRequest_Cluster_DEFAULT + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false } - return *p.Cluster -} - -var TRollbackTxnRequest_User_DEFAULT string - -func (p *TRollbackTxnRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TRollbackTxnRequest_User_DEFAULT + if *p.DbId != *src { + return false } - return *p.User + return true } +func (p *TCreatePartitionRequest) Field3DeepEqual(src *int64) bool { -var TRollbackTxnRequest_Passwd_DEFAULT string - -func (p *TRollbackTxnRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TRollbackTxnRequest_Passwd_DEFAULT + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false } - return *p.Passwd -} - -var TRollbackTxnRequest_Db_DEFAULT string - -func (p *TRollbackTxnRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TRollbackTxnRequest_Db_DEFAULT + if *p.TableId != *src { + return false } - return *p.Db + return true } +func (p *TCreatePartitionRequest) Field4DeepEqual(src [][]*exprs.TNullableStringLiteral) bool { -var TRollbackTxnRequest_UserIp_DEFAULT string - -func (p *TRollbackTxnRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TRollbackTxnRequest_UserIp_DEFAULT + if len(p.PartitionValues) != len(src) { + return false } - return *p.UserIp + for i, v := range p.PartitionValues { + _src := src[i] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true } +func (p *TCreatePartitionRequest) Field5DeepEqual(src *string) bool { -var TRollbackTxnRequest_TxnId_DEFAULT int64 - -func (p *TRollbackTxnRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TRollbackTxnRequest_TxnId_DEFAULT + if p.BeEndpoint == src { + return true + } else if p.BeEndpoint == nil || src == nil { + return false } - return *p.TxnId + if strings.Compare(*p.BeEndpoint, *src) != 0 { + return false + } + return true } -var TRollbackTxnRequest_Reason_DEFAULT string - -func (p *TRollbackTxnRequest) GetReason() (v string) { - if !p.IsSetReason() { - return TRollbackTxnRequest_Reason_DEFAULT - } - return *p.Reason +type TCreatePartitionResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Partitions []*descriptors.TOlapTablePartition `thrift:"partitions,2,optional" frugal:"2,optional,list" json:"partitions,omitempty"` + Tablets []*descriptors.TTabletLocation `thrift:"tablets,3,optional" 
frugal:"3,optional,list" json:"tablets,omitempty"` + Nodes []*descriptors.TNodeInfo `thrift:"nodes,4,optional" frugal:"4,optional,list" json:"nodes,omitempty"` } -var TRollbackTxnRequest_AuthCode_DEFAULT int64 +func NewTCreatePartitionResult_() *TCreatePartitionResult_ { + return &TCreatePartitionResult_{} +} -func (p *TRollbackTxnRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TRollbackTxnRequest_AuthCode_DEFAULT - } - return *p.AuthCode +func (p *TCreatePartitionResult_) InitDefault() { } -var TRollbackTxnRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment +var TCreatePartitionResult__Status_DEFAULT *status.TStatus -func (p *TRollbackTxnRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { - if !p.IsSetTxnCommitAttachment() { - return TRollbackTxnRequest_TxnCommitAttachment_DEFAULT +func (p *TCreatePartitionResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TCreatePartitionResult__Status_DEFAULT } - return p.TxnCommitAttachment + return p.Status } -var TRollbackTxnRequest_Token_DEFAULT string +var TCreatePartitionResult__Partitions_DEFAULT []*descriptors.TOlapTablePartition -func (p *TRollbackTxnRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TRollbackTxnRequest_Token_DEFAULT +func (p *TCreatePartitionResult_) GetPartitions() (v []*descriptors.TOlapTablePartition) { + if !p.IsSetPartitions() { + return TCreatePartitionResult__Partitions_DEFAULT } - return *p.Token + return p.Partitions } -var TRollbackTxnRequest_DbId_DEFAULT int64 +var TCreatePartitionResult__Tablets_DEFAULT []*descriptors.TTabletLocation -func (p *TRollbackTxnRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TRollbackTxnRequest_DbId_DEFAULT +func (p *TCreatePartitionResult_) GetTablets() (v []*descriptors.TTabletLocation) { + if !p.IsSetTablets() { + return TCreatePartitionResult__Tablets_DEFAULT } - return *p.DbId -} -func (p *TRollbackTxnRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TRollbackTxnRequest) SetUser(val *string) { - p.User = val -} -func (p *TRollbackTxnRequest) SetPasswd(val *string) { - p.Passwd = val -} -func (p *TRollbackTxnRequest) SetDb(val *string) { - p.Db = val -} -func (p *TRollbackTxnRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TRollbackTxnRequest) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TRollbackTxnRequest) SetReason(val *string) { - p.Reason = val -} -func (p *TRollbackTxnRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TRollbackTxnRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { - p.TxnCommitAttachment = val -} -func (p *TRollbackTxnRequest) SetToken(val *string) { - p.Token = val -} -func (p *TRollbackTxnRequest) SetDbId(val *int64) { - p.DbId = val -} - -var fieldIDToName_TRollbackTxnRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "user_ip", - 6: "txn_id", - 7: "reason", - 9: "auth_code", - 10: "txn_commit_attachment", - 11: "token", - 12: "db_id", + return p.Tablets } -func (p *TRollbackTxnRequest) IsSetCluster() bool { - return p.Cluster != nil -} +var TCreatePartitionResult__Nodes_DEFAULT []*descriptors.TNodeInfo -func (p *TRollbackTxnRequest) IsSetUser() bool { - return p.User != nil +func (p *TCreatePartitionResult_) GetNodes() (v []*descriptors.TNodeInfo) { + if !p.IsSetNodes() { + return TCreatePartitionResult__Nodes_DEFAULT + } + return p.Nodes } - -func (p *TRollbackTxnRequest) IsSetPasswd() bool { - return p.Passwd != nil +func (p *TCreatePartitionResult_) SetStatus(val 
*status.TStatus) { + p.Status = val } - -func (p *TRollbackTxnRequest) IsSetDb() bool { - return p.Db != nil +func (p *TCreatePartitionResult_) SetPartitions(val []*descriptors.TOlapTablePartition) { + p.Partitions = val } - -func (p *TRollbackTxnRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TCreatePartitionResult_) SetTablets(val []*descriptors.TTabletLocation) { + p.Tablets = val } - -func (p *TRollbackTxnRequest) IsSetTxnId() bool { - return p.TxnId != nil +func (p *TCreatePartitionResult_) SetNodes(val []*descriptors.TNodeInfo) { + p.Nodes = val } -func (p *TRollbackTxnRequest) IsSetReason() bool { - return p.Reason != nil +var fieldIDToName_TCreatePartitionResult_ = map[int16]string{ + 1: "status", + 2: "partitions", + 3: "tablets", + 4: "nodes", } -func (p *TRollbackTxnRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +func (p *TCreatePartitionResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TRollbackTxnRequest) IsSetTxnCommitAttachment() bool { - return p.TxnCommitAttachment != nil +func (p *TCreatePartitionResult_) IsSetPartitions() bool { + return p.Partitions != nil } -func (p *TRollbackTxnRequest) IsSetToken() bool { - return p.Token != nil +func (p *TCreatePartitionResult_) IsSetTablets() bool { + return p.Tablets != nil } -func (p *TRollbackTxnRequest) IsSetDbId() bool { - return p.DbId != nil +func (p *TCreatePartitionResult_) IsSetNodes() bool { + return p.Nodes != nil } -func (p *TRollbackTxnRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TCreatePartitionResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -33473,122 +65092,43 @@ func (p *TRollbackTxnRequest) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: + case 1: if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { + if err = 
p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 12: - if fieldTypeId == thrift.I64 { - if err = p.ReadField12(iprot); err != nil { + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -33603,7 +65143,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -33613,107 +65153,87 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRollbackTxnRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TCreatePartitionResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Cluster = &v } + p.Status = _field return nil } - -func (p *TRollbackTxnRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TCreatePartitionResult_) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.User = &v } - return nil -} + _field := make([]*descriptors.TOlapTablePartition, 0, size) + values := make([]descriptors.TOlapTablePartition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TRollbackTxnRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = &v - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TRollbackTxnRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v + _field = append(_field, _elem) } - return nil -} - -func (p *TRollbackTxnRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.UserIp = &v } + p.Partitions = _field return 
nil } - -func (p *TRollbackTxnRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TCreatePartitionResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.TxnId = &v } - return nil -} + _field := make([]*descriptors.TTabletLocation, 0, size) + values := make([]descriptors.TTabletLocation, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TRollbackTxnRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Reason = &v - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TRollbackTxnRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.AuthCode = &v + _field = append(_field, _elem) } - return nil -} - -func (p *TRollbackTxnRequest) ReadField10(iprot thrift.TProtocol) error { - p.TxnCommitAttachment = NewTTxnCommitAttachment() - if err := p.TxnCommitAttachment.Read(iprot); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } + p.Tablets = _field return nil } - -func (p *TRollbackTxnRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TCreatePartitionResult_) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.Token = &v } - return nil -} + _field := make([]*descriptors.TNodeInfo, 0, size) + values := make([]descriptors.TNodeInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TRollbackTxnRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.DbId = &v } + p.Nodes = _field return nil } -func (p *TRollbackTxnRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TCreatePartitionResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRollbackTxnRequest"); err != nil { + if err = oprot.WriteStructBegin("TCreatePartitionResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -33733,35 +65253,6 @@ func (p *TRollbackTxnRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -33780,12 +65271,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRollbackTxnRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != 
nil { +func (p *TCreatePartitionResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -33799,107 +65290,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRollbackTxnRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 6); err != nil { +func (p *TCreatePartitionResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TxnId); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetReason() { - if err = oprot.WriteFieldBegin("reason", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return err + } } - if err := oprot.WriteString(*p.Reason); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -33908,36 +65312,25 @@ func (p *TRollbackTxnRequest) writeField7(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TRollbackTxnRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { +func (p *TCreatePartitionResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTablets() { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.AuthCode); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnCommitAttachment() { - if err = oprot.WriteFieldBegin("txn_commit_attachment", thrift.STRUCT, 10); err != nil { - goto WriteFieldBeginError + for _, v := range p.Tablets { + if err := v.Write(oprot); err != nil { + return err + } } - if err := p.TxnCommitAttachment.Write(oprot); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -33946,36 +65339,25 @@ func (p *TRollbackTxnRequest) writeField10(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TRollbackTxnRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { +func (p *TCreatePartitionResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetNodes() { + if err = oprot.WriteFieldBegin("nodes", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Token); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Nodes)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { 
- goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TRollbackTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { - goto WriteFieldBeginError + for _, v := range p.Nodes { + if err := v.Write(oprot); err != nil { + return err + } } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -33984,221 +65366,191 @@ func (p *TRollbackTxnRequest) writeField12(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TRollbackTxnRequest) String() string { +func (p *TCreatePartitionResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TRollbackTxnRequest(%+v)", *p) + return fmt.Sprintf("TCreatePartitionResult_(%+v)", *p) + } -func (p *TRollbackTxnRequest) DeepEqual(ano *TRollbackTxnRequest) bool { +func (p *TCreatePartitionResult_) DeepEqual(ano *TCreatePartitionResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.UserIp) { - return false - } - if !p.Field6DeepEqual(ano.TxnId) { - return false - } - if !p.Field7DeepEqual(ano.Reason) { - return false - } - if !p.Field9DeepEqual(ano.AuthCode) { + if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field10DeepEqual(ano.TxnCommitAttachment) { + if !p.Field2DeepEqual(ano.Partitions) { return false } - if !p.Field11DeepEqual(ano.Token) { + if !p.Field3DeepEqual(ano.Tablets) { return false } - if !p.Field12DeepEqual(ano.DbId) { + if !p.Field4DeepEqual(ano.Nodes) { return false } return true } -func (p *TRollbackTxnRequest) Field1DeepEqual(src *string) bool { +func (p *TCreatePartitionResult_) Field1DeepEqual(src *status.TStatus) bool { - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TRollbackTxnRequest) Field2DeepEqual(src *string) bool { +func (p *TCreatePartitionResult_) Field2DeepEqual(src []*descriptors.TOlapTablePartition) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { + if len(p.Partitions) != len(src) { return false } - if strings.Compare(*p.User, *src) != 0 { - return false + for i, v := range p.Partitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TRollbackTxnRequest) Field3DeepEqual(src *string) bool { +func (p *TCreatePartitionResult_) Field3DeepEqual(src []*descriptors.TTabletLocation) bool { - if p.Passwd == src { - return true - } else if p.Passwd == nil || src == nil { + if 
len(p.Tablets) != len(src) { return false } - if strings.Compare(*p.Passwd, *src) != 0 { - return false + for i, v := range p.Tablets { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TRollbackTxnRequest) Field4DeepEqual(src *string) bool { +func (p *TCreatePartitionResult_) Field4DeepEqual(src []*descriptors.TNodeInfo) bool { - if p.Db == src { - return true - } else if p.Db == nil || src == nil { + if len(p.Nodes) != len(src) { return false } - if strings.Compare(*p.Db, *src) != 0 { - return false + for i, v := range p.Nodes { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TRollbackTxnRequest) Field5DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { - return false - } - return true +type TReplacePartitionRequest struct { + OverwriteGroupId *int64 `thrift:"overwrite_group_id,1,optional" frugal:"1,optional,i64" json:"overwrite_group_id,omitempty"` + DbId *int64 `thrift:"db_id,2,optional" frugal:"2,optional,i64" json:"db_id,omitempty"` + TableId *int64 `thrift:"table_id,3,optional" frugal:"3,optional,i64" json:"table_id,omitempty"` + PartitionIds []int64 `thrift:"partition_ids,4,optional" frugal:"4,optional,list" json:"partition_ids,omitempty"` + BeEndpoint *string `thrift:"be_endpoint,5,optional" frugal:"5,optional,string" json:"be_endpoint,omitempty"` } -func (p *TRollbackTxnRequest) Field6DeepEqual(src *int64) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false - } - return true +func NewTReplacePartitionRequest() *TReplacePartitionRequest { + return &TReplacePartitionRequest{} } -func (p *TRollbackTxnRequest) Field7DeepEqual(src *string) bool { - if p.Reason == src { - return true - } else if p.Reason == nil || src == nil { - return false - } - if strings.Compare(*p.Reason, *src) != 0 { - return false - } - return true +func (p *TReplacePartitionRequest) InitDefault() { } -func (p *TRollbackTxnRequest) Field9DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false +var TReplacePartitionRequest_OverwriteGroupId_DEFAULT int64 + +func (p *TReplacePartitionRequest) GetOverwriteGroupId() (v int64) { + if !p.IsSetOverwriteGroupId() { + return TReplacePartitionRequest_OverwriteGroupId_DEFAULT } - return true + return *p.OverwriteGroupId } -func (p *TRollbackTxnRequest) Field10DeepEqual(src *TTxnCommitAttachment) bool { - if !p.TxnCommitAttachment.DeepEqual(src) { - return false +var TReplacePartitionRequest_DbId_DEFAULT int64 + +func (p *TReplacePartitionRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TReplacePartitionRequest_DbId_DEFAULT } - return true + return *p.DbId } -func (p *TRollbackTxnRequest) Field11DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false +var TReplacePartitionRequest_TableId_DEFAULT int64 + +func (p *TReplacePartitionRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TReplacePartitionRequest_TableId_DEFAULT } - return true + return *p.TableId } -func (p *TRollbackTxnRequest) Field12DeepEqual(src *int64) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false 
- } - if *p.DbId != *src { - return false +var TReplacePartitionRequest_PartitionIds_DEFAULT []int64 + +func (p *TReplacePartitionRequest) GetPartitionIds() (v []int64) { + if !p.IsSetPartitionIds() { + return TReplacePartitionRequest_PartitionIds_DEFAULT } - return true + return p.PartitionIds } -type TRollbackTxnResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` -} +var TReplacePartitionRequest_BeEndpoint_DEFAULT string -func NewTRollbackTxnResult_() *TRollbackTxnResult_ { - return &TRollbackTxnResult_{} +func (p *TReplacePartitionRequest) GetBeEndpoint() (v string) { + if !p.IsSetBeEndpoint() { + return TReplacePartitionRequest_BeEndpoint_DEFAULT + } + return *p.BeEndpoint +} +func (p *TReplacePartitionRequest) SetOverwriteGroupId(val *int64) { + p.OverwriteGroupId = val +} +func (p *TReplacePartitionRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TReplacePartitionRequest) SetTableId(val *int64) { + p.TableId = val +} +func (p *TReplacePartitionRequest) SetPartitionIds(val []int64) { + p.PartitionIds = val +} +func (p *TReplacePartitionRequest) SetBeEndpoint(val *string) { + p.BeEndpoint = val } -func (p *TRollbackTxnResult_) InitDefault() { - *p = TRollbackTxnResult_{} +var fieldIDToName_TReplacePartitionRequest = map[int16]string{ + 1: "overwrite_group_id", + 2: "db_id", + 3: "table_id", + 4: "partition_ids", + 5: "be_endpoint", } -var TRollbackTxnResult__Status_DEFAULT *status.TStatus +func (p *TReplacePartitionRequest) IsSetOverwriteGroupId() bool { + return p.OverwriteGroupId != nil +} -func (p *TRollbackTxnResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TRollbackTxnResult__Status_DEFAULT - } - return p.Status +func (p *TReplacePartitionRequest) IsSetDbId() bool { + return p.DbId != nil } -func (p *TRollbackTxnResult_) SetStatus(val *status.TStatus) { - p.Status = val + +func (p *TReplacePartitionRequest) IsSetTableId() bool { + return p.TableId != nil } -var fieldIDToName_TRollbackTxnResult_ = map[int16]string{ - 1: "status", +func (p *TReplacePartitionRequest) IsSetPartitionIds() bool { + return p.PartitionIds != nil } -func (p *TRollbackTxnResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TReplacePartitionRequest) IsSetBeEndpoint() bool { + return p.BeEndpoint != nil } -func (p *TRollbackTxnResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TReplacePartitionRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -34218,21 +65570,50 @@ func (p *TRollbackTxnResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -34247,7 +65628,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReplacePartitionRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -34257,17 +65638,77 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRollbackTxnResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TReplacePartitionRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.OverwriteGroupId = _field return nil } +func (p *TReplacePartitionRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TRollbackTxnResult_) Write(oprot thrift.TProtocol) (err error) { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TReplacePartitionRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TReplacePartitionRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PartitionIds = _field + return nil +} +func (p *TReplacePartitionRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.BeEndpoint = _field + return nil +} + +func (p *TReplacePartitionRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRollbackTxnResult"); err != nil { + if err = oprot.WriteStructBegin("TReplacePartitionRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -34275,7 +65716,22 @@ func (p *TRollbackTxnResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -34294,12 +65750,12 @@ 
WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRollbackTxnResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TReplacePartitionRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetOverwriteGroupId() { + if err = oprot.WriteFieldBegin("overwrite_group_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.OverwriteGroupId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -34313,401 +65769,325 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRollbackTxnResult_) String() string { +func (p *TReplacePartitionRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TReplacePartitionRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TReplacePartitionRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionIds() { + if err = oprot.WriteFieldBegin("partition_ids", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.PartitionIds)); err != nil { + return err + } + for _, v := range p.PartitionIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TReplacePartitionRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBeEndpoint() { + if err = oprot.WriteFieldBegin("be_endpoint", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BeEndpoint); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TReplacePartitionRequest) String() string { if p == nil { return "" } - return 
fmt.Sprintf("TRollbackTxnResult_(%+v)", *p) + return fmt.Sprintf("TReplacePartitionRequest(%+v)", *p) + } -func (p *TRollbackTxnResult_) DeepEqual(ano *TRollbackTxnResult_) bool { +func (p *TReplacePartitionRequest) DeepEqual(ano *TReplacePartitionRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.OverwriteGroupId) { + return false + } + if !p.Field2DeepEqual(ano.DbId) { + return false + } + if !p.Field3DeepEqual(ano.TableId) { + return false + } + if !p.Field4DeepEqual(ano.PartitionIds) { + return false + } + if !p.Field5DeepEqual(ano.BeEndpoint) { return false } return true } -func (p *TRollbackTxnResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TReplacePartitionRequest) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.OverwriteGroupId == src { + return true + } else if p.OverwriteGroupId == nil || src == nil { + return false + } + if *p.OverwriteGroupId != *src { return false } return true } +func (p *TReplacePartitionRequest) Field2DeepEqual(src *int64) bool { -type TLoadTxnRollbackRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` - Db string `thrift:"db,4,required" frugal:"4,required,string" json:"db"` - Tbl string `thrift:"tbl,5,required" frugal:"5,required,string" json:"tbl"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - TxnId int64 `thrift:"txnId,7,required" frugal:"7,required,i64" json:"txnId"` - Reason *string `thrift:"reason,8,optional" frugal:"8,optional,string" json:"reason,omitempty"` - AuthCode *int64 `thrift:"auth_code,9,optional" frugal:"9,optional,i64" json:"auth_code,omitempty"` - TxnCommitAttachment *TTxnCommitAttachment `thrift:"txnCommitAttachment,10,optional" frugal:"10,optional,TTxnCommitAttachment" json:"txnCommitAttachment,omitempty"` - Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"` - DbId *int64 `thrift:"db_id,12,optional" frugal:"12,optional,i64" json:"db_id,omitempty"` - Tbls []string `thrift:"tbls,13,optional" frugal:"13,optional,list" json:"tbls,omitempty"` -} - -func NewTLoadTxnRollbackRequest() *TLoadTxnRollbackRequest { - return &TLoadTxnRollbackRequest{} -} - -func (p *TLoadTxnRollbackRequest) InitDefault() { - *p = TLoadTxnRollbackRequest{} -} - -var TLoadTxnRollbackRequest_Cluster_DEFAULT string - -func (p *TLoadTxnRollbackRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TLoadTxnRollbackRequest_Cluster_DEFAULT + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false } - return *p.Cluster -} - -func (p *TLoadTxnRollbackRequest) GetUser() (v string) { - return p.User -} - -func (p *TLoadTxnRollbackRequest) GetPasswd() (v string) { - return p.Passwd + if *p.DbId != *src { + return false + } + return true } +func (p *TReplacePartitionRequest) Field3DeepEqual(src *int64) bool { -func (p *TLoadTxnRollbackRequest) GetDb() (v string) { - return p.Db + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true } +func (p *TReplacePartitionRequest) Field4DeepEqual(src []int64) bool { -func (p *TLoadTxnRollbackRequest) GetTbl() (v string) { 
- return p.Tbl + if len(p.PartitionIds) != len(src) { + return false + } + for i, v := range p.PartitionIds { + _src := src[i] + if v != _src { + return false + } + } + return true } +func (p *TReplacePartitionRequest) Field5DeepEqual(src *string) bool { -var TLoadTxnRollbackRequest_UserIp_DEFAULT string - -func (p *TLoadTxnRollbackRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TLoadTxnRollbackRequest_UserIp_DEFAULT + if p.BeEndpoint == src { + return true + } else if p.BeEndpoint == nil || src == nil { + return false } - return *p.UserIp + if strings.Compare(*p.BeEndpoint, *src) != 0 { + return false + } + return true } -func (p *TLoadTxnRollbackRequest) GetTxnId() (v int64) { - return p.TxnId +type TReplacePartitionResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Partitions []*descriptors.TOlapTablePartition `thrift:"partitions,2,optional" frugal:"2,optional,list" json:"partitions,omitempty"` + Tablets []*descriptors.TTabletLocation `thrift:"tablets,3,optional" frugal:"3,optional,list" json:"tablets,omitempty"` + Nodes []*descriptors.TNodeInfo `thrift:"nodes,4,optional" frugal:"4,optional,list" json:"nodes,omitempty"` } -var TLoadTxnRollbackRequest_Reason_DEFAULT string - -func (p *TLoadTxnRollbackRequest) GetReason() (v string) { - if !p.IsSetReason() { - return TLoadTxnRollbackRequest_Reason_DEFAULT - } - return *p.Reason +func NewTReplacePartitionResult_() *TReplacePartitionResult_ { + return &TReplacePartitionResult_{} } -var TLoadTxnRollbackRequest_AuthCode_DEFAULT int64 - -func (p *TLoadTxnRollbackRequest) GetAuthCode() (v int64) { - if !p.IsSetAuthCode() { - return TLoadTxnRollbackRequest_AuthCode_DEFAULT - } - return *p.AuthCode +func (p *TReplacePartitionResult_) InitDefault() { } -var TLoadTxnRollbackRequest_TxnCommitAttachment_DEFAULT *TTxnCommitAttachment +var TReplacePartitionResult__Status_DEFAULT *status.TStatus -func (p *TLoadTxnRollbackRequest) GetTxnCommitAttachment() (v *TTxnCommitAttachment) { - if !p.IsSetTxnCommitAttachment() { - return TLoadTxnRollbackRequest_TxnCommitAttachment_DEFAULT +func (p *TReplacePartitionResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TReplacePartitionResult__Status_DEFAULT } - return p.TxnCommitAttachment + return p.Status } -var TLoadTxnRollbackRequest_Token_DEFAULT string +var TReplacePartitionResult__Partitions_DEFAULT []*descriptors.TOlapTablePartition -func (p *TLoadTxnRollbackRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TLoadTxnRollbackRequest_Token_DEFAULT +func (p *TReplacePartitionResult_) GetPartitions() (v []*descriptors.TOlapTablePartition) { + if !p.IsSetPartitions() { + return TReplacePartitionResult__Partitions_DEFAULT } - return *p.Token + return p.Partitions } -var TLoadTxnRollbackRequest_DbId_DEFAULT int64 +var TReplacePartitionResult__Tablets_DEFAULT []*descriptors.TTabletLocation -func (p *TLoadTxnRollbackRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TLoadTxnRollbackRequest_DbId_DEFAULT +func (p *TReplacePartitionResult_) GetTablets() (v []*descriptors.TTabletLocation) { + if !p.IsSetTablets() { + return TReplacePartitionResult__Tablets_DEFAULT } - return *p.DbId + return p.Tablets } -var TLoadTxnRollbackRequest_Tbls_DEFAULT []string +var TReplacePartitionResult__Nodes_DEFAULT []*descriptors.TNodeInfo -func (p *TLoadTxnRollbackRequest) GetTbls() (v []string) { - if !p.IsSetTbls() { - return TLoadTxnRollbackRequest_Tbls_DEFAULT +func (p 
*TReplacePartitionResult_) GetNodes() (v []*descriptors.TNodeInfo) { + if !p.IsSetNodes() { + return TReplacePartitionResult__Nodes_DEFAULT } - return p.Tbls -} -func (p *TLoadTxnRollbackRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TLoadTxnRollbackRequest) SetUser(val string) { - p.User = val -} -func (p *TLoadTxnRollbackRequest) SetPasswd(val string) { - p.Passwd = val -} -func (p *TLoadTxnRollbackRequest) SetDb(val string) { - p.Db = val -} -func (p *TLoadTxnRollbackRequest) SetTbl(val string) { - p.Tbl = val -} -func (p *TLoadTxnRollbackRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TLoadTxnRollbackRequest) SetTxnId(val int64) { - p.TxnId = val -} -func (p *TLoadTxnRollbackRequest) SetReason(val *string) { - p.Reason = val -} -func (p *TLoadTxnRollbackRequest) SetAuthCode(val *int64) { - p.AuthCode = val -} -func (p *TLoadTxnRollbackRequest) SetTxnCommitAttachment(val *TTxnCommitAttachment) { - p.TxnCommitAttachment = val -} -func (p *TLoadTxnRollbackRequest) SetToken(val *string) { - p.Token = val -} -func (p *TLoadTxnRollbackRequest) SetDbId(val *int64) { - p.DbId = val -} -func (p *TLoadTxnRollbackRequest) SetTbls(val []string) { - p.Tbls = val -} - -var fieldIDToName_TLoadTxnRollbackRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "tbl", - 6: "user_ip", - 7: "txnId", - 8: "reason", - 9: "auth_code", - 10: "txnCommitAttachment", - 11: "token", - 12: "db_id", - 13: "tbls", -} - -func (p *TLoadTxnRollbackRequest) IsSetCluster() bool { - return p.Cluster != nil + return p.Nodes } - -func (p *TLoadTxnRollbackRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TReplacePartitionResult_) SetStatus(val *status.TStatus) { + p.Status = val } - -func (p *TLoadTxnRollbackRequest) IsSetReason() bool { - return p.Reason != nil +func (p *TReplacePartitionResult_) SetPartitions(val []*descriptors.TOlapTablePartition) { + p.Partitions = val } - -func (p *TLoadTxnRollbackRequest) IsSetAuthCode() bool { - return p.AuthCode != nil +func (p *TReplacePartitionResult_) SetTablets(val []*descriptors.TTabletLocation) { + p.Tablets = val } - -func (p *TLoadTxnRollbackRequest) IsSetTxnCommitAttachment() bool { - return p.TxnCommitAttachment != nil +func (p *TReplacePartitionResult_) SetNodes(val []*descriptors.TNodeInfo) { + p.Nodes = val } -func (p *TLoadTxnRollbackRequest) IsSetToken() bool { - return p.Token != nil +var fieldIDToName_TReplacePartitionResult_ = map[int16]string{ + 1: "status", + 2: "partitions", + 3: "tablets", + 4: "nodes", } -func (p *TLoadTxnRollbackRequest) IsSetDbId() bool { - return p.DbId != nil +func (p *TReplacePartitionResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TLoadTxnRollbackRequest) IsSetTbls() bool { - return p.Tbls != nil +func (p *TReplacePartitionResult_) IsSetPartitions() bool { + return p.Partitions != nil } -func (p *TLoadTxnRollbackRequest) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetTxnId bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } 
else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - issetTbl = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: +func (p *TReplacePartitionResult_) IsSetTablets() bool { + return p.Tablets != nil +} + +func (p *TReplacePartitionResult_) IsSetNodes() bool { + return p.Nodes != nil +} + +func (p *TReplacePartitionResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { + if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 12: - if fieldTypeId == thrift.I64 { - if err = p.ReadField12(iprot); err != nil { + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 13: + case 4: if fieldTypeId == thrift.LIST { - if err = 
p.ReadField13(iprot); err != nil { + if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -34716,37 +66096,13 @@ func (p *TLoadTxnRollbackRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 7 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReplacePartitionResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -34754,142 +66110,89 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackRequest[fieldId])) -} - -func (p *TLoadTxnRollbackRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TLoadTxnRollbackRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} - -func (p *TLoadTxnRollbackRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = v - } - return nil } -func (p *TLoadTxnRollbackRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TReplacePartitionResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Db = v } + p.Status = _field return nil } - -func (p *TLoadTxnRollbackRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TReplacePartitionResult_) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.Tbl = v } - return nil -} + _field := make([]*descriptors.TOlapTablePartition, 0, size) + values := make([]descriptors.TOlapTablePartition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TLoadTxnRollbackRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil -} 
+ if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TLoadTxnRollbackRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = v + _field = append(_field, _elem) } - return nil -} - -func (p *TLoadTxnRollbackRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.Reason = &v } + p.Partitions = _field return nil } - -func (p *TLoadTxnRollbackRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TReplacePartitionResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.AuthCode = &v } - return nil -} + _field := make([]*descriptors.TTabletLocation, 0, size) + values := make([]descriptors.TTabletLocation, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TLoadTxnRollbackRequest) ReadField10(iprot thrift.TProtocol) error { - p.TxnCommitAttachment = NewTTxnCommitAttachment() - if err := p.TxnCommitAttachment.Read(iprot); err != nil { - return err - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TLoadTxnRollbackRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v + _field = append(_field, _elem) } - return nil -} - -func (p *TLoadTxnRollbackRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.DbId = &v } + p.Tablets = _field return nil } - -func (p *TLoadTxnRollbackRequest) ReadField13(iprot thrift.TProtocol) error { +func (p *TReplacePartitionResult_) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Tbls = make([]string, 0, size) + _field := make([]*descriptors.TNodeInfo, 0, size) + values := make([]descriptors.TNodeInfo, size) for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err - } else { - _elem = v } - p.Tbls = append(p.Tbls, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Nodes = _field return nil } -func (p *TLoadTxnRollbackRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TReplacePartitionResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnRollbackRequest"); err != nil { + if err = oprot.WriteStructBegin("TReplacePartitionResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -34909,43 +66212,6 @@ func (p *TLoadTxnRollbackRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if 
err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -34964,12 +66230,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *TReplacePartitionResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -34983,154 +66249,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Tbl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = 
oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("txnId", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetReason() { - if err = oprot.WriteFieldBegin("reason", thrift.STRING, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Reason); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthCode() { - if err = oprot.WriteFieldBegin("auth_code", thrift.I64, 9); err != nil { +func (p *TReplacePartitionResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.AuthCode); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnCommitAttachment() { - if err = oprot.WriteFieldBegin("txnCommitAttachment", thrift.STRUCT, 10); err != nil { - goto WriteFieldBeginError + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return err + } } - if err := p.TxnCommitAttachment.Write(oprot); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -35139,36 +66271,25 @@ func (p *TLoadTxnRollbackRequest) writeField10(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) writeField11(oprot 
thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil { +func (p *TReplacePartitionResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTablets() { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Token); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 12); err != nil { - goto WriteFieldBeginError + for _, v := range p.Tablets { + if err := v.Write(oprot); err != nil { + return err + } } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -35177,21 +66298,21 @@ func (p *TLoadTxnRollbackRequest) writeField12(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetTbls() { - if err = oprot.WriteFieldBegin("tbls", thrift.LIST, 13); err != nil { +func (p *TReplacePartitionResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetNodes() { + if err = oprot.WriteFieldBegin("nodes", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Tbls)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Nodes)); err != nil { return err } - for _, v := range p.Tbls { - if err := oprot.WriteString(v); err != nil { + for _, v := range p.Nodes { + if err := v.Write(oprot); err != nil { return err } } @@ -35204,231 +66325,122 @@ func (p *TLoadTxnRollbackRequest) writeField13(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) -} - -func (p *TLoadTxnRollbackRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TLoadTxnRollbackRequest(%+v)", *p) -} - -func (p *TLoadTxnRollbackRequest) DeepEqual(ano *TLoadTxnRollbackRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.Tbl) { - return false - } - if !p.Field6DeepEqual(ano.UserIp) { - return false - } - if !p.Field7DeepEqual(ano.TxnId) { - return false - } - if !p.Field8DeepEqual(ano.Reason) { - return 
false - } - if !p.Field9DeepEqual(ano.AuthCode) { - return false - } - if !p.Field10DeepEqual(ano.TxnCommitAttachment) { - return false - } - if !p.Field11DeepEqual(ano.Token) { - return false - } - if !p.Field12DeepEqual(ano.DbId) { - return false - } - if !p.Field13DeepEqual(ano.Tbls) { - return false - } - return true -} - -func (p *TLoadTxnRollbackRequest) Field1DeepEqual(src *string) bool { - - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { - return false - } - return true -} -func (p *TLoadTxnRollbackRequest) Field2DeepEqual(src string) bool { - - if strings.Compare(p.User, src) != 0 { - return false - } - return true -} -func (p *TLoadTxnRollbackRequest) Field3DeepEqual(src string) bool { - - if strings.Compare(p.Passwd, src) != 0 { - return false - } - return true -} -func (p *TLoadTxnRollbackRequest) Field4DeepEqual(src string) bool { - - if strings.Compare(p.Db, src) != 0 { - return false - } - return true + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) Field5DeepEqual(src string) bool { - if strings.Compare(p.Tbl, src) != 0 { - return false +func (p *TReplacePartitionResult_) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TReplacePartitionResult_(%+v)", *p) + } -func (p *TLoadTxnRollbackRequest) Field6DeepEqual(src *string) bool { - if p.UserIp == src { +func (p *TReplacePartitionResult_) DeepEqual(ano *TReplacePartitionResult_) bool { + if p == ano { return true - } else if p.UserIp == nil || src == nil { + } else if p == nil || ano == nil { return false } - if strings.Compare(*p.UserIp, *src) != 0 { + if !p.Field1DeepEqual(ano.Status) { return false } - return true -} -func (p *TLoadTxnRollbackRequest) Field7DeepEqual(src int64) bool { - - if p.TxnId != src { + if !p.Field2DeepEqual(ano.Partitions) { return false } - return true -} -func (p *TLoadTxnRollbackRequest) Field8DeepEqual(src *string) bool { - - if p.Reason == src { - return true - } else if p.Reason == nil || src == nil { + if !p.Field3DeepEqual(ano.Tablets) { return false } - if strings.Compare(*p.Reason, *src) != 0 { + if !p.Field4DeepEqual(ano.Nodes) { return false } return true } -func (p *TLoadTxnRollbackRequest) Field9DeepEqual(src *int64) bool { - if p.AuthCode == src { - return true - } else if p.AuthCode == nil || src == nil { - return false - } - if *p.AuthCode != *src { - return false - } - return true -} -func (p *TLoadTxnRollbackRequest) Field10DeepEqual(src *TTxnCommitAttachment) bool { +func (p *TReplacePartitionResult_) Field1DeepEqual(src *status.TStatus) bool { - if !p.TxnCommitAttachment.DeepEqual(src) { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TLoadTxnRollbackRequest) Field11DeepEqual(src *string) bool { +func (p *TReplacePartitionResult_) Field2DeepEqual(src []*descriptors.TOlapTablePartition) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { + if len(p.Partitions) != len(src) { return false } - if strings.Compare(*p.Token, *src) != 0 { - return false + for i, v := range p.Partitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TLoadTxnRollbackRequest) Field12DeepEqual(src *int64) bool { +func (p *TReplacePartitionResult_) Field3DeepEqual(src []*descriptors.TTabletLocation) bool { 
- if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { + if len(p.Tablets) != len(src) { return false } - if *p.DbId != *src { - return false + for i, v := range p.Tablets { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TLoadTxnRollbackRequest) Field13DeepEqual(src []string) bool { +func (p *TReplacePartitionResult_) Field4DeepEqual(src []*descriptors.TNodeInfo) bool { - if len(p.Tbls) != len(src) { + if len(p.Nodes) != len(src) { return false } - for i, v := range p.Tbls { + for i, v := range p.Nodes { _src := src[i] - if strings.Compare(v, _src) != 0 { + if !v.DeepEqual(_src) { return false } } return true } -type TLoadTxnRollbackResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +type TGetMetaReplica struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` } -func NewTLoadTxnRollbackResult_() *TLoadTxnRollbackResult_ { - return &TLoadTxnRollbackResult_{} +func NewTGetMetaReplica() *TGetMetaReplica { + return &TGetMetaReplica{} } -func (p *TLoadTxnRollbackResult_) InitDefault() { - *p = TLoadTxnRollbackResult_{} +func (p *TGetMetaReplica) InitDefault() { } -var TLoadTxnRollbackResult__Status_DEFAULT *status.TStatus +var TGetMetaReplica_Id_DEFAULT int64 -func (p *TLoadTxnRollbackResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TLoadTxnRollbackResult__Status_DEFAULT +func (p *TGetMetaReplica) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaReplica_Id_DEFAULT } - return p.Status + return *p.Id } -func (p *TLoadTxnRollbackResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TGetMetaReplica) SetId(val *int64) { + p.Id = val } -var fieldIDToName_TLoadTxnRollbackResult_ = map[int16]string{ - 1: "status", +var fieldIDToName_TGetMetaReplica = map[int16]string{ + 1: "id", } -func (p *TLoadTxnRollbackResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TGetMetaReplica) IsSetId() bool { + return p.Id != nil } -func (p *TLoadTxnRollbackResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaReplica) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -35445,22 +66457,18 @@ func (p *TLoadTxnRollbackResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -35469,17 +66477,13 @@ func (p *TLoadTxnRollbackResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackResult_[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaReplica[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -35487,21 +66491,23 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackResult_[fieldId])) } -func (p *TLoadTxnRollbackResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TGetMetaReplica) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.Id = _field return nil } -func (p *TLoadTxnRollbackResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaReplica) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TLoadTxnRollbackResult"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaReplica"); err != nil { goto WriteStructBeginError } if p != nil { @@ -35509,7 +66515,6 @@ func (p *TLoadTxnRollbackResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -35528,15 +66533,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TLoadTxnRollbackResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaReplica) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -35545,117 +66552,92 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TLoadTxnRollbackResult_) String() string { +func (p *TGetMetaReplica) String() string { if p == nil { return "" } - return fmt.Sprintf("TLoadTxnRollbackResult_(%+v)", *p) + return fmt.Sprintf("TGetMetaReplica(%+v)", *p) + } -func (p *TLoadTxnRollbackResult_) DeepEqual(ano *TLoadTxnRollbackResult_) bool { +func (p *TGetMetaReplica) DeepEqual(ano *TGetMetaReplica) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Id) { return false } return true } -func (p *TLoadTxnRollbackResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TGetMetaReplica) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { return false } return true } -type TSnapshotLoaderReportRequest struct { - JobId int64 `thrift:"job_id,1,required" frugal:"1,required,i64" json:"job_id"` - TaskId int64 
`thrift:"task_id,2,required" frugal:"2,required,i64" json:"task_id"` - TaskType types.TTaskType `thrift:"task_type,3,required" frugal:"3,required,TTaskType" json:"task_type"` - FinishedNum *int32 `thrift:"finished_num,4,optional" frugal:"4,optional,i32" json:"finished_num,omitempty"` - TotalNum *int32 `thrift:"total_num,5,optional" frugal:"5,optional,i32" json:"total_num,omitempty"` -} - -func NewTSnapshotLoaderReportRequest() *TSnapshotLoaderReportRequest { - return &TSnapshotLoaderReportRequest{} -} - -func (p *TSnapshotLoaderReportRequest) InitDefault() { - *p = TSnapshotLoaderReportRequest{} -} - -func (p *TSnapshotLoaderReportRequest) GetJobId() (v int64) { - return p.JobId +type TGetMetaTablet struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Replicas []*TGetMetaReplica `thrift:"replicas,2,optional" frugal:"2,optional,list" json:"replicas,omitempty"` } -func (p *TSnapshotLoaderReportRequest) GetTaskId() (v int64) { - return p.TaskId +func NewTGetMetaTablet() *TGetMetaTablet { + return &TGetMetaTablet{} } -func (p *TSnapshotLoaderReportRequest) GetTaskType() (v types.TTaskType) { - return p.TaskType +func (p *TGetMetaTablet) InitDefault() { } -var TSnapshotLoaderReportRequest_FinishedNum_DEFAULT int32 +var TGetMetaTablet_Id_DEFAULT int64 -func (p *TSnapshotLoaderReportRequest) GetFinishedNum() (v int32) { - if !p.IsSetFinishedNum() { - return TSnapshotLoaderReportRequest_FinishedNum_DEFAULT +func (p *TGetMetaTablet) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaTablet_Id_DEFAULT } - return *p.FinishedNum + return *p.Id } -var TSnapshotLoaderReportRequest_TotalNum_DEFAULT int32 +var TGetMetaTablet_Replicas_DEFAULT []*TGetMetaReplica -func (p *TSnapshotLoaderReportRequest) GetTotalNum() (v int32) { - if !p.IsSetTotalNum() { - return TSnapshotLoaderReportRequest_TotalNum_DEFAULT +func (p *TGetMetaTablet) GetReplicas() (v []*TGetMetaReplica) { + if !p.IsSetReplicas() { + return TGetMetaTablet_Replicas_DEFAULT } - return *p.TotalNum -} -func (p *TSnapshotLoaderReportRequest) SetJobId(val int64) { - p.JobId = val -} -func (p *TSnapshotLoaderReportRequest) SetTaskId(val int64) { - p.TaskId = val -} -func (p *TSnapshotLoaderReportRequest) SetTaskType(val types.TTaskType) { - p.TaskType = val + return p.Replicas } -func (p *TSnapshotLoaderReportRequest) SetFinishedNum(val *int32) { - p.FinishedNum = val +func (p *TGetMetaTablet) SetId(val *int64) { + p.Id = val } -func (p *TSnapshotLoaderReportRequest) SetTotalNum(val *int32) { - p.TotalNum = val +func (p *TGetMetaTablet) SetReplicas(val []*TGetMetaReplica) { + p.Replicas = val } -var fieldIDToName_TSnapshotLoaderReportRequest = map[int16]string{ - 1: "job_id", - 2: "task_id", - 3: "task_type", - 4: "finished_num", - 5: "total_num", +var fieldIDToName_TGetMetaTablet = map[int16]string{ + 1: "id", + 2: "replicas", } -func (p *TSnapshotLoaderReportRequest) IsSetFinishedNum() bool { - return p.FinishedNum != nil +func (p *TGetMetaTablet) IsSetId() bool { + return p.Id != nil } -func (p *TSnapshotLoaderReportRequest) IsSetTotalNum() bool { - return p.TotalNum != nil +func (p *TGetMetaTablet) IsSetReplicas() bool { + return p.Replicas != nil } -func (p *TSnapshotLoaderReportRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaTablet) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetJobId bool = false - var issetTaskId bool = false - var issetTaskType bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto 
ReadStructBeginError @@ -35676,60 +66658,22 @@ func (p *TSnapshotLoaderReportRequest) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetJobId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetTaskId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetTaskType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -35738,27 +66682,13 @@ func (p *TSnapshotLoaderReportRequest) Read(iprot thrift.TProtocol) (err error) goto ReadStructEndError } - if !issetJobId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetTaskId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetTaskType { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotLoaderReportRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTablet[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -35766,58 +66696,46 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotLoaderReportRequest[fieldId])) } -func (p *TSnapshotLoaderReportRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.JobId = v - } - return nil -} +func (p *TGetMetaTablet) ReadField1(iprot thrift.TProtocol) error { -func (p *TSnapshotLoaderReportRequest) ReadField2(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TaskId = v + _field = &v } + p.Id = _field return nil } - -func (p *TSnapshotLoaderReportRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TGetMetaTablet) ReadField2(iprot 
thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.TaskType = types.TTaskType(v) } - return nil -} + _field := make([]*TGetMetaReplica, 0, size) + values := make([]TGetMetaReplica, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TSnapshotLoaderReportRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.FinishedNum = &v - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TSnapshotLoaderReportRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.TotalNum = &v } + p.Replicas = _field return nil } -func (p *TSnapshotLoaderReportRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaTablet) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TSnapshotLoaderReportRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaTablet"); err != nil { goto WriteStructBeginError } if p != nil { @@ -35829,19 +66747,6 @@ func (p *TSnapshotLoaderReportRequest) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -35860,63 +66765,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TSnapshotLoaderReportRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.JobId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TSnapshotLoaderReportRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("task_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.TaskId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TSnapshotLoaderReportRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("task_type", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.TaskType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p 
*TSnapshotLoaderReportRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetFinishedNum() { - if err = oprot.WriteFieldBegin("finished_num", thrift.I32, 4); err != nil { +func (p *TGetMetaTablet) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.FinishedNum); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -35925,17 +66779,25 @@ func (p *TSnapshotLoaderReportRequest) writeField4(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TSnapshotLoaderReportRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTotalNum() { - if err = oprot.WriteFieldBegin("total_num", thrift.I32, 5); err != nil { +func (p *TGetMetaTablet) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetReplicas() { + if err = oprot.WriteFieldBegin("replicas", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.TotalNum); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Replicas)); err != nil { + return err + } + for _, v := range p.Replicas { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -35944,126 +66806,131 @@ func (p *TSnapshotLoaderReportRequest) writeField5(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TSnapshotLoaderReportRequest) String() string { +func (p *TGetMetaTablet) String() string { if p == nil { return "" } - return fmt.Sprintf("TSnapshotLoaderReportRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaTablet(%+v)", *p) + } -func (p *TSnapshotLoaderReportRequest) DeepEqual(ano *TSnapshotLoaderReportRequest) bool { +func (p *TGetMetaTablet) DeepEqual(ano *TGetMetaTablet) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.JobId) { - return false - } - if !p.Field2DeepEqual(ano.TaskId) { - return false - } - if !p.Field3DeepEqual(ano.TaskType) { - return false - } - if !p.Field4DeepEqual(ano.FinishedNum) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field5DeepEqual(ano.TotalNum) { + if !p.Field2DeepEqual(ano.Replicas) { return false } return true } -func (p *TSnapshotLoaderReportRequest) Field1DeepEqual(src int64) bool { +func (p *TGetMetaTablet) Field1DeepEqual(src *int64) bool { - if p.JobId != src { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { return false } - return true -} -func (p *TSnapshotLoaderReportRequest) Field2DeepEqual(src int64) bool { - - if p.TaskId != src { + if *p.Id != *src { return false } return true } -func (p 
*TSnapshotLoaderReportRequest) Field3DeepEqual(src types.TTaskType) bool { +func (p *TGetMetaTablet) Field2DeepEqual(src []*TGetMetaReplica) bool { - if p.TaskType != src { + if len(p.Replicas) != len(src) { return false } + for i, v := range p.Replicas { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -func (p *TSnapshotLoaderReportRequest) Field4DeepEqual(src *int32) bool { - if p.FinishedNum == src { - return true - } else if p.FinishedNum == nil || src == nil { - return false - } - if *p.FinishedNum != *src { - return false - } - return true +type TGetMetaIndex struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Tablets []*TGetMetaTablet `thrift:"tablets,3,optional" frugal:"3,optional,list" json:"tablets,omitempty"` } -func (p *TSnapshotLoaderReportRequest) Field5DeepEqual(src *int32) bool { - if p.TotalNum == src { - return true - } else if p.TotalNum == nil || src == nil { - return false - } - if *p.TotalNum != *src { - return false - } - return true +func NewTGetMetaIndex() *TGetMetaIndex { + return &TGetMetaIndex{} } -type TFrontendPingFrontendRequest struct { - ClusterId int32 `thrift:"clusterId,1,required" frugal:"1,required,i32" json:"clusterId"` - Token string `thrift:"token,2,required" frugal:"2,required,string" json:"token"` +func (p *TGetMetaIndex) InitDefault() { } -func NewTFrontendPingFrontendRequest() *TFrontendPingFrontendRequest { - return &TFrontendPingFrontendRequest{} +var TGetMetaIndex_Id_DEFAULT int64 + +func (p *TGetMetaIndex) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaIndex_Id_DEFAULT + } + return *p.Id } -func (p *TFrontendPingFrontendRequest) InitDefault() { - *p = TFrontendPingFrontendRequest{} +var TGetMetaIndex_Name_DEFAULT string + +func (p *TGetMetaIndex) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaIndex_Name_DEFAULT + } + return *p.Name } -func (p *TFrontendPingFrontendRequest) GetClusterId() (v int32) { - return p.ClusterId +var TGetMetaIndex_Tablets_DEFAULT []*TGetMetaTablet + +func (p *TGetMetaIndex) GetTablets() (v []*TGetMetaTablet) { + if !p.IsSetTablets() { + return TGetMetaIndex_Tablets_DEFAULT + } + return p.Tablets +} +func (p *TGetMetaIndex) SetId(val *int64) { + p.Id = val +} +func (p *TGetMetaIndex) SetName(val *string) { + p.Name = val +} +func (p *TGetMetaIndex) SetTablets(val []*TGetMetaTablet) { + p.Tablets = val } -func (p *TFrontendPingFrontendRequest) GetToken() (v string) { - return p.Token +var fieldIDToName_TGetMetaIndex = map[int16]string{ + 1: "id", + 2: "name", + 3: "tablets", } -func (p *TFrontendPingFrontendRequest) SetClusterId(val int32) { - p.ClusterId = val + +func (p *TGetMetaIndex) IsSetId() bool { + return p.Id != nil } -func (p *TFrontendPingFrontendRequest) SetToken(val string) { - p.Token = val + +func (p *TGetMetaIndex) IsSetName() bool { + return p.Name != nil } -var fieldIDToName_TFrontendPingFrontendRequest = map[int16]string{ - 1: "clusterId", - 2: "token", +func (p *TGetMetaIndex) IsSetTablets() bool { + return p.Tablets != nil } -func (p *TFrontendPingFrontendRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaIndex) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetClusterId bool = false - var issetToken bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -36080,57 +66947,49 @@ func (p 
*TFrontendPingFrontendRequest) Read(iprot thrift.TProtocol) (err error) switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetClusterId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetToken = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } } if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - if !issetClusterId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetToken { - fieldId = 2 - goto RequiredFieldNotSetError + goto ReadStructEndError } + return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaIndex[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -36138,31 +66997,57 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendRequest[fieldId])) } -func (p *TFrontendPingFrontendRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TGetMetaIndex) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ClusterId = v + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaIndex) ReadField2(iprot thrift.TProtocol) error { -func (p *TFrontendPingFrontendRequest) ReadField2(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Token = v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaIndex) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TGetMetaTablet, 0, size) + values := make([]TGetMetaTablet, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TFrontendPingFrontendRequest) Write(oprot thrift.TProtocol) (err error) { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + 
p.Tablets = _field + return nil +} + +func (p *TGetMetaIndex) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFrontendPingFrontendRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaIndex"); err != nil { goto WriteStructBeginError } if p != nil { @@ -36174,7 +67059,10 @@ func (p *TFrontendPingFrontendRequest) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -36193,15 +67081,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFrontendPingFrontendRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("clusterId", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.ClusterId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaIndex) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36210,15 +67100,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFrontendPingFrontendRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaIndex) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36227,141 +67119,222 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFrontendPingFrontendRequest) String() string { +func (p *TGetMetaIndex) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTablets() { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { + return err + } + for _, v := range p.Tablets { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMetaIndex) String() string { if p == nil { return "" } - return fmt.Sprintf("TFrontendPingFrontendRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaIndex(%+v)", *p) + } -func (p *TFrontendPingFrontendRequest) DeepEqual(ano *TFrontendPingFrontendRequest) bool { +func (p 
*TGetMetaIndex) DeepEqual(ano *TGetMetaIndex) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ClusterId) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.Token) { + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Tablets) { return false } return true } -func (p *TFrontendPingFrontendRequest) Field1DeepEqual(src int32) bool { +func (p *TGetMetaIndex) Field1DeepEqual(src *int64) bool { - if p.ClusterId != src { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { return false } return true } -func (p *TFrontendPingFrontendRequest) Field2DeepEqual(src string) bool { +func (p *TGetMetaIndex) Field2DeepEqual(src *string) bool { - if strings.Compare(p.Token, src) != 0 { + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { return false } return true } +func (p *TGetMetaIndex) Field3DeepEqual(src []*TGetMetaTablet) bool { -type TDiskInfo struct { - DirType string `thrift:"dirType,1,required" frugal:"1,required,string" json:"dirType"` - Dir string `thrift:"dir,2,required" frugal:"2,required,string" json:"dir"` - Filesystem string `thrift:"filesystem,3,required" frugal:"3,required,string" json:"filesystem"` - Blocks int64 `thrift:"blocks,4,required" frugal:"4,required,i64" json:"blocks"` - Used int64 `thrift:"used,5,required" frugal:"5,required,i64" json:"used"` - Available int64 `thrift:"available,6,required" frugal:"6,required,i64" json:"available"` - UseRate int32 `thrift:"useRate,7,required" frugal:"7,required,i32" json:"useRate"` - MountedOn string `thrift:"mountedOn,8,required" frugal:"8,required,string" json:"mountedOn"` + if len(p.Tablets) != len(src) { + return false + } + for i, v := range p.Tablets { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } -func NewTDiskInfo() *TDiskInfo { - return &TDiskInfo{} +type TGetMetaPartition struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Key *string `thrift:"key,3,optional" frugal:"3,optional,string" json:"key,omitempty"` + Range *string `thrift:"range,4,optional" frugal:"4,optional,string" json:"range,omitempty"` + IsTemp *bool `thrift:"is_temp,5,optional" frugal:"5,optional,bool" json:"is_temp,omitempty"` + Indexes []*TGetMetaIndex `thrift:"indexes,6,optional" frugal:"6,optional,list" json:"indexes,omitempty"` } -func (p *TDiskInfo) InitDefault() { - *p = TDiskInfo{} +func NewTGetMetaPartition() *TGetMetaPartition { + return &TGetMetaPartition{} } -func (p *TDiskInfo) GetDirType() (v string) { - return p.DirType +func (p *TGetMetaPartition) InitDefault() { } -func (p *TDiskInfo) GetDir() (v string) { - return p.Dir -} +var TGetMetaPartition_Id_DEFAULT int64 -func (p *TDiskInfo) GetFilesystem() (v string) { - return p.Filesystem +func (p *TGetMetaPartition) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaPartition_Id_DEFAULT + } + return *p.Id } -func (p *TDiskInfo) GetBlocks() (v int64) { - return p.Blocks +var TGetMetaPartition_Name_DEFAULT string + +func (p *TGetMetaPartition) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaPartition_Name_DEFAULT + } + return *p.Name } -func (p *TDiskInfo) GetUsed() (v int64) { - return p.Used +var TGetMetaPartition_Key_DEFAULT string + +func (p 
*TGetMetaPartition) GetKey() (v string) { + if !p.IsSetKey() { + return TGetMetaPartition_Key_DEFAULT + } + return *p.Key } -func (p *TDiskInfo) GetAvailable() (v int64) { - return p.Available +var TGetMetaPartition_Range_DEFAULT string + +func (p *TGetMetaPartition) GetRange() (v string) { + if !p.IsSetRange() { + return TGetMetaPartition_Range_DEFAULT + } + return *p.Range } -func (p *TDiskInfo) GetUseRate() (v int32) { - return p.UseRate +var TGetMetaPartition_IsTemp_DEFAULT bool + +func (p *TGetMetaPartition) GetIsTemp() (v bool) { + if !p.IsSetIsTemp() { + return TGetMetaPartition_IsTemp_DEFAULT + } + return *p.IsTemp } -func (p *TDiskInfo) GetMountedOn() (v string) { - return p.MountedOn +var TGetMetaPartition_Indexes_DEFAULT []*TGetMetaIndex + +func (p *TGetMetaPartition) GetIndexes() (v []*TGetMetaIndex) { + if !p.IsSetIndexes() { + return TGetMetaPartition_Indexes_DEFAULT + } + return p.Indexes } -func (p *TDiskInfo) SetDirType(val string) { - p.DirType = val +func (p *TGetMetaPartition) SetId(val *int64) { + p.Id = val } -func (p *TDiskInfo) SetDir(val string) { - p.Dir = val +func (p *TGetMetaPartition) SetName(val *string) { + p.Name = val } -func (p *TDiskInfo) SetFilesystem(val string) { - p.Filesystem = val +func (p *TGetMetaPartition) SetKey(val *string) { + p.Key = val } -func (p *TDiskInfo) SetBlocks(val int64) { - p.Blocks = val +func (p *TGetMetaPartition) SetRange(val *string) { + p.Range = val } -func (p *TDiskInfo) SetUsed(val int64) { - p.Used = val +func (p *TGetMetaPartition) SetIsTemp(val *bool) { + p.IsTemp = val } -func (p *TDiskInfo) SetAvailable(val int64) { - p.Available = val +func (p *TGetMetaPartition) SetIndexes(val []*TGetMetaIndex) { + p.Indexes = val } -func (p *TDiskInfo) SetUseRate(val int32) { - p.UseRate = val + +var fieldIDToName_TGetMetaPartition = map[int16]string{ + 1: "id", + 2: "name", + 3: "key", + 4: "range", + 5: "is_temp", + 6: "indexes", } -func (p *TDiskInfo) SetMountedOn(val string) { - p.MountedOn = val + +func (p *TGetMetaPartition) IsSetId() bool { + return p.Id != nil } -var fieldIDToName_TDiskInfo = map[int16]string{ - 1: "dirType", - 2: "dir", - 3: "filesystem", - 4: "blocks", - 5: "used", - 6: "available", - 7: "useRate", - 8: "mountedOn", +func (p *TGetMetaPartition) IsSetName() bool { + return p.Name != nil } -func (p *TDiskInfo) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaPartition) IsSetKey() bool { + return p.Key != nil +} + +func (p *TGetMetaPartition) IsSetRange() bool { + return p.Range != nil +} + +func (p *TGetMetaPartition) IsSetIsTemp() bool { + return p.IsTemp != nil +} + +func (p *TGetMetaPartition) IsSetIndexes() bool { + return p.Indexes != nil +} + +func (p *TGetMetaPartition) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetDirType bool = false - var issetDir bool = false - var issetFilesystem bool = false - var issetBlocks bool = false - var issetUsed bool = false - var issetAvailable bool = false - var issetUseRate bool = false - var issetMountedOn bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -36378,99 +67351,58 @@ func (p *TDiskInfo) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetDirType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { 
+ goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetDir = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetFilesystem = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetBlocks = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetUsed = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - issetAvailable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - issetUseRate = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - issetMountedOn = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -36479,52 +67411,13 @@ func (p *TDiskInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetDirType { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetDir { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetFilesystem { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetBlocks { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetUsed { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetAvailable { - fieldId = 6 - goto RequiredFieldNotSetError - } - - if !issetUseRate { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetMountedOn { - fieldId = 8 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDiskInfo[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaPartition[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -36532,85 +67425,90 @@ 
ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TDiskInfo[fieldId])) } -func (p *TDiskInfo) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TGetMetaPartition) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DirType = v + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaPartition) ReadField2(iprot thrift.TProtocol) error { -func (p *TDiskInfo) ReadField2(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Dir = v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaPartition) ReadField3(iprot thrift.TProtocol) error { -func (p *TDiskInfo) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Filesystem = v + _field = &v } + p.Key = _field return nil } +func (p *TGetMetaPartition) ReadField4(iprot thrift.TProtocol) error { -func (p *TDiskInfo) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.Blocks = v + _field = &v } + p.Range = _field return nil } +func (p *TGetMetaPartition) ReadField5(iprot thrift.TProtocol) error { -func (p *TDiskInfo) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Used = v + _field = &v } + p.IsTemp = _field return nil } - -func (p *TDiskInfo) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TGetMetaPartition) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.Available = v } - return nil -} + _field := make([]*TGetMetaIndex, 0, size) + values := make([]TGetMetaIndex, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TDiskInfo) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.UseRate = v - } - return nil -} + if err := _elem.Read(iprot); err != nil { + return err + } -func (p *TDiskInfo) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.MountedOn = v } + p.Indexes = _field return nil } -func (p *TDiskInfo) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaPartition) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TDiskInfo"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaPartition"); err != nil { goto WriteStructBeginError } if p != nil { @@ -36638,15 +67536,6 @@ func (p *TDiskInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err 
!= nil { goto WriteFieldStopError @@ -36665,15 +67554,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TDiskInfo) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("dirType", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.DirType); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36682,15 +67573,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TDiskInfo) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("dir", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Dir); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36699,15 +67592,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TDiskInfo) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("filesystem", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Filesystem); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Key); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36716,15 +67611,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TDiskInfo) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("blocks", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.Blocks); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRange() { + if err = oprot.WriteFieldBegin("range", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Range); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36733,15 +67630,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TDiskInfo) writeField5(oprot thrift.TProtocol) 
(err error) { - if err = oprot.WriteFieldBegin("used", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.Used); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsTemp() { + if err = oprot.WriteFieldBegin("is_temp", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsTemp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36750,15 +67649,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TDiskInfo) writeField6(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("available", thrift.I64, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.Available); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaPartition) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIndexes() { + if err = oprot.WriteFieldBegin("indexes", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Indexes)); err != nil { + return err + } + for _, v := range p.Indexes { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -36767,287 +67676,204 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TDiskInfo) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("useRate", thrift.I32, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.UseRate); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TDiskInfo) writeField8(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("mountedOn", thrift.STRING, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.MountedOn); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TDiskInfo) String() string { +func (p *TGetMetaPartition) String() string { if p == nil { return "" } - return fmt.Sprintf("TDiskInfo(%+v)", *p) + return fmt.Sprintf("TGetMetaPartition(%+v)", *p) + } -func (p *TDiskInfo) DeepEqual(ano *TDiskInfo) bool { +func (p *TGetMetaPartition) DeepEqual(ano *TGetMetaPartition) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.DirType) { - return false - } - if !p.Field2DeepEqual(ano.Dir) { - return false - } - if !p.Field3DeepEqual(ano.Filesystem) { + if !p.Field1DeepEqual(ano.Id) { return false } - if 
!p.Field4DeepEqual(ano.Blocks) { + if !p.Field2DeepEqual(ano.Name) { return false } - if !p.Field5DeepEqual(ano.Used) { + if !p.Field3DeepEqual(ano.Key) { return false } - if !p.Field6DeepEqual(ano.Available) { + if !p.Field4DeepEqual(ano.Range) { return false } - if !p.Field7DeepEqual(ano.UseRate) { + if !p.Field5DeepEqual(ano.IsTemp) { return false } - if !p.Field8DeepEqual(ano.MountedOn) { + if !p.Field6DeepEqual(ano.Indexes) { return false } return true } -func (p *TDiskInfo) Field1DeepEqual(src string) bool { +func (p *TGetMetaPartition) Field1DeepEqual(src *int64) bool { - if strings.Compare(p.DirType, src) != 0 { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { return false } - return true -} -func (p *TDiskInfo) Field2DeepEqual(src string) bool { - - if strings.Compare(p.Dir, src) != 0 { + if *p.Id != *src { return false } return true } -func (p *TDiskInfo) Field3DeepEqual(src string) bool { +func (p *TGetMetaPartition) Field2DeepEqual(src *string) bool { - if strings.Compare(p.Filesystem, src) != 0 { + if p.Name == src { + return true + } else if p.Name == nil || src == nil { return false } - return true -} -func (p *TDiskInfo) Field4DeepEqual(src int64) bool { - - if p.Blocks != src { + if strings.Compare(*p.Name, *src) != 0 { return false } return true } -func (p *TDiskInfo) Field5DeepEqual(src int64) bool { +func (p *TGetMetaPartition) Field3DeepEqual(src *string) bool { - if p.Used != src { + if p.Key == src { + return true + } else if p.Key == nil || src == nil { + return false + } + if strings.Compare(*p.Key, *src) != 0 { return false } return true } -func (p *TDiskInfo) Field6DeepEqual(src int64) bool { +func (p *TGetMetaPartition) Field4DeepEqual(src *string) bool { - if p.Available != src { + if p.Range == src { + return true + } else if p.Range == nil || src == nil { + return false + } + if strings.Compare(*p.Range, *src) != 0 { return false } return true } -func (p *TDiskInfo) Field7DeepEqual(src int32) bool { +func (p *TGetMetaPartition) Field5DeepEqual(src *bool) bool { - if p.UseRate != src { + if p.IsTemp == src { + return true + } else if p.IsTemp == nil || src == nil { + return false + } + if *p.IsTemp != *src { return false } return true } -func (p *TDiskInfo) Field8DeepEqual(src string) bool { +func (p *TGetMetaPartition) Field6DeepEqual(src []*TGetMetaIndex) bool { - if strings.Compare(p.MountedOn, src) != 0 { + if len(p.Indexes) != len(src) { return false } + for i, v := range p.Indexes { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -type TFrontendPingFrontendResult_ struct { - Status TFrontendPingFrontendStatusCode `thrift:"status,1,required" frugal:"1,required,TFrontendPingFrontendStatusCode" json:"status"` - Msg string `thrift:"msg,2,required" frugal:"2,required,string" json:"msg"` - QueryPort int32 `thrift:"queryPort,3,required" frugal:"3,required,i32" json:"queryPort"` - RpcPort int32 `thrift:"rpcPort,4,required" frugal:"4,required,i32" json:"rpcPort"` - ReplayedJournalId int64 `thrift:"replayedJournalId,5,required" frugal:"5,required,i64" json:"replayedJournalId"` - Version string `thrift:"version,6,required" frugal:"6,required,string" json:"version"` - LastStartupTime *int64 `thrift:"lastStartupTime,7,optional" frugal:"7,optional,i64" json:"lastStartupTime,omitempty"` - DiskInfos []*TDiskInfo `thrift:"diskInfos,8,optional" frugal:"8,optional,list" json:"diskInfos,omitempty"` - ProcessUUID *int64 `thrift:"processUUID,9,optional" frugal:"9,optional,i64" json:"processUUID,omitempty"` - 
ArrowFlightSqlPort *int32 `thrift:"arrowFlightSqlPort,10,optional" frugal:"10,optional,i32" json:"arrowFlightSqlPort,omitempty"` -} - -func NewTFrontendPingFrontendResult_() *TFrontendPingFrontendResult_ { - return &TFrontendPingFrontendResult_{} -} - -func (p *TFrontendPingFrontendResult_) InitDefault() { - *p = TFrontendPingFrontendResult_{} -} - -func (p *TFrontendPingFrontendResult_) GetStatus() (v TFrontendPingFrontendStatusCode) { - return p.Status -} - -func (p *TFrontendPingFrontendResult_) GetMsg() (v string) { - return p.Msg -} - -func (p *TFrontendPingFrontendResult_) GetQueryPort() (v int32) { - return p.QueryPort -} - -func (p *TFrontendPingFrontendResult_) GetRpcPort() (v int32) { - return p.RpcPort +type TGetMetaTable struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + InTrash *bool `thrift:"in_trash,3,optional" frugal:"3,optional,bool" json:"in_trash,omitempty"` + Partitions []*TGetMetaPartition `thrift:"partitions,4,optional" frugal:"4,optional,list" json:"partitions,omitempty"` } -func (p *TFrontendPingFrontendResult_) GetReplayedJournalId() (v int64) { - return p.ReplayedJournalId +func NewTGetMetaTable() *TGetMetaTable { + return &TGetMetaTable{} } -func (p *TFrontendPingFrontendResult_) GetVersion() (v string) { - return p.Version +func (p *TGetMetaTable) InitDefault() { } -var TFrontendPingFrontendResult__LastStartupTime_DEFAULT int64 +var TGetMetaTable_Id_DEFAULT int64 -func (p *TFrontendPingFrontendResult_) GetLastStartupTime() (v int64) { - if !p.IsSetLastStartupTime() { - return TFrontendPingFrontendResult__LastStartupTime_DEFAULT +func (p *TGetMetaTable) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaTable_Id_DEFAULT } - return *p.LastStartupTime + return *p.Id } -var TFrontendPingFrontendResult__DiskInfos_DEFAULT []*TDiskInfo +var TGetMetaTable_Name_DEFAULT string -func (p *TFrontendPingFrontendResult_) GetDiskInfos() (v []*TDiskInfo) { - if !p.IsSetDiskInfos() { - return TFrontendPingFrontendResult__DiskInfos_DEFAULT +func (p *TGetMetaTable) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaTable_Name_DEFAULT } - return p.DiskInfos + return *p.Name } -var TFrontendPingFrontendResult__ProcessUUID_DEFAULT int64 +var TGetMetaTable_InTrash_DEFAULT bool -func (p *TFrontendPingFrontendResult_) GetProcessUUID() (v int64) { - if !p.IsSetProcessUUID() { - return TFrontendPingFrontendResult__ProcessUUID_DEFAULT +func (p *TGetMetaTable) GetInTrash() (v bool) { + if !p.IsSetInTrash() { + return TGetMetaTable_InTrash_DEFAULT } - return *p.ProcessUUID + return *p.InTrash } -var TFrontendPingFrontendResult__ArrowFlightSqlPort_DEFAULT int32 +var TGetMetaTable_Partitions_DEFAULT []*TGetMetaPartition -func (p *TFrontendPingFrontendResult_) GetArrowFlightSqlPort() (v int32) { - if !p.IsSetArrowFlightSqlPort() { - return TFrontendPingFrontendResult__ArrowFlightSqlPort_DEFAULT +func (p *TGetMetaTable) GetPartitions() (v []*TGetMetaPartition) { + if !p.IsSetPartitions() { + return TGetMetaTable_Partitions_DEFAULT } - return *p.ArrowFlightSqlPort -} -func (p *TFrontendPingFrontendResult_) SetStatus(val TFrontendPingFrontendStatusCode) { - p.Status = val -} -func (p *TFrontendPingFrontendResult_) SetMsg(val string) { - p.Msg = val -} -func (p *TFrontendPingFrontendResult_) SetQueryPort(val int32) { - p.QueryPort = val -} -func (p *TFrontendPingFrontendResult_) SetRpcPort(val int32) { - p.RpcPort = val -} -func (p 
*TFrontendPingFrontendResult_) SetReplayedJournalId(val int64) { - p.ReplayedJournalId = val -} -func (p *TFrontendPingFrontendResult_) SetVersion(val string) { - p.Version = val + return p.Partitions } -func (p *TFrontendPingFrontendResult_) SetLastStartupTime(val *int64) { - p.LastStartupTime = val +func (p *TGetMetaTable) SetId(val *int64) { + p.Id = val } -func (p *TFrontendPingFrontendResult_) SetDiskInfos(val []*TDiskInfo) { - p.DiskInfos = val +func (p *TGetMetaTable) SetName(val *string) { + p.Name = val } -func (p *TFrontendPingFrontendResult_) SetProcessUUID(val *int64) { - p.ProcessUUID = val +func (p *TGetMetaTable) SetInTrash(val *bool) { + p.InTrash = val } -func (p *TFrontendPingFrontendResult_) SetArrowFlightSqlPort(val *int32) { - p.ArrowFlightSqlPort = val +func (p *TGetMetaTable) SetPartitions(val []*TGetMetaPartition) { + p.Partitions = val } -var fieldIDToName_TFrontendPingFrontendResult_ = map[int16]string{ - 1: "status", - 2: "msg", - 3: "queryPort", - 4: "rpcPort", - 5: "replayedJournalId", - 6: "version", - 7: "lastStartupTime", - 8: "diskInfos", - 9: "processUUID", - 10: "arrowFlightSqlPort", +var fieldIDToName_TGetMetaTable = map[int16]string{ + 1: "id", + 2: "name", + 3: "in_trash", + 4: "partitions", } -func (p *TFrontendPingFrontendResult_) IsSetLastStartupTime() bool { - return p.LastStartupTime != nil +func (p *TGetMetaTable) IsSetId() bool { + return p.Id != nil } -func (p *TFrontendPingFrontendResult_) IsSetDiskInfos() bool { - return p.DiskInfos != nil +func (p *TGetMetaTable) IsSetName() bool { + return p.Name != nil } -func (p *TFrontendPingFrontendResult_) IsSetProcessUUID() bool { - return p.ProcessUUID != nil +func (p *TGetMetaTable) IsSetInTrash() bool { + return p.InTrash != nil } -func (p *TFrontendPingFrontendResult_) IsSetArrowFlightSqlPort() bool { - return p.ArrowFlightSqlPort != nil +func (p *TGetMetaTable) IsSetPartitions() bool { + return p.Partitions != nil } -func (p *TFrontendPingFrontendResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaTable) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false - var issetMsg bool = false - var issetQueryPort bool = false - var issetRpcPort bool = false - var issetReplayedJournalId bool = false - var issetVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -37064,117 +67890,42 @@ func (p *TFrontendPingFrontendResult_) Read(iprot thrift.TProtocol) (err error) switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetMsg = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetQueryPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I32 { - if err = 
p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - issetRpcPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - issetReplayedJournalId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: if fieldTypeId == thrift.LIST { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I32 { - if err = p.ReadField10(iprot); err != nil { + if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -37183,42 +67934,13 @@ func (p *TFrontendPingFrontendResult_) Read(iprot thrift.TProtocol) (err error) goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetMsg { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetQueryPort { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetRpcPort { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetReplayedJournalId { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetVersion { - fieldId = 6 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTable[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -37226,114 +67948,68 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendResult_[fieldId])) -} - -func (p *TFrontendPingFrontendResult_) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.Status = TFrontendPingFrontendStatusCode(v) - } 
- return nil -} - -func (p *TFrontendPingFrontendResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Msg = v - } - return nil -} - -func (p *TFrontendPingFrontendResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.QueryPort = v - } - return nil } -func (p *TFrontendPingFrontendResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.RpcPort = v - } - return nil -} +func (p *TGetMetaTable) ReadField1(iprot thrift.TProtocol) error { -func (p *TFrontendPingFrontendResult_) ReadField5(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplayedJournalId = v + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaTable) ReadField2(iprot thrift.TProtocol) error { -func (p *TFrontendPingFrontendResult_) ReadField6(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Version = v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaTable) ReadField3(iprot thrift.TProtocol) error { -func (p *TFrontendPingFrontendResult_) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.LastStartupTime = &v + _field = &v } + p.InTrash = _field return nil } - -func (p *TFrontendPingFrontendResult_) ReadField8(iprot thrift.TProtocol) error { +func (p *TGetMetaTable) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DiskInfos = make([]*TDiskInfo, 0, size) + _field := make([]*TGetMetaPartition, 0, size) + values := make([]TGetMetaPartition, size) for i := 0; i < size; i++ { - _elem := NewTDiskInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.DiskInfos = append(p.DiskInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Partitions = _field return nil } -func (p *TFrontendPingFrontendResult_) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ProcessUUID = &v - } - return nil -} - -func (p *TFrontendPingFrontendResult_) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ArrowFlightSqlPort = &v - } - return nil -} - -func (p *TFrontendPingFrontendResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaTable) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFrontendPingFrontendResult"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaTable"); err != nil { goto WriteStructBeginError } if p != nil { @@ -37353,31 +68029,6 @@ func (p *TFrontendPingFrontendResult_) Write(oprot thrift.TProtocol) (err error) fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto 
WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -37396,114 +68047,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFrontendPingFrontendResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("msg", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Msg); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("queryPort", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.QueryPort); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("rpcPort", thrift.I32, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.RpcPort); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("replayedJournalId", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.ReplayedJournalId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField6(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("version", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Version); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil 
-WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TFrontendPingFrontendResult_) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetLastStartupTime() { - if err = oprot.WriteFieldBegin("lastStartupTime", thrift.I64, 7); err != nil { +func (p *TGetMetaTable) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LastStartupTime); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -37512,25 +68061,17 @@ func (p *TFrontendPingFrontendResult_) writeField7(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFrontendPingFrontendResult_) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetDiskInfos() { - if err = oprot.WriteFieldBegin("diskInfos", thrift.LIST, 8); err != nil { +func (p *TGetMetaTable) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DiskInfos)); err != nil { - return err - } - for _, v := range p.DiskInfos { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -37539,17 +68080,17 @@ func (p *TFrontendPingFrontendResult_) writeField8(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFrontendPingFrontendResult_) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetProcessUUID() { - if err = oprot.WriteFieldBegin("processUUID", thrift.I64, 9); err != nil { +func (p *TGetMetaTable) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetInTrash() { + if err = oprot.WriteFieldBegin("in_trash", thrift.BOOL, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ProcessUUID); err != nil { + if err := oprot.WriteBool(*p.InTrash); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -37558,17 +68099,25 @@ func (p *TFrontendPingFrontendResult_) writeField9(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", 
p), err) } -func (p *TFrontendPingFrontendResult_) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetArrowFlightSqlPort() { - if err = oprot.WriteFieldBegin("arrowFlightSqlPort", thrift.I32, 10); err != nil { +func (p *TGetMetaTable) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.ArrowFlightSqlPort); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return err + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -37577,117 +68126,82 @@ func (p *TFrontendPingFrontendResult_) writeField10(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TFrontendPingFrontendResult_) String() string { +func (p *TGetMetaTable) String() string { if p == nil { return "" } - return fmt.Sprintf("TFrontendPingFrontendResult_(%+v)", *p) + return fmt.Sprintf("TGetMetaTable(%+v)", *p) + } -func (p *TFrontendPingFrontendResult_) DeepEqual(ano *TFrontendPingFrontendResult_) bool { +func (p *TGetMetaTable) DeepEqual(ano *TGetMetaTable) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Msg) { - return false - } - if !p.Field3DeepEqual(ano.QueryPort) { - return false - } - if !p.Field4DeepEqual(ano.RpcPort) { - return false - } - if !p.Field5DeepEqual(ano.ReplayedJournalId) { - return false - } - if !p.Field6DeepEqual(ano.Version) { - return false - } - if !p.Field7DeepEqual(ano.LastStartupTime) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field8DeepEqual(ano.DiskInfos) { - return false - } - if !p.Field9DeepEqual(ano.ProcessUUID) { + if !p.Field2DeepEqual(ano.Name) { return false } - if !p.Field10DeepEqual(ano.ArrowFlightSqlPort) { + if !p.Field3DeepEqual(ano.InTrash) { return false } - return true -} - -func (p *TFrontendPingFrontendResult_) Field1DeepEqual(src TFrontendPingFrontendStatusCode) bool { - - if p.Status != src { + if !p.Field4DeepEqual(ano.Partitions) { return false } return true } -func (p *TFrontendPingFrontendResult_) Field2DeepEqual(src string) bool { - if strings.Compare(p.Msg, src) != 0 { - return false - } - return true -} -func (p *TFrontendPingFrontendResult_) Field3DeepEqual(src int32) bool { +func (p *TGetMetaTable) Field1DeepEqual(src *int64) bool { - if p.QueryPort != src { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { return false } - return true -} -func (p *TFrontendPingFrontendResult_) Field4DeepEqual(src int32) bool { - - if p.RpcPort != src { + if *p.Id != *src { return false } return true } -func (p *TFrontendPingFrontendResult_) Field5DeepEqual(src int64) bool { +func (p *TGetMetaTable) Field2DeepEqual(src *string) bool { - if p.ReplayedJournalId != src { + if p.Name == src { + return true + } else if p.Name == nil || src == nil { return false } - return true -} -func (p 
*TFrontendPingFrontendResult_) Field6DeepEqual(src string) bool { - - if strings.Compare(p.Version, src) != 0 { + if strings.Compare(*p.Name, *src) != 0 { return false } return true } -func (p *TFrontendPingFrontendResult_) Field7DeepEqual(src *int64) bool { +func (p *TGetMetaTable) Field3DeepEqual(src *bool) bool { - if p.LastStartupTime == src { + if p.InTrash == src { return true - } else if p.LastStartupTime == nil || src == nil { + } else if p.InTrash == nil || src == nil { return false } - if *p.LastStartupTime != *src { + if *p.InTrash != *src { return false } return true } -func (p *TFrontendPingFrontendResult_) Field8DeepEqual(src []*TDiskInfo) bool { +func (p *TGetMetaTable) Field4DeepEqual(src []*TGetMetaPartition) bool { - if len(p.DiskInfos) != len(src) { + if len(p.Partitions) != len(src) { return false } - for i, v := range p.DiskInfos { + for i, v := range p.Partitions { _src := src[i] if !v.DeepEqual(_src) { return false @@ -37695,118 +68209,93 @@ func (p *TFrontendPingFrontendResult_) Field8DeepEqual(src []*TDiskInfo) bool { } return true } -func (p *TFrontendPingFrontendResult_) Field9DeepEqual(src *int64) bool { - - if p.ProcessUUID == src { - return true - } else if p.ProcessUUID == nil || src == nil { - return false - } - if *p.ProcessUUID != *src { - return false - } - return true -} -func (p *TFrontendPingFrontendResult_) Field10DeepEqual(src *int32) bool { - - if p.ArrowFlightSqlPort == src { - return true - } else if p.ArrowFlightSqlPort == nil || src == nil { - return false - } - if *p.ArrowFlightSqlPort != *src { - return false - } - return true -} -type TPropertyVal struct { - StrVal *string `thrift:"strVal,1,optional" frugal:"1,optional,string" json:"strVal,omitempty"` - IntVal *int32 `thrift:"intVal,2,optional" frugal:"2,optional,i32" json:"intVal,omitempty"` - LongVal *int64 `thrift:"longVal,3,optional" frugal:"3,optional,i64" json:"longVal,omitempty"` - BoolVal *bool `thrift:"boolVal,4,optional" frugal:"4,optional,bool" json:"boolVal,omitempty"` +type TGetMetaDB struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + OnlyTableNames *bool `thrift:"only_table_names,3,optional" frugal:"3,optional,bool" json:"only_table_names,omitempty"` + Tables []*TGetMetaTable `thrift:"tables,4,optional" frugal:"4,optional,list" json:"tables,omitempty"` } -func NewTPropertyVal() *TPropertyVal { - return &TPropertyVal{} +func NewTGetMetaDB() *TGetMetaDB { + return &TGetMetaDB{} } -func (p *TPropertyVal) InitDefault() { - *p = TPropertyVal{} +func (p *TGetMetaDB) InitDefault() { } -var TPropertyVal_StrVal_DEFAULT string +var TGetMetaDB_Id_DEFAULT int64 -func (p *TPropertyVal) GetStrVal() (v string) { - if !p.IsSetStrVal() { - return TPropertyVal_StrVal_DEFAULT +func (p *TGetMetaDB) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaDB_Id_DEFAULT } - return *p.StrVal + return *p.Id } -var TPropertyVal_IntVal_DEFAULT int32 +var TGetMetaDB_Name_DEFAULT string -func (p *TPropertyVal) GetIntVal() (v int32) { - if !p.IsSetIntVal() { - return TPropertyVal_IntVal_DEFAULT +func (p *TGetMetaDB) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaDB_Name_DEFAULT } - return *p.IntVal + return *p.Name } -var TPropertyVal_LongVal_DEFAULT int64 +var TGetMetaDB_OnlyTableNames_DEFAULT bool -func (p *TPropertyVal) GetLongVal() (v int64) { - if !p.IsSetLongVal() { - return TPropertyVal_LongVal_DEFAULT +func (p *TGetMetaDB) GetOnlyTableNames() (v bool) { + 
if !p.IsSetOnlyTableNames() { + return TGetMetaDB_OnlyTableNames_DEFAULT } - return *p.LongVal + return *p.OnlyTableNames } -var TPropertyVal_BoolVal_DEFAULT bool +var TGetMetaDB_Tables_DEFAULT []*TGetMetaTable -func (p *TPropertyVal) GetBoolVal() (v bool) { - if !p.IsSetBoolVal() { - return TPropertyVal_BoolVal_DEFAULT +func (p *TGetMetaDB) GetTables() (v []*TGetMetaTable) { + if !p.IsSetTables() { + return TGetMetaDB_Tables_DEFAULT } - return *p.BoolVal + return p.Tables } -func (p *TPropertyVal) SetStrVal(val *string) { - p.StrVal = val +func (p *TGetMetaDB) SetId(val *int64) { + p.Id = val } -func (p *TPropertyVal) SetIntVal(val *int32) { - p.IntVal = val +func (p *TGetMetaDB) SetName(val *string) { + p.Name = val } -func (p *TPropertyVal) SetLongVal(val *int64) { - p.LongVal = val +func (p *TGetMetaDB) SetOnlyTableNames(val *bool) { + p.OnlyTableNames = val } -func (p *TPropertyVal) SetBoolVal(val *bool) { - p.BoolVal = val +func (p *TGetMetaDB) SetTables(val []*TGetMetaTable) { + p.Tables = val } -var fieldIDToName_TPropertyVal = map[int16]string{ - 1: "strVal", - 2: "intVal", - 3: "longVal", - 4: "boolVal", +var fieldIDToName_TGetMetaDB = map[int16]string{ + 1: "id", + 2: "name", + 3: "only_table_names", + 4: "tables", } -func (p *TPropertyVal) IsSetStrVal() bool { - return p.StrVal != nil +func (p *TGetMetaDB) IsSetId() bool { + return p.Id != nil } -func (p *TPropertyVal) IsSetIntVal() bool { - return p.IntVal != nil +func (p *TGetMetaDB) IsSetName() bool { + return p.Name != nil } -func (p *TPropertyVal) IsSetLongVal() bool { - return p.LongVal != nil +func (p *TGetMetaDB) IsSetOnlyTableNames() bool { + return p.OnlyTableNames != nil } -func (p *TPropertyVal) IsSetBoolVal() bool { - return p.BoolVal != nil +func (p *TGetMetaDB) IsSetTables() bool { + return p.Tables != nil } -func (p *TPropertyVal) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaDB) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -37826,51 +68315,42 @@ func (p *TPropertyVal) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -37885,7 +68365,7 @@ ReadStructBeginError: ReadFieldBeginError: 
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPropertyVal[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaDB[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -37895,45 +68375,66 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPropertyVal) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TGetMetaDB) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.StrVal = &v + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaDB) ReadField2(iprot thrift.TProtocol) error { -func (p *TPropertyVal) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.IntVal = &v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaDB) ReadField3(iprot thrift.TProtocol) error { -func (p *TPropertyVal) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.LongVal = &v + _field = &v } + p.OnlyTableNames = _field return nil } +func (p *TGetMetaDB) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TGetMetaTable, 0, size) + values := make([]TGetMetaTable, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TPropertyVal) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.BoolVal = &v } + p.Tables = _field return nil } -func (p *TPropertyVal) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaDB) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TPropertyVal"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaDB"); err != nil { goto WriteStructBeginError } if p != nil { @@ -37953,7 +68454,6 @@ func (p *TPropertyVal) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -37972,12 +68472,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPropertyVal) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStrVal() { - if err = oprot.WriteFieldBegin("strVal", thrift.STRING, 1); err != nil { +func (p *TGetMetaDB) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.StrVal); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -37991,12 +68491,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p 
*TPropertyVal) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetIntVal() { - if err = oprot.WriteFieldBegin("intVal", thrift.I32, 2); err != nil { +func (p *TGetMetaDB) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.IntVal); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38010,12 +68510,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPropertyVal) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetLongVal() { - if err = oprot.WriteFieldBegin("longVal", thrift.I64, 3); err != nil { +func (p *TGetMetaDB) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetOnlyTableNames() { + if err = oprot.WriteFieldBegin("only_table_names", thrift.BOOL, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LongVal); err != nil { + if err := oprot.WriteBool(*p.OnlyTableNames); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38029,12 +68529,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TPropertyVal) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetBoolVal() { - if err = oprot.WriteFieldBegin("boolVal", thrift.BOOL, 4); err != nil { +func (p *TGetMetaDB) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTables() { + if err = oprot.WriteFieldBegin("tables", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.BoolVal); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tables)); err != nil { + return err + } + for _, v := range p.Tables { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38048,152 +68556,207 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TPropertyVal) String() string { +func (p *TGetMetaDB) String() string { if p == nil { return "" } - return fmt.Sprintf("TPropertyVal(%+v)", *p) + return fmt.Sprintf("TGetMetaDB(%+v)", *p) + } -func (p *TPropertyVal) DeepEqual(ano *TPropertyVal) bool { +func (p *TGetMetaDB) DeepEqual(ano *TGetMetaDB) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.StrVal) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.IntVal) { + if !p.Field2DeepEqual(ano.Name) { return false } - if !p.Field3DeepEqual(ano.LongVal) { + if !p.Field3DeepEqual(ano.OnlyTableNames) { return false } - if !p.Field4DeepEqual(ano.BoolVal) { + if !p.Field4DeepEqual(ano.Tables) { return false } return true } -func (p *TPropertyVal) Field1DeepEqual(src *string) bool { +func (p *TGetMetaDB) Field1DeepEqual(src *int64) bool { - if p.StrVal == src { + if p.Id == src { return true - } else if p.StrVal == nil || src == nil { + } else if p.Id == nil || src == nil { return false } - if strings.Compare(*p.StrVal, *src) != 0 { + if *p.Id != *src { return false } return true } -func (p *TPropertyVal) Field2DeepEqual(src *int32) bool { +func (p *TGetMetaDB) Field2DeepEqual(src *string) bool { - if p.IntVal == src { + if p.Name == src { return true - } else if p.IntVal == nil || src == nil { + } else if p.Name == 
nil || src == nil { return false } - if *p.IntVal != *src { + if strings.Compare(*p.Name, *src) != 0 { return false } return true } -func (p *TPropertyVal) Field3DeepEqual(src *int64) bool { +func (p *TGetMetaDB) Field3DeepEqual(src *bool) bool { - if p.LongVal == src { + if p.OnlyTableNames == src { return true - } else if p.LongVal == nil || src == nil { + } else if p.OnlyTableNames == nil || src == nil { return false } - if *p.LongVal != *src { + if *p.OnlyTableNames != *src { return false } return true } -func (p *TPropertyVal) Field4DeepEqual(src *bool) bool { +func (p *TGetMetaDB) Field4DeepEqual(src []*TGetMetaTable) bool { - if p.BoolVal == src { - return true - } else if p.BoolVal == nil || src == nil { + if len(p.Tables) != len(src) { return false } - if *p.BoolVal != *src { - return false + for i, v := range p.Tables { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -type TWaitingTxnStatusRequest struct { - DbId *int64 `thrift:"db_id,1,optional" frugal:"1,optional,i64" json:"db_id,omitempty"` - TxnId *int64 `thrift:"txn_id,2,optional" frugal:"2,optional,i64" json:"txn_id,omitempty"` - Label *string `thrift:"label,3,optional" frugal:"3,optional,string" json:"label,omitempty"` +type TGetMetaRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + UserIp *string `thrift:"user_ip,4,optional" frugal:"4,optional,string" json:"user_ip,omitempty"` + Token *string `thrift:"token,5,optional" frugal:"5,optional,string" json:"token,omitempty"` + Db *TGetMetaDB `thrift:"db,6,optional" frugal:"6,optional,TGetMetaDB" json:"db,omitempty"` } -func NewTWaitingTxnStatusRequest() *TWaitingTxnStatusRequest { - return &TWaitingTxnStatusRequest{} +func NewTGetMetaRequest() *TGetMetaRequest { + return &TGetMetaRequest{} } -func (p *TWaitingTxnStatusRequest) InitDefault() { - *p = TWaitingTxnStatusRequest{} +func (p *TGetMetaRequest) InitDefault() { } -var TWaitingTxnStatusRequest_DbId_DEFAULT int64 +var TGetMetaRequest_Cluster_DEFAULT string -func (p *TWaitingTxnStatusRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TWaitingTxnStatusRequest_DbId_DEFAULT +func (p *TGetMetaRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGetMetaRequest_Cluster_DEFAULT } - return *p.DbId + return *p.Cluster } -var TWaitingTxnStatusRequest_TxnId_DEFAULT int64 +var TGetMetaRequest_User_DEFAULT string -func (p *TWaitingTxnStatusRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TWaitingTxnStatusRequest_TxnId_DEFAULT +func (p *TGetMetaRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TGetMetaRequest_User_DEFAULT } - return *p.TxnId + return *p.User } -var TWaitingTxnStatusRequest_Label_DEFAULT string +var TGetMetaRequest_Passwd_DEFAULT string -func (p *TWaitingTxnStatusRequest) GetLabel() (v string) { - if !p.IsSetLabel() { - return TWaitingTxnStatusRequest_Label_DEFAULT +func (p *TGetMetaRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TGetMetaRequest_Passwd_DEFAULT } - return *p.Label + return *p.Passwd } -func (p *TWaitingTxnStatusRequest) SetDbId(val *int64) { - p.DbId = val + +var TGetMetaRequest_UserIp_DEFAULT string + +func (p *TGetMetaRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TGetMetaRequest_UserIp_DEFAULT + } + return *p.UserIp } -func (p 
*TWaitingTxnStatusRequest) SetTxnId(val *int64) { - p.TxnId = val + +var TGetMetaRequest_Token_DEFAULT string + +func (p *TGetMetaRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TGetMetaRequest_Token_DEFAULT + } + return *p.Token } -func (p *TWaitingTxnStatusRequest) SetLabel(val *string) { - p.Label = val + +var TGetMetaRequest_Db_DEFAULT *TGetMetaDB + +func (p *TGetMetaRequest) GetDb() (v *TGetMetaDB) { + if !p.IsSetDb() { + return TGetMetaRequest_Db_DEFAULT + } + return p.Db +} +func (p *TGetMetaRequest) SetCluster(val *string) { + p.Cluster = val +} +func (p *TGetMetaRequest) SetUser(val *string) { + p.User = val +} +func (p *TGetMetaRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TGetMetaRequest) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TGetMetaRequest) SetToken(val *string) { + p.Token = val +} +func (p *TGetMetaRequest) SetDb(val *TGetMetaDB) { + p.Db = val } -var fieldIDToName_TWaitingTxnStatusRequest = map[int16]string{ - 1: "db_id", - 2: "txn_id", - 3: "label", +var fieldIDToName_TGetMetaRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "user_ip", + 5: "token", + 6: "db", } -func (p *TWaitingTxnStatusRequest) IsSetDbId() bool { - return p.DbId != nil +func (p *TGetMetaRequest) IsSetCluster() bool { + return p.Cluster != nil } -func (p *TWaitingTxnStatusRequest) IsSetTxnId() bool { - return p.TxnId != nil +func (p *TGetMetaRequest) IsSetUser() bool { + return p.User != nil } -func (p *TWaitingTxnStatusRequest) IsSetLabel() bool { - return p.Label != nil +func (p *TGetMetaRequest) IsSetPasswd() bool { + return p.Passwd != nil } -func (p *TWaitingTxnStatusRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TGetMetaRequest) IsSetToken() bool { + return p.Token != nil +} + +func (p *TGetMetaRequest) IsSetDb() bool { + return p.Db != nil +} + +func (p *TGetMetaRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -38213,41 +68776,58 @@ func (p *TWaitingTxnStatusRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + 
goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -38262,7 +68842,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -38272,36 +68852,73 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TGetMetaRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbId = &v + _field = &v } + p.Cluster = _field return nil } +func (p *TGetMetaRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TWaitingTxnStatusRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.TxnId = &v + _field = &v } + p.User = _field return nil } +func (p *TGetMetaRequest) ReadField3(iprot thrift.TProtocol) error { -func (p *TWaitingTxnStatusRequest) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Label = &v + _field = &v } + p.Passwd = _field return nil } +func (p *TGetMetaRequest) ReadField4(iprot thrift.TProtocol) error { -func (p *TWaitingTxnStatusRequest) Write(oprot thrift.TProtocol) (err error) { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil +} +func (p *TGetMetaRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TGetMetaRequest) ReadField6(iprot thrift.TProtocol) error { + _field := NewTGetMetaDB() + if err := _field.Read(iprot); err != nil { + return err + } + p.Db = _field + return nil +} + +func (p *TGetMetaRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TWaitingTxnStatusRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -38317,31 +68934,99 @@ func (p *TWaitingTxnStatusRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto 
WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGetMetaRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetMetaRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.User); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetMetaRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Passwd); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 1); err != nil { +func (p *TGetMetaRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteString(*p.UserIp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38350,17 +69035,17 @@ func (p *TWaitingTxnStatusRequest) writeField1(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin 
error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 2); err != nil { +func (p *TGetMetaRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TxnId); err != nil { + if err := oprot.WriteString(*p.Token); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38369,17 +69054,17 @@ func (p *TWaitingTxnStatusRequest) writeField2(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetLabel() { - if err = oprot.WriteFieldBegin("label", thrift.STRING, 3); err != nil { +func (p *TGetMetaRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRUCT, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Label); err != nil { + if err := p.Db.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38388,124 +69073,182 @@ func (p *TWaitingTxnStatusRequest) writeField3(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) String() string { +func (p *TGetMetaRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TWaitingTxnStatusRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaRequest(%+v)", *p) + } -func (p *TWaitingTxnStatusRequest) DeepEqual(ano *TWaitingTxnStatusRequest) bool { +func (p *TGetMetaRequest) DeepEqual(ano *TGetMetaRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.DbId) { + if !p.Field1DeepEqual(ano.Cluster) { return false } - if !p.Field2DeepEqual(ano.TxnId) { + if !p.Field2DeepEqual(ano.User) { return false } - if !p.Field3DeepEqual(ano.Label) { + if !p.Field3DeepEqual(ano.Passwd) { + return false + } + if !p.Field4DeepEqual(ano.UserIp) { + return false + } + if !p.Field5DeepEqual(ano.Token) { + return false + } + if !p.Field6DeepEqual(ano.Db) { return false } return true } -func (p *TWaitingTxnStatusRequest) Field1DeepEqual(src *int64) bool { +func (p *TGetMetaRequest) Field1DeepEqual(src *string) bool { - if p.DbId == src { + if p.Cluster == src { return true - } else if p.DbId == nil || src == nil { + } else if p.Cluster == nil || src == nil { return false } - if *p.DbId != *src { + if strings.Compare(*p.Cluster, *src) != 0 { return 
false } return true } -func (p *TWaitingTxnStatusRequest) Field2DeepEqual(src *int64) bool { +func (p *TGetMetaRequest) Field2DeepEqual(src *string) bool { - if p.TxnId == src { + if p.User == src { return true - } else if p.TxnId == nil || src == nil { + } else if p.User == nil || src == nil { return false } - if *p.TxnId != *src { + if strings.Compare(*p.User, *src) != 0 { return false } return true } -func (p *TWaitingTxnStatusRequest) Field3DeepEqual(src *string) bool { +func (p *TGetMetaRequest) Field3DeepEqual(src *string) bool { - if p.Label == src { + if p.Passwd == src { return true - } else if p.Label == nil || src == nil { + } else if p.Passwd == nil || src == nil { return false } - if strings.Compare(*p.Label, *src) != 0 { + if strings.Compare(*p.Passwd, *src) != 0 { return false } return true } +func (p *TGetMetaRequest) Field4DeepEqual(src *string) bool { -type TWaitingTxnStatusResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - TxnStatusId *int32 `thrift:"txn_status_id,2,optional" frugal:"2,optional,i32" json:"txn_status_id,omitempty"` + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + return false + } + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true } +func (p *TGetMetaRequest) Field5DeepEqual(src *string) bool { -func NewTWaitingTxnStatusResult_() *TWaitingTxnStatusResult_ { - return &TWaitingTxnStatusResult_{} + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true } +func (p *TGetMetaRequest) Field6DeepEqual(src *TGetMetaDB) bool { -func (p *TWaitingTxnStatusResult_) InitDefault() { - *p = TWaitingTxnStatusResult_{} + if !p.Db.DeepEqual(src) { + return false + } + return true } -var TWaitingTxnStatusResult__Status_DEFAULT *status.TStatus +type TGetMetaReplicaMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + BackendId *int64 `thrift:"backend_id,2,optional" frugal:"2,optional,i64" json:"backend_id,omitempty"` + Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"` +} -func (p *TWaitingTxnStatusResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TWaitingTxnStatusResult__Status_DEFAULT +func NewTGetMetaReplicaMeta() *TGetMetaReplicaMeta { + return &TGetMetaReplicaMeta{} +} + +func (p *TGetMetaReplicaMeta) InitDefault() { +} + +var TGetMetaReplicaMeta_Id_DEFAULT int64 + +func (p *TGetMetaReplicaMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaReplicaMeta_Id_DEFAULT } - return p.Status + return *p.Id } -var TWaitingTxnStatusResult__TxnStatusId_DEFAULT int32 +var TGetMetaReplicaMeta_BackendId_DEFAULT int64 -func (p *TWaitingTxnStatusResult_) GetTxnStatusId() (v int32) { - if !p.IsSetTxnStatusId() { - return TWaitingTxnStatusResult__TxnStatusId_DEFAULT +func (p *TGetMetaReplicaMeta) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TGetMetaReplicaMeta_BackendId_DEFAULT } - return *p.TxnStatusId + return *p.BackendId } -func (p *TWaitingTxnStatusResult_) SetStatus(val *status.TStatus) { - p.Status = val + +var TGetMetaReplicaMeta_Version_DEFAULT int64 + +func (p *TGetMetaReplicaMeta) GetVersion() (v int64) { + if !p.IsSetVersion() { + return TGetMetaReplicaMeta_Version_DEFAULT + } + return *p.Version } -func (p *TWaitingTxnStatusResult_) SetTxnStatusId(val *int32) { - p.TxnStatusId = val 
+func (p *TGetMetaReplicaMeta) SetId(val *int64) { + p.Id = val +} +func (p *TGetMetaReplicaMeta) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TGetMetaReplicaMeta) SetVersion(val *int64) { + p.Version = val } -var fieldIDToName_TWaitingTxnStatusResult_ = map[int16]string{ - 1: "status", - 2: "txn_status_id", +var fieldIDToName_TGetMetaReplicaMeta = map[int16]string{ + 1: "id", + 2: "backend_id", + 3: "version", } -func (p *TWaitingTxnStatusResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TGetMetaReplicaMeta) IsSetId() bool { + return p.Id != nil } -func (p *TWaitingTxnStatusResult_) IsSetTxnStatusId() bool { - return p.TxnStatusId != nil +func (p *TGetMetaReplicaMeta) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TWaitingTxnStatusResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaReplicaMeta) IsSetVersion() bool { + return p.Version != nil +} + +func (p *TGetMetaReplicaMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -38525,31 +69268,34 @@ func (p *TWaitingTxnStatusResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -38564,7 +69310,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaReplicaMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -38574,26 +69320,43 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TWaitingTxnStatusResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TGetMetaReplicaMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaReplicaMeta) ReadField2(iprot thrift.TProtocol) error { -func (p *TWaitingTxnStatusResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnStatusId = &v + _field = &v } + p.BackendId = _field return nil } +func (p 
*TGetMetaReplicaMeta) ReadField3(iprot thrift.TProtocol) error { -func (p *TWaitingTxnStatusResult_) Write(oprot thrift.TProtocol) (err error) { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} + +func (p *TGetMetaReplicaMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TWaitingTxnStatusResult"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaReplicaMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -38605,7 +69368,10 @@ func (p *TWaitingTxnStatusResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -38624,12 +69390,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TWaitingTxnStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TGetMetaReplicaMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38643,12 +69409,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TWaitingTxnStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnStatusId() { - if err = oprot.WriteFieldBegin("txn_status_id", thrift.I32, 2); err != nil { +func (p *TGetMetaReplicaMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.TxnStatusId); err != nil { + if err := oprot.WriteI64(*p.BackendId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38662,117 +69428,138 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TWaitingTxnStatusResult_) String() string { +func (p *TGetMetaReplicaMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMetaReplicaMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TWaitingTxnStatusResult_(%+v)", *p) + return fmt.Sprintf("TGetMetaReplicaMeta(%+v)", *p) + } -func (p *TWaitingTxnStatusResult_) DeepEqual(ano *TWaitingTxnStatusResult_) bool { +func (p *TGetMetaReplicaMeta) DeepEqual(ano *TGetMetaReplicaMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Id) { return false } - if 
!p.Field2DeepEqual(ano.TxnStatusId) { + if !p.Field2DeepEqual(ano.BackendId) { + return false + } + if !p.Field3DeepEqual(ano.Version) { return false } return true } -func (p *TWaitingTxnStatusResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TGetMetaReplicaMeta) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { return false } return true } -func (p *TWaitingTxnStatusResult_) Field2DeepEqual(src *int32) bool { +func (p *TGetMetaReplicaMeta) Field2DeepEqual(src *int64) bool { - if p.TxnStatusId == src { + if p.BackendId == src { return true - } else if p.TxnStatusId == nil || src == nil { + } else if p.BackendId == nil || src == nil { return false } - if *p.TxnStatusId != *src { + if *p.BackendId != *src { return false } return true } +func (p *TGetMetaReplicaMeta) Field3DeepEqual(src *int64) bool { -type TInitExternalCtlMetaRequest struct { - CatalogId *int64 `thrift:"catalogId,1,optional" frugal:"1,optional,i64" json:"catalogId,omitempty"` - DbId *int64 `thrift:"dbId,2,optional" frugal:"2,optional,i64" json:"dbId,omitempty"` - TableId *int64 `thrift:"tableId,3,optional" frugal:"3,optional,i64" json:"tableId,omitempty"` + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false + } + if *p.Version != *src { + return false + } + return true } -func NewTInitExternalCtlMetaRequest() *TInitExternalCtlMetaRequest { - return &TInitExternalCtlMetaRequest{} +type TGetMetaTabletMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Replicas []*TGetMetaReplicaMeta `thrift:"replicas,2,optional" frugal:"2,optional,list" json:"replicas,omitempty"` } -func (p *TInitExternalCtlMetaRequest) InitDefault() { - *p = TInitExternalCtlMetaRequest{} +func NewTGetMetaTabletMeta() *TGetMetaTabletMeta { + return &TGetMetaTabletMeta{} } -var TInitExternalCtlMetaRequest_CatalogId_DEFAULT int64 - -func (p *TInitExternalCtlMetaRequest) GetCatalogId() (v int64) { - if !p.IsSetCatalogId() { - return TInitExternalCtlMetaRequest_CatalogId_DEFAULT - } - return *p.CatalogId +func (p *TGetMetaTabletMeta) InitDefault() { } -var TInitExternalCtlMetaRequest_DbId_DEFAULT int64 +var TGetMetaTabletMeta_Id_DEFAULT int64 -func (p *TInitExternalCtlMetaRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TInitExternalCtlMetaRequest_DbId_DEFAULT +func (p *TGetMetaTabletMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaTabletMeta_Id_DEFAULT } - return *p.DbId + return *p.Id } -var TInitExternalCtlMetaRequest_TableId_DEFAULT int64 +var TGetMetaTabletMeta_Replicas_DEFAULT []*TGetMetaReplicaMeta -func (p *TInitExternalCtlMetaRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TInitExternalCtlMetaRequest_TableId_DEFAULT +func (p *TGetMetaTabletMeta) GetReplicas() (v []*TGetMetaReplicaMeta) { + if !p.IsSetReplicas() { + return TGetMetaTabletMeta_Replicas_DEFAULT } - return *p.TableId -} -func (p *TInitExternalCtlMetaRequest) SetCatalogId(val *int64) { - p.CatalogId = val -} -func (p *TInitExternalCtlMetaRequest) SetDbId(val *int64) { - p.DbId = val + return p.Replicas } -func (p *TInitExternalCtlMetaRequest) SetTableId(val *int64) { - p.TableId = val +func (p *TGetMetaTabletMeta) SetId(val *int64) { + p.Id = val } - -var fieldIDToName_TInitExternalCtlMetaRequest = map[int16]string{ - 1: "catalogId", - 2: "dbId", - 3: "tableId", +func (p *TGetMetaTabletMeta) SetReplicas(val 
[]*TGetMetaReplicaMeta) { + p.Replicas = val } -func (p *TInitExternalCtlMetaRequest) IsSetCatalogId() bool { - return p.CatalogId != nil +var fieldIDToName_TGetMetaTabletMeta = map[int16]string{ + 1: "id", + 2: "replicas", } -func (p *TInitExternalCtlMetaRequest) IsSetDbId() bool { - return p.DbId != nil +func (p *TGetMetaTabletMeta) IsSetId() bool { + return p.Id != nil } -func (p *TInitExternalCtlMetaRequest) IsSetTableId() bool { - return p.TableId != nil +func (p *TGetMetaTabletMeta) IsSetReplicas() bool { + return p.Replicas != nil } -func (p *TInitExternalCtlMetaRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaTabletMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -38796,37 +69583,22 @@ func (p *TInitExternalCtlMetaRequest) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -38841,7 +69613,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTabletMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -38851,36 +69623,44 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TInitExternalCtlMetaRequest) ReadField1(iprot thrift.TProtocol) error { +func (p *TGetMetaTabletMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CatalogId = &v + _field = &v } + p.Id = _field return nil } - -func (p *TInitExternalCtlMetaRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TGetMetaTabletMeta) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.DbId = &v } - return nil -} + _field := make([]*TGetMetaReplicaMeta, 0, size) + values := make([]TGetMetaReplicaMeta, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TInitExternalCtlMetaRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.TableId = &v } + 
p.Replicas = _field return nil } -func (p *TInitExternalCtlMetaRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaTabletMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TInitExternalCtlMetaRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaTabletMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -38892,11 +69672,6 @@ func (p *TInitExternalCtlMetaRequest) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -38915,12 +69690,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TInitExternalCtlMetaRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCatalogId() { - if err = oprot.WriteFieldBegin("catalogId", thrift.I64, 1); err != nil { +func (p *TGetMetaTabletMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.CatalogId); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38934,31 +69709,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TInitExternalCtlMetaRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("dbId", thrift.I64, 2); err != nil { +func (p *TGetMetaTabletMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetReplicas() { + if err = oprot.WriteFieldBegin("replicas", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.DbId); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Replicas)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TInitExternalCtlMetaRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("tableId", thrift.I64, 3); err != nil { - goto WriteFieldBeginError + for _, v := range p.Replicas { + if err := v.Write(oprot); err != nil { + return err + } } - if err := oprot.WriteI64(*p.TableId); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -38967,124 +69731,128 @@ func (p *TInitExternalCtlMetaRequest) writeField3(oprot thrift.TProtocol) (err e } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TInitExternalCtlMetaRequest) String() string { +func (p *TGetMetaTabletMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TInitExternalCtlMetaRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaTabletMeta(%+v)", *p) + } 
-func (p *TInitExternalCtlMetaRequest) DeepEqual(ano *TInitExternalCtlMetaRequest) bool { +func (p *TGetMetaTabletMeta) DeepEqual(ano *TGetMetaTabletMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.CatalogId) { - return false - } - if !p.Field2DeepEqual(ano.DbId) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field3DeepEqual(ano.TableId) { + if !p.Field2DeepEqual(ano.Replicas) { return false } return true } -func (p *TInitExternalCtlMetaRequest) Field1DeepEqual(src *int64) bool { +func (p *TGetMetaTabletMeta) Field1DeepEqual(src *int64) bool { - if p.CatalogId == src { + if p.Id == src { return true - } else if p.CatalogId == nil || src == nil { + } else if p.Id == nil || src == nil { return false } - if *p.CatalogId != *src { + if *p.Id != *src { return false } return true } -func (p *TInitExternalCtlMetaRequest) Field2DeepEqual(src *int64) bool { +func (p *TGetMetaTabletMeta) Field2DeepEqual(src []*TGetMetaReplicaMeta) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { + if len(p.Replicas) != len(src) { return false } - if *p.DbId != *src { - return false + for i, v := range p.Replicas { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TInitExternalCtlMetaRequest) Field3DeepEqual(src *int64) bool { - if p.TableId == src { - return true - } else if p.TableId == nil || src == nil { - return false - } - if *p.TableId != *src { - return false - } - return true +type TGetMetaIndexMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Tablets []*TGetMetaTabletMeta `thrift:"tablets,3,optional" frugal:"3,optional,list" json:"tablets,omitempty"` } -type TInitExternalCtlMetaResult_ struct { - MaxJournalId *int64 `thrift:"maxJournalId,1,optional" frugal:"1,optional,i64" json:"maxJournalId,omitempty"` - Status *string `thrift:"status,2,optional" frugal:"2,optional,string" json:"status,omitempty"` +func NewTGetMetaIndexMeta() *TGetMetaIndexMeta { + return &TGetMetaIndexMeta{} } -func NewTInitExternalCtlMetaResult_() *TInitExternalCtlMetaResult_ { - return &TInitExternalCtlMetaResult_{} +func (p *TGetMetaIndexMeta) InitDefault() { } -func (p *TInitExternalCtlMetaResult_) InitDefault() { - *p = TInitExternalCtlMetaResult_{} +var TGetMetaIndexMeta_Id_DEFAULT int64 + +func (p *TGetMetaIndexMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaIndexMeta_Id_DEFAULT + } + return *p.Id } -var TInitExternalCtlMetaResult__MaxJournalId_DEFAULT int64 +var TGetMetaIndexMeta_Name_DEFAULT string -func (p *TInitExternalCtlMetaResult_) GetMaxJournalId() (v int64) { - if !p.IsSetMaxJournalId() { - return TInitExternalCtlMetaResult__MaxJournalId_DEFAULT +func (p *TGetMetaIndexMeta) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaIndexMeta_Name_DEFAULT } - return *p.MaxJournalId + return *p.Name } -var TInitExternalCtlMetaResult__Status_DEFAULT string +var TGetMetaIndexMeta_Tablets_DEFAULT []*TGetMetaTabletMeta -func (p *TInitExternalCtlMetaResult_) GetStatus() (v string) { - if !p.IsSetStatus() { - return TInitExternalCtlMetaResult__Status_DEFAULT +func (p *TGetMetaIndexMeta) GetTablets() (v []*TGetMetaTabletMeta) { + if !p.IsSetTablets() { + return TGetMetaIndexMeta_Tablets_DEFAULT } - return *p.Status + return p.Tablets } -func (p *TInitExternalCtlMetaResult_) SetMaxJournalId(val *int64) { - p.MaxJournalId = val 
+func (p *TGetMetaIndexMeta) SetId(val *int64) { + p.Id = val } -func (p *TInitExternalCtlMetaResult_) SetStatus(val *string) { - p.Status = val +func (p *TGetMetaIndexMeta) SetName(val *string) { + p.Name = val +} +func (p *TGetMetaIndexMeta) SetTablets(val []*TGetMetaTabletMeta) { + p.Tablets = val } -var fieldIDToName_TInitExternalCtlMetaResult_ = map[int16]string{ - 1: "maxJournalId", - 2: "status", +var fieldIDToName_TGetMetaIndexMeta = map[int16]string{ + 1: "id", + 2: "name", + 3: "tablets", } -func (p *TInitExternalCtlMetaResult_) IsSetMaxJournalId() bool { - return p.MaxJournalId != nil +func (p *TGetMetaIndexMeta) IsSetId() bool { + return p.Id != nil } -func (p *TInitExternalCtlMetaResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TGetMetaIndexMeta) IsSetName() bool { + return p.Name != nil } -func (p *TInitExternalCtlMetaResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaIndexMeta) IsSetTablets() bool { + return p.Tablets != nil +} + +func (p *TGetMetaIndexMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -39108,27 +69876,30 @@ func (p *TInitExternalCtlMetaResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -39143,7 +69914,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaIndexMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -39153,27 +69924,55 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TInitExternalCtlMetaResult_) ReadField1(iprot thrift.TProtocol) error { +func (p *TGetMetaIndexMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxJournalId = &v + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaIndexMeta) ReadField2(iprot thrift.TProtocol) error { -func (p *TInitExternalCtlMetaResult_) ReadField2(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Status = &v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaIndexMeta) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := 
make([]*TGetMetaTabletMeta, 0, size) + values := make([]TGetMetaTabletMeta, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TInitExternalCtlMetaResult_) Write(oprot thrift.TProtocol) (err error) { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tablets = _field + return nil +} + +func (p *TGetMetaIndexMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TInitExternalCtlMetaResult"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaIndexMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -39185,7 +69984,10 @@ func (p *TInitExternalCtlMetaResult_) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -39204,12 +70006,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TInitExternalCtlMetaResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxJournalId() { - if err = oprot.WriteFieldBegin("maxJournalId", thrift.I64, 1); err != nil { +func (p *TGetMetaIndexMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.MaxJournalId); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39223,12 +70025,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TInitExternalCtlMetaResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRING, 2); err != nil { +func (p *TGetMetaIndexMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Status); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39242,176 +70044,237 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TInitExternalCtlMetaResult_) String() string { +func (p *TGetMetaIndexMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTablets() { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { + return err + } + for _, v := range p.Tablets { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMetaIndexMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TInitExternalCtlMetaResult_(%+v)", *p) + return fmt.Sprintf("TGetMetaIndexMeta(%+v)", *p) + } -func (p 
*TInitExternalCtlMetaResult_) DeepEqual(ano *TInitExternalCtlMetaResult_) bool { +func (p *TGetMetaIndexMeta) DeepEqual(ano *TGetMetaIndexMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.MaxJournalId) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.Status) { + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Tablets) { return false } return true } -func (p *TInitExternalCtlMetaResult_) Field1DeepEqual(src *int64) bool { +func (p *TGetMetaIndexMeta) Field1DeepEqual(src *int64) bool { - if p.MaxJournalId == src { + if p.Id == src { return true - } else if p.MaxJournalId == nil || src == nil { + } else if p.Id == nil || src == nil { return false } - if *p.MaxJournalId != *src { + if *p.Id != *src { return false } return true } -func (p *TInitExternalCtlMetaResult_) Field2DeepEqual(src *string) bool { +func (p *TGetMetaIndexMeta) Field2DeepEqual(src *string) bool { - if p.Status == src { + if p.Name == src { return true - } else if p.Status == nil || src == nil { + } else if p.Name == nil || src == nil { return false } - if strings.Compare(*p.Status, *src) != 0 { + if strings.Compare(*p.Name, *src) != 0 { return false } return true } +func (p *TGetMetaIndexMeta) Field3DeepEqual(src []*TGetMetaTabletMeta) bool { -type TMetadataTableRequestParams struct { - MetadataType *types.TMetadataType `thrift:"metadata_type,1,optional" frugal:"1,optional,TMetadataType" json:"metadata_type,omitempty"` - IcebergMetadataParams *plannodes.TIcebergMetadataParams `thrift:"iceberg_metadata_params,2,optional" frugal:"2,optional,plannodes.TIcebergMetadataParams" json:"iceberg_metadata_params,omitempty"` - BackendsMetadataParams *plannodes.TBackendsMetadataParams `thrift:"backends_metadata_params,3,optional" frugal:"3,optional,plannodes.TBackendsMetadataParams" json:"backends_metadata_params,omitempty"` - ColumnsName []string `thrift:"columns_name,4,optional" frugal:"4,optional,list" json:"columns_name,omitempty"` - FrontendsMetadataParams *plannodes.TFrontendsMetadataParams `thrift:"frontends_metadata_params,5,optional" frugal:"5,optional,plannodes.TFrontendsMetadataParams" json:"frontends_metadata_params,omitempty"` - CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,6,optional" frugal:"6,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + if len(p.Tablets) != len(src) { + return false + } + for i, v := range p.Tablets { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } -func NewTMetadataTableRequestParams() *TMetadataTableRequestParams { - return &TMetadataTableRequestParams{} +type TGetMetaPartitionMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Key *string `thrift:"key,3,optional" frugal:"3,optional,string" json:"key,omitempty"` + Range *string `thrift:"range,4,optional" frugal:"4,optional,string" json:"range,omitempty"` + VisibleVersion *int64 `thrift:"visible_version,5,optional" frugal:"5,optional,i64" json:"visible_version,omitempty"` + IsTemp *bool `thrift:"is_temp,6,optional" frugal:"6,optional,bool" json:"is_temp,omitempty"` + Indexes []*TGetMetaIndexMeta `thrift:"indexes,7,optional" frugal:"7,optional,list" json:"indexes,omitempty"` } -func (p *TMetadataTableRequestParams) InitDefault() { - *p = TMetadataTableRequestParams{} +func NewTGetMetaPartitionMeta() 
*TGetMetaPartitionMeta { + return &TGetMetaPartitionMeta{} } -var TMetadataTableRequestParams_MetadataType_DEFAULT types.TMetadataType +func (p *TGetMetaPartitionMeta) InitDefault() { +} -func (p *TMetadataTableRequestParams) GetMetadataType() (v types.TMetadataType) { - if !p.IsSetMetadataType() { - return TMetadataTableRequestParams_MetadataType_DEFAULT +var TGetMetaPartitionMeta_Id_DEFAULT int64 + +func (p *TGetMetaPartitionMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaPartitionMeta_Id_DEFAULT } - return *p.MetadataType + return *p.Id } -var TMetadataTableRequestParams_IcebergMetadataParams_DEFAULT *plannodes.TIcebergMetadataParams +var TGetMetaPartitionMeta_Name_DEFAULT string -func (p *TMetadataTableRequestParams) GetIcebergMetadataParams() (v *plannodes.TIcebergMetadataParams) { - if !p.IsSetIcebergMetadataParams() { - return TMetadataTableRequestParams_IcebergMetadataParams_DEFAULT +func (p *TGetMetaPartitionMeta) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaPartitionMeta_Name_DEFAULT } - return p.IcebergMetadataParams + return *p.Name } -var TMetadataTableRequestParams_BackendsMetadataParams_DEFAULT *plannodes.TBackendsMetadataParams +var TGetMetaPartitionMeta_Key_DEFAULT string -func (p *TMetadataTableRequestParams) GetBackendsMetadataParams() (v *plannodes.TBackendsMetadataParams) { - if !p.IsSetBackendsMetadataParams() { - return TMetadataTableRequestParams_BackendsMetadataParams_DEFAULT +func (p *TGetMetaPartitionMeta) GetKey() (v string) { + if !p.IsSetKey() { + return TGetMetaPartitionMeta_Key_DEFAULT } - return p.BackendsMetadataParams + return *p.Key } -var TMetadataTableRequestParams_ColumnsName_DEFAULT []string +var TGetMetaPartitionMeta_Range_DEFAULT string -func (p *TMetadataTableRequestParams) GetColumnsName() (v []string) { - if !p.IsSetColumnsName() { - return TMetadataTableRequestParams_ColumnsName_DEFAULT +func (p *TGetMetaPartitionMeta) GetRange() (v string) { + if !p.IsSetRange() { + return TGetMetaPartitionMeta_Range_DEFAULT } - return p.ColumnsName + return *p.Range } -var TMetadataTableRequestParams_FrontendsMetadataParams_DEFAULT *plannodes.TFrontendsMetadataParams +var TGetMetaPartitionMeta_VisibleVersion_DEFAULT int64 -func (p *TMetadataTableRequestParams) GetFrontendsMetadataParams() (v *plannodes.TFrontendsMetadataParams) { - if !p.IsSetFrontendsMetadataParams() { - return TMetadataTableRequestParams_FrontendsMetadataParams_DEFAULT +func (p *TGetMetaPartitionMeta) GetVisibleVersion() (v int64) { + if !p.IsSetVisibleVersion() { + return TGetMetaPartitionMeta_VisibleVersion_DEFAULT } - return p.FrontendsMetadataParams + return *p.VisibleVersion } -var TMetadataTableRequestParams_CurrentUserIdent_DEFAULT *types.TUserIdentity +var TGetMetaPartitionMeta_IsTemp_DEFAULT bool -func (p *TMetadataTableRequestParams) GetCurrentUserIdent() (v *types.TUserIdentity) { - if !p.IsSetCurrentUserIdent() { - return TMetadataTableRequestParams_CurrentUserIdent_DEFAULT +func (p *TGetMetaPartitionMeta) GetIsTemp() (v bool) { + if !p.IsSetIsTemp() { + return TGetMetaPartitionMeta_IsTemp_DEFAULT } - return p.CurrentUserIdent + return *p.IsTemp } -func (p *TMetadataTableRequestParams) SetMetadataType(val *types.TMetadataType) { - p.MetadataType = val + +var TGetMetaPartitionMeta_Indexes_DEFAULT []*TGetMetaIndexMeta + +func (p *TGetMetaPartitionMeta) GetIndexes() (v []*TGetMetaIndexMeta) { + if !p.IsSetIndexes() { + return TGetMetaPartitionMeta_Indexes_DEFAULT + } + return p.Indexes } -func (p *TMetadataTableRequestParams) 
SetIcebergMetadataParams(val *plannodes.TIcebergMetadataParams) { - p.IcebergMetadataParams = val +func (p *TGetMetaPartitionMeta) SetId(val *int64) { + p.Id = val } -func (p *TMetadataTableRequestParams) SetBackendsMetadataParams(val *plannodes.TBackendsMetadataParams) { - p.BackendsMetadataParams = val +func (p *TGetMetaPartitionMeta) SetName(val *string) { + p.Name = val } -func (p *TMetadataTableRequestParams) SetColumnsName(val []string) { - p.ColumnsName = val +func (p *TGetMetaPartitionMeta) SetKey(val *string) { + p.Key = val } -func (p *TMetadataTableRequestParams) SetFrontendsMetadataParams(val *plannodes.TFrontendsMetadataParams) { - p.FrontendsMetadataParams = val +func (p *TGetMetaPartitionMeta) SetRange(val *string) { + p.Range = val } -func (p *TMetadataTableRequestParams) SetCurrentUserIdent(val *types.TUserIdentity) { - p.CurrentUserIdent = val +func (p *TGetMetaPartitionMeta) SetVisibleVersion(val *int64) { + p.VisibleVersion = val +} +func (p *TGetMetaPartitionMeta) SetIsTemp(val *bool) { + p.IsTemp = val +} +func (p *TGetMetaPartitionMeta) SetIndexes(val []*TGetMetaIndexMeta) { + p.Indexes = val } -var fieldIDToName_TMetadataTableRequestParams = map[int16]string{ - 1: "metadata_type", - 2: "iceberg_metadata_params", - 3: "backends_metadata_params", - 4: "columns_name", - 5: "frontends_metadata_params", - 6: "current_user_ident", +var fieldIDToName_TGetMetaPartitionMeta = map[int16]string{ + 1: "id", + 2: "name", + 3: "key", + 4: "range", + 5: "visible_version", + 6: "is_temp", + 7: "indexes", } -func (p *TMetadataTableRequestParams) IsSetMetadataType() bool { - return p.MetadataType != nil +func (p *TGetMetaPartitionMeta) IsSetId() bool { + return p.Id != nil } -func (p *TMetadataTableRequestParams) IsSetIcebergMetadataParams() bool { - return p.IcebergMetadataParams != nil +func (p *TGetMetaPartitionMeta) IsSetName() bool { + return p.Name != nil } -func (p *TMetadataTableRequestParams) IsSetBackendsMetadataParams() bool { - return p.BackendsMetadataParams != nil +func (p *TGetMetaPartitionMeta) IsSetKey() bool { + return p.Key != nil } -func (p *TMetadataTableRequestParams) IsSetColumnsName() bool { - return p.ColumnsName != nil +func (p *TGetMetaPartitionMeta) IsSetRange() bool { + return p.Range != nil } -func (p *TMetadataTableRequestParams) IsSetFrontendsMetadataParams() bool { - return p.FrontendsMetadataParams != nil +func (p *TGetMetaPartitionMeta) IsSetVisibleVersion() bool { + return p.VisibleVersion != nil } -func (p *TMetadataTableRequestParams) IsSetCurrentUserIdent() bool { - return p.CurrentUserIdent != nil +func (p *TGetMetaPartitionMeta) IsSetIsTemp() bool { + return p.IsTemp != nil } -func (p *TMetadataTableRequestParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaPartitionMeta) IsSetIndexes() bool { + return p.Indexes != nil +} + +func (p *TGetMetaPartitionMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -39431,71 +70294,66 @@ func (p *TMetadataTableRequestParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err 
= iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -39510,7 +70368,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMetadataTableRequestParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaPartitionMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -39520,73 +70378,99 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TMetadataTableRequestParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TGetMetaPartitionMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - tmp := types.TMetadataType(v) - p.MetadataType = &tmp + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaPartitionMeta) ReadField2(iprot thrift.TProtocol) error { -func (p *TMetadataTableRequestParams) ReadField2(iprot thrift.TProtocol) error { - p.IcebergMetadataParams = plannodes.NewTIcebergMetadataParams() - if err := p.IcebergMetadataParams.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaPartitionMeta) ReadField3(iprot thrift.TProtocol) error { -func (p *TMetadataTableRequestParams) ReadField3(iprot thrift.TProtocol) error { - p.BackendsMetadataParams = plannodes.NewTBackendsMetadataParams() - if err := p.BackendsMetadataParams.Read(iprot); err != nil { + var _field *string 
+ if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Key = _field return nil } +func (p *TGetMetaPartitionMeta) ReadField4(iprot thrift.TProtocol) error { -func (p *TMetadataTableRequestParams) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } - p.ColumnsName = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + p.Range = _field + return nil +} +func (p *TGetMetaPartitionMeta) ReadField5(iprot thrift.TProtocol) error { - p.ColumnsName = append(p.ColumnsName, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.VisibleVersion = _field return nil } +func (p *TGetMetaPartitionMeta) ReadField6(iprot thrift.TProtocol) error { -func (p *TMetadataTableRequestParams) ReadField5(iprot thrift.TProtocol) error { - p.FrontendsMetadataParams = plannodes.NewTFrontendsMetadataParams() - if err := p.FrontendsMetadataParams.Read(iprot); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } + p.IsTemp = _field return nil } +func (p *TGetMetaPartitionMeta) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TGetMetaIndexMeta, 0, size) + values := make([]TGetMetaIndexMeta, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TMetadataTableRequestParams) ReadField6(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.Indexes = _field return nil } -func (p *TMetadataTableRequestParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaPartitionMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TMetadataTableRequestParams"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaPartitionMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -39614,7 +70498,10 @@ func (p *TMetadataTableRequestParams) Write(oprot thrift.TProtocol) (err error) fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -39633,12 +70520,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetMetadataType() { - if err = oprot.WriteFieldBegin("metadata_type", thrift.I32, 1); err != nil { +func (p *TGetMetaPartitionMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.MetadataType)); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39652,12 +70539,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T 
write field 1 end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetIcebergMetadataParams() { - if err = oprot.WriteFieldBegin("iceberg_metadata_params", thrift.STRUCT, 2); err != nil { +func (p *TGetMetaPartitionMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := p.IcebergMetadataParams.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39671,12 +70558,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendsMetadataParams() { - if err = oprot.WriteFieldBegin("backends_metadata_params", thrift.STRUCT, 3); err != nil { +func (p *TGetMetaPartitionMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetKey() { + if err = oprot.WriteFieldBegin("key", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := p.BackendsMetadataParams.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Key); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39690,20 +70577,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnsName() { - if err = oprot.WriteFieldBegin("columns_name", thrift.LIST, 4); err != nil { +func (p *TGetMetaPartitionMeta) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRange() { + if err = oprot.WriteFieldBegin("range", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsName)); err != nil { - return err - } - for _, v := range p.ColumnsName { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.Range); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39717,12 +70596,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetFrontendsMetadataParams() { - if err = oprot.WriteFieldBegin("frontends_metadata_params", thrift.STRUCT, 5); err != nil { +func (p *TGetMetaPartitionMeta) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetVisibleVersion() { + if err = oprot.WriteFieldBegin("visible_version", thrift.I64, 5); err != nil { goto WriteFieldBeginError } - if err := p.FrontendsMetadataParams.Write(oprot); err != nil { + if err := oprot.WriteI64(*p.VisibleVersion); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39736,12 +70615,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TMetadataTableRequestParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetCurrentUserIdent() { - if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 6); err != nil { +func (p *TGetMetaPartitionMeta) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIsTemp() { + if err = oprot.WriteFieldBegin("is_temp", thrift.BOOL, 6); err != nil { goto 
WriteFieldBeginError } - if err := p.CurrentUserIdent.Write(oprot); err != nil { + if err := oprot.WriteBool(*p.IsTemp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -39755,163 +70634,243 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TMetadataTableRequestParams) String() string { +func (p *TGetMetaPartitionMeta) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetIndexes() { + if err = oprot.WriteFieldBegin("indexes", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Indexes)); err != nil { + return err + } + for _, v := range p.Indexes { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TGetMetaPartitionMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TMetadataTableRequestParams(%+v)", *p) + return fmt.Sprintf("TGetMetaPartitionMeta(%+v)", *p) + } -func (p *TMetadataTableRequestParams) DeepEqual(ano *TMetadataTableRequestParams) bool { +func (p *TGetMetaPartitionMeta) DeepEqual(ano *TGetMetaPartitionMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.MetadataType) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.IcebergMetadataParams) { + if !p.Field2DeepEqual(ano.Name) { return false } - if !p.Field3DeepEqual(ano.BackendsMetadataParams) { + if !p.Field3DeepEqual(ano.Key) { return false } - if !p.Field4DeepEqual(ano.ColumnsName) { + if !p.Field4DeepEqual(ano.Range) { return false } - if !p.Field5DeepEqual(ano.FrontendsMetadataParams) { + if !p.Field5DeepEqual(ano.VisibleVersion) { return false } - if !p.Field6DeepEqual(ano.CurrentUserIdent) { + if !p.Field6DeepEqual(ano.IsTemp) { + return false + } + if !p.Field7DeepEqual(ano.Indexes) { return false } return true } -func (p *TMetadataTableRequestParams) Field1DeepEqual(src *types.TMetadataType) bool { +func (p *TGetMetaPartitionMeta) Field1DeepEqual(src *int64) bool { - if p.MetadataType == src { + if p.Id == src { return true - } else if p.MetadataType == nil || src == nil { + } else if p.Id == nil || src == nil { return false } - if *p.MetadataType != *src { + if *p.Id != *src { return false } return true } -func (p *TMetadataTableRequestParams) Field2DeepEqual(src *plannodes.TIcebergMetadataParams) bool { +func (p *TGetMetaPartitionMeta) Field2DeepEqual(src *string) bool { - if !p.IcebergMetadataParams.DeepEqual(src) { + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { return false } return true } -func (p *TMetadataTableRequestParams) Field3DeepEqual(src *plannodes.TBackendsMetadataParams) bool { +func (p *TGetMetaPartitionMeta) Field3DeepEqual(src *string) bool { - if !p.BackendsMetadataParams.DeepEqual(src) { + if p.Key == src { + return true + } else if p.Key == nil || src == nil { + return false + } + if strings.Compare(*p.Key, *src) != 0 { return false } return true } -func (p *TMetadataTableRequestParams) Field4DeepEqual(src []string) bool { +func (p 
*TGetMetaPartitionMeta) Field4DeepEqual(src *string) bool { - if len(p.ColumnsName) != len(src) { + if p.Range == src { + return true + } else if p.Range == nil || src == nil { return false } - for i, v := range p.ColumnsName { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } + if strings.Compare(*p.Range, *src) != 0 { + return false } return true } -func (p *TMetadataTableRequestParams) Field5DeepEqual(src *plannodes.TFrontendsMetadataParams) bool { +func (p *TGetMetaPartitionMeta) Field5DeepEqual(src *int64) bool { - if !p.FrontendsMetadataParams.DeepEqual(src) { + if p.VisibleVersion == src { + return true + } else if p.VisibleVersion == nil || src == nil { + return false + } + if *p.VisibleVersion != *src { return false } return true } -func (p *TMetadataTableRequestParams) Field6DeepEqual(src *types.TUserIdentity) bool { +func (p *TGetMetaPartitionMeta) Field6DeepEqual(src *bool) bool { - if !p.CurrentUserIdent.DeepEqual(src) { + if p.IsTemp == src { + return true + } else if p.IsTemp == nil || src == nil { + return false + } + if *p.IsTemp != *src { return false } return true } +func (p *TGetMetaPartitionMeta) Field7DeepEqual(src []*TGetMetaIndexMeta) bool { -type TFetchSchemaTableDataRequest struct { - ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` - SchemaTableName *TSchemaTableName `thrift:"schema_table_name,2,optional" frugal:"2,optional,TSchemaTableName" json:"schema_table_name,omitempty"` - MetadaTableParams *TMetadataTableRequestParams `thrift:"metada_table_params,3,optional" frugal:"3,optional,TMetadataTableRequestParams" json:"metada_table_params,omitempty"` + if len(p.Indexes) != len(src) { + return false + } + for i, v := range p.Indexes { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } -func NewTFetchSchemaTableDataRequest() *TFetchSchemaTableDataRequest { - return &TFetchSchemaTableDataRequest{} +type TGetMetaTableMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + InTrash *bool `thrift:"in_trash,3,optional" frugal:"3,optional,bool" json:"in_trash,omitempty"` + Partitions []*TGetMetaPartitionMeta `thrift:"partitions,4,optional" frugal:"4,optional,list" json:"partitions,omitempty"` } -func (p *TFetchSchemaTableDataRequest) InitDefault() { - *p = TFetchSchemaTableDataRequest{} +func NewTGetMetaTableMeta() *TGetMetaTableMeta { + return &TGetMetaTableMeta{} } -var TFetchSchemaTableDataRequest_ClusterName_DEFAULT string +func (p *TGetMetaTableMeta) InitDefault() { +} -func (p *TFetchSchemaTableDataRequest) GetClusterName() (v string) { - if !p.IsSetClusterName() { - return TFetchSchemaTableDataRequest_ClusterName_DEFAULT +var TGetMetaTableMeta_Id_DEFAULT int64 + +func (p *TGetMetaTableMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaTableMeta_Id_DEFAULT } - return *p.ClusterName + return *p.Id } -var TFetchSchemaTableDataRequest_SchemaTableName_DEFAULT TSchemaTableName +var TGetMetaTableMeta_Name_DEFAULT string -func (p *TFetchSchemaTableDataRequest) GetSchemaTableName() (v TSchemaTableName) { - if !p.IsSetSchemaTableName() { - return TFetchSchemaTableDataRequest_SchemaTableName_DEFAULT +func (p *TGetMetaTableMeta) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaTableMeta_Name_DEFAULT } - return *p.SchemaTableName + return *p.Name } -var TFetchSchemaTableDataRequest_MetadaTableParams_DEFAULT 
*TMetadataTableRequestParams +var TGetMetaTableMeta_InTrash_DEFAULT bool -func (p *TFetchSchemaTableDataRequest) GetMetadaTableParams() (v *TMetadataTableRequestParams) { - if !p.IsSetMetadaTableParams() { - return TFetchSchemaTableDataRequest_MetadaTableParams_DEFAULT +func (p *TGetMetaTableMeta) GetInTrash() (v bool) { + if !p.IsSetInTrash() { + return TGetMetaTableMeta_InTrash_DEFAULT } - return p.MetadaTableParams + return *p.InTrash } -func (p *TFetchSchemaTableDataRequest) SetClusterName(val *string) { - p.ClusterName = val + +var TGetMetaTableMeta_Partitions_DEFAULT []*TGetMetaPartitionMeta + +func (p *TGetMetaTableMeta) GetPartitions() (v []*TGetMetaPartitionMeta) { + if !p.IsSetPartitions() { + return TGetMetaTableMeta_Partitions_DEFAULT + } + return p.Partitions } -func (p *TFetchSchemaTableDataRequest) SetSchemaTableName(val *TSchemaTableName) { - p.SchemaTableName = val +func (p *TGetMetaTableMeta) SetId(val *int64) { + p.Id = val } -func (p *TFetchSchemaTableDataRequest) SetMetadaTableParams(val *TMetadataTableRequestParams) { - p.MetadaTableParams = val +func (p *TGetMetaTableMeta) SetName(val *string) { + p.Name = val +} +func (p *TGetMetaTableMeta) SetInTrash(val *bool) { + p.InTrash = val +} +func (p *TGetMetaTableMeta) SetPartitions(val []*TGetMetaPartitionMeta) { + p.Partitions = val } -var fieldIDToName_TFetchSchemaTableDataRequest = map[int16]string{ - 1: "cluster_name", - 2: "schema_table_name", - 3: "metada_table_params", +var fieldIDToName_TGetMetaTableMeta = map[int16]string{ + 1: "id", + 2: "name", + 3: "in_trash", + 4: "partitions", } -func (p *TFetchSchemaTableDataRequest) IsSetClusterName() bool { - return p.ClusterName != nil +func (p *TGetMetaTableMeta) IsSetId() bool { + return p.Id != nil } -func (p *TFetchSchemaTableDataRequest) IsSetSchemaTableName() bool { - return p.SchemaTableName != nil +func (p *TGetMetaTableMeta) IsSetName() bool { + return p.Name != nil } -func (p *TFetchSchemaTableDataRequest) IsSetMetadaTableParams() bool { - return p.MetadaTableParams != nil +func (p *TGetMetaTableMeta) IsSetInTrash() bool { + return p.InTrash != nil } -func (p *TFetchSchemaTableDataRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaTableMeta) IsSetPartitions() bool { + return p.Partitions != nil +} + +func (p *TGetMetaTableMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -39931,41 +70890,42 @@ func (p *TFetchSchemaTableDataRequest) Read(iprot thrift.TProtocol) (err error) switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto 
ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -39980,7 +70940,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTableMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -39990,36 +70950,66 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) ReadField1(iprot thrift.TProtocol) error { +func (p *TGetMetaTableMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Id = _field + return nil +} +func (p *TGetMetaTableMeta) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ClusterName = &v + _field = &v } + p.Name = _field return nil } +func (p *TGetMetaTableMeta) ReadField3(iprot thrift.TProtocol) error { -func (p *TFetchSchemaTableDataRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - tmp := TSchemaTableName(v) - p.SchemaTableName = &tmp + _field = &v } + p.InTrash = _field return nil } +func (p *TGetMetaTableMeta) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TGetMetaPartitionMeta, 0, size) + values := make([]TGetMetaPartitionMeta, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TFetchSchemaTableDataRequest) ReadField3(iprot thrift.TProtocol) error { - p.MetadaTableParams = NewTMetadataTableRequestParams() - if err := p.MetadaTableParams.Read(iprot); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.Partitions = _field return nil } -func (p *TFetchSchemaTableDataRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaTableMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFetchSchemaTableDataRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaTableMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -40035,7 +71025,10 @@ func (p *TFetchSchemaTableDataRequest) Write(oprot thrift.TProtocol) (err error) fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -40054,12 +71047,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetClusterName() { - if err = oprot.WriteFieldBegin("cluster_name", 
thrift.STRING, 1); err != nil { +func (p *TGetMetaTableMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.ClusterName); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -40073,12 +71066,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetSchemaTableName() { - if err = oprot.WriteFieldBegin("schema_table_name", thrift.I32, 2); err != nil { +func (p *TGetMetaTableMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.SchemaTableName)); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -40092,12 +71085,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetMetadaTableParams() { - if err = oprot.WriteFieldBegin("metada_table_params", thrift.STRUCT, 3); err != nil { +func (p *TGetMetaTableMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetInTrash() { + if err = oprot.WriteFieldBegin("in_trash", thrift.BOOL, 3); err != nil { goto WriteFieldBeginError } - if err := p.MetadaTableParams.Write(oprot); err != nil { + if err := oprot.WriteBool(*p.InTrash); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -40108,121 +71101,240 @@ func (p *TFetchSchemaTableDataRequest) writeField3(oprot thrift.TProtocol) (err WriteFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGetMetaTableMeta) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitions() { + if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return err + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) String() string { +func (p *TGetMetaTableMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TFetchSchemaTableDataRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaTableMeta(%+v)", *p) + } -func (p *TFetchSchemaTableDataRequest) DeepEqual(ano *TFetchSchemaTableDataRequest) bool { +func (p *TGetMetaTableMeta) DeepEqual(ano *TGetMetaTableMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ClusterName) { + if 
!p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.SchemaTableName) { + if !p.Field2DeepEqual(ano.Name) { return false } - if !p.Field3DeepEqual(ano.MetadaTableParams) { + if !p.Field3DeepEqual(ano.InTrash) { + return false + } + if !p.Field4DeepEqual(ano.Partitions) { return false } return true } -func (p *TFetchSchemaTableDataRequest) Field1DeepEqual(src *string) bool { +func (p *TGetMetaTableMeta) Field1DeepEqual(src *int64) bool { - if p.ClusterName == src { + if p.Id == src { return true - } else if p.ClusterName == nil || src == nil { + } else if p.Id == nil || src == nil { return false } - if strings.Compare(*p.ClusterName, *src) != 0 { + if *p.Id != *src { return false } return true } -func (p *TFetchSchemaTableDataRequest) Field2DeepEqual(src *TSchemaTableName) bool { +func (p *TGetMetaTableMeta) Field2DeepEqual(src *string) bool { - if p.SchemaTableName == src { + if p.Name == src { return true - } else if p.SchemaTableName == nil || src == nil { + } else if p.Name == nil || src == nil { return false } - if *p.SchemaTableName != *src { + if strings.Compare(*p.Name, *src) != 0 { return false } return true } -func (p *TFetchSchemaTableDataRequest) Field3DeepEqual(src *TMetadataTableRequestParams) bool { +func (p *TGetMetaTableMeta) Field3DeepEqual(src *bool) bool { - if !p.MetadaTableParams.DeepEqual(src) { + if p.InTrash == src { + return true + } else if p.InTrash == nil || src == nil { + return false + } + if *p.InTrash != *src { return false } return true } +func (p *TGetMetaTableMeta) Field4DeepEqual(src []*TGetMetaPartitionMeta) bool { -type TFetchSchemaTableDataResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` - DataBatch []*data.TRow `thrift:"data_batch,2,optional" frugal:"2,optional,list" json:"data_batch,omitempty"` + if len(p.Partitions) != len(src) { + return false + } + for i, v := range p.Partitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } -func NewTFetchSchemaTableDataResult_() *TFetchSchemaTableDataResult_ { - return &TFetchSchemaTableDataResult_{} +type TGetMetaDBMeta struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Tables []*TGetMetaTableMeta `thrift:"tables,3,optional" frugal:"3,optional,list" json:"tables,omitempty"` + DroppedPartitions []int64 `thrift:"dropped_partitions,4,optional" frugal:"4,optional,list" json:"dropped_partitions,omitempty"` + DroppedTables []int64 `thrift:"dropped_tables,5,optional" frugal:"5,optional,list" json:"dropped_tables,omitempty"` + DroppedIndexes []int64 `thrift:"dropped_indexes,6,optional" frugal:"6,optional,list" json:"dropped_indexes,omitempty"` } -func (p *TFetchSchemaTableDataResult_) InitDefault() { - *p = TFetchSchemaTableDataResult_{} +func NewTGetMetaDBMeta() *TGetMetaDBMeta { + return &TGetMetaDBMeta{} } -var TFetchSchemaTableDataResult__Status_DEFAULT *status.TStatus +func (p *TGetMetaDBMeta) InitDefault() { +} -func (p *TFetchSchemaTableDataResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TFetchSchemaTableDataResult__Status_DEFAULT +var TGetMetaDBMeta_Id_DEFAULT int64 + +func (p *TGetMetaDBMeta) GetId() (v int64) { + if !p.IsSetId() { + return TGetMetaDBMeta_Id_DEFAULT } - return p.Status + return *p.Id } -var TFetchSchemaTableDataResult__DataBatch_DEFAULT []*data.TRow +var TGetMetaDBMeta_Name_DEFAULT string -func (p 
*TFetchSchemaTableDataResult_) GetDataBatch() (v []*data.TRow) { - if !p.IsSetDataBatch() { - return TFetchSchemaTableDataResult__DataBatch_DEFAULT +func (p *TGetMetaDBMeta) GetName() (v string) { + if !p.IsSetName() { + return TGetMetaDBMeta_Name_DEFAULT } - return p.DataBatch + return *p.Name } -func (p *TFetchSchemaTableDataResult_) SetStatus(val *status.TStatus) { - p.Status = val + +var TGetMetaDBMeta_Tables_DEFAULT []*TGetMetaTableMeta + +func (p *TGetMetaDBMeta) GetTables() (v []*TGetMetaTableMeta) { + if !p.IsSetTables() { + return TGetMetaDBMeta_Tables_DEFAULT + } + return p.Tables } -func (p *TFetchSchemaTableDataResult_) SetDataBatch(val []*data.TRow) { - p.DataBatch = val + +var TGetMetaDBMeta_DroppedPartitions_DEFAULT []int64 + +func (p *TGetMetaDBMeta) GetDroppedPartitions() (v []int64) { + if !p.IsSetDroppedPartitions() { + return TGetMetaDBMeta_DroppedPartitions_DEFAULT + } + return p.DroppedPartitions } -var fieldIDToName_TFetchSchemaTableDataResult_ = map[int16]string{ - 1: "status", - 2: "data_batch", +var TGetMetaDBMeta_DroppedTables_DEFAULT []int64 + +func (p *TGetMetaDBMeta) GetDroppedTables() (v []int64) { + if !p.IsSetDroppedTables() { + return TGetMetaDBMeta_DroppedTables_DEFAULT + } + return p.DroppedTables } -func (p *TFetchSchemaTableDataResult_) IsSetStatus() bool { - return p.Status != nil +var TGetMetaDBMeta_DroppedIndexes_DEFAULT []int64 + +func (p *TGetMetaDBMeta) GetDroppedIndexes() (v []int64) { + if !p.IsSetDroppedIndexes() { + return TGetMetaDBMeta_DroppedIndexes_DEFAULT + } + return p.DroppedIndexes +} +func (p *TGetMetaDBMeta) SetId(val *int64) { + p.Id = val +} +func (p *TGetMetaDBMeta) SetName(val *string) { + p.Name = val +} +func (p *TGetMetaDBMeta) SetTables(val []*TGetMetaTableMeta) { + p.Tables = val +} +func (p *TGetMetaDBMeta) SetDroppedPartitions(val []int64) { + p.DroppedPartitions = val +} +func (p *TGetMetaDBMeta) SetDroppedTables(val []int64) { + p.DroppedTables = val +} +func (p *TGetMetaDBMeta) SetDroppedIndexes(val []int64) { + p.DroppedIndexes = val } -func (p *TFetchSchemaTableDataResult_) IsSetDataBatch() bool { - return p.DataBatch != nil +var fieldIDToName_TGetMetaDBMeta = map[int16]string{ + 1: "id", + 2: "name", + 3: "tables", + 4: "dropped_partitions", + 5: "dropped_tables", + 6: "dropped_indexes", } -func (p *TFetchSchemaTableDataResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaDBMeta) IsSetId() bool { + return p.Id != nil +} + +func (p *TGetMetaDBMeta) IsSetName() bool { + return p.Name != nil +} + +func (p *TGetMetaDBMeta) IsSetTables() bool { + return p.Tables != nil +} + +func (p *TGetMetaDBMeta) IsSetDroppedPartitions() bool { + return p.DroppedPartitions != nil +} + +func (p *TGetMetaDBMeta) IsSetDroppedTables() bool { + return p.DroppedTables != nil +} + +func (p *TGetMetaDBMeta) IsSetDroppedIndexes() bool { + return p.DroppedIndexes != nil +} + +func (p *TGetMetaDBMeta) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -40239,32 +71351,58 @@ func (p *TFetchSchemaTableDataResult_) Read(iprot thrift.TProtocol) (err error) switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.LIST { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -40273,17 +71411,13 @@ func (p *TFetchSchemaTableDataResult_) Read(iprot thrift.TProtocol) (err error) goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaDBMeta[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -40291,41 +71425,126 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchSchemaTableDataResult_[fieldId])) } -func (p *TFetchSchemaTableDataResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TGetMetaDBMeta) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.Id = _field return nil } +func (p *TGetMetaDBMeta) ReadField2(iprot thrift.TProtocol) error { -func (p *TFetchSchemaTableDataResult_) ReadField2(iprot thrift.TProtocol) error { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *TGetMetaDBMeta) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DataBatch = make([]*data.TRow, 0, size) + _field := make([]*TGetMetaTableMeta, 0, size) + values := make([]TGetMetaTableMeta, size) for i := 0; i < size; i++ { - _elem := data.NewTRow() + _elem := &values[i] + _elem.InitDefault() + if err 
:= _elem.Read(iprot); err != nil { return err } - p.DataBatch = append(p.DataBatch, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Tables = _field return nil } +func (p *TGetMetaDBMeta) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { -func (p *TFetchSchemaTableDataResult_) Write(oprot thrift.TProtocol) (err error) { + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DroppedPartitions = _field + return nil +} +func (p *TGetMetaDBMeta) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DroppedTables = _field + return nil +} +func (p *TGetMetaDBMeta) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DroppedIndexes = _field + return nil +} + +func (p *TGetMetaDBMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFetchSchemaTableDataResult"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaDBMeta"); err != nil { goto WriteStructBeginError } if p != nil { @@ -40337,7 +71556,22 @@ func (p *TFetchSchemaTableDataResult_) Write(oprot thrift.TProtocol) (err error) fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -40356,15 +71590,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFetchSchemaTableDataResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TGetMetaDBMeta) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -40373,15 +71609,34 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFetchSchemaTableDataResult_) writeField2(oprot 
thrift.TProtocol) (err error) { - if p.IsSetDataBatch() { - if err = oprot.WriteFieldBegin("data_batch", thrift.LIST, 2); err != nil { +func (p *TGetMetaDBMeta) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DataBatch)); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } - for _, v := range p.DataBatch { + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGetMetaDBMeta) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTables() { + if err = oprot.WriteFieldBegin("tables", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tables)); err != nil { + return err + } + for _, v := range p.Tables { if err := v.Write(oprot); err != nil { return err } @@ -40395,46 +71650,157 @@ func (p *TFetchSchemaTableDataResult_) writeField2(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TFetchSchemaTableDataResult_) String() string { +func (p *TGetMetaDBMeta) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDroppedPartitions() { + if err = oprot.WriteFieldBegin("dropped_partitions", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.DroppedPartitions)); err != nil { + return err + } + for _, v := range p.DroppedPartitions { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGetMetaDBMeta) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDroppedTables() { + if err = oprot.WriteFieldBegin("dropped_tables", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.DroppedTables)); err != nil { + return err + } + for _, v := range p.DroppedTables { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetMetaDBMeta) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetDroppedIndexes() { + if err = oprot.WriteFieldBegin("dropped_indexes", thrift.LIST, 6); err != nil { + goto 
WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.DroppedIndexes)); err != nil { + return err + } + for _, v := range p.DroppedIndexes { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetMetaDBMeta) String() string { if p == nil { return "" } - return fmt.Sprintf("TFetchSchemaTableDataResult_(%+v)", *p) + return fmt.Sprintf("TGetMetaDBMeta(%+v)", *p) + } -func (p *TFetchSchemaTableDataResult_) DeepEqual(ano *TFetchSchemaTableDataResult_) bool { +func (p *TGetMetaDBMeta) DeepEqual(ano *TGetMetaDBMeta) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.DataBatch) { + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Tables) { + return false + } + if !p.Field4DeepEqual(ano.DroppedPartitions) { + return false + } + if !p.Field5DeepEqual(ano.DroppedTables) { + return false + } + if !p.Field6DeepEqual(ano.DroppedIndexes) { return false } return true } -func (p *TFetchSchemaTableDataResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TGetMetaDBMeta) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { return false } return true } -func (p *TFetchSchemaTableDataResult_) Field2DeepEqual(src []*data.TRow) bool { +func (p *TGetMetaDBMeta) Field2DeepEqual(src *string) bool { - if len(p.DataBatch) != len(src) { + if p.Name == src { + return true + } else if p.Name == nil || src == nil { return false } - for i, v := range p.DataBatch { + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *TGetMetaDBMeta) Field3DeepEqual(src []*TGetMetaTableMeta) bool { + + if len(p.Tables) != len(src) { + return false + } + for i, v := range p.Tables { _src := src[i] if !v.DeepEqual(_src) { return false @@ -40442,115 +71808,118 @@ func (p *TFetchSchemaTableDataResult_) Field2DeepEqual(src []*data.TRow) bool { } return true } +func (p *TGetMetaDBMeta) Field4DeepEqual(src []int64) bool { -type TAddColumnsRequest struct { - TableId *int64 `thrift:"table_id,1,optional" frugal:"1,optional,i64" json:"table_id,omitempty"` - AddColumns []*TColumnDef `thrift:"addColumns,2,optional" frugal:"2,optional,list" json:"addColumns,omitempty"` - TableName *string `thrift:"table_name,3,optional" frugal:"3,optional,string" json:"table_name,omitempty"` - DbName *string `thrift:"db_name,4,optional" frugal:"4,optional,string" json:"db_name,omitempty"` - AllowTypeConflict *bool `thrift:"allow_type_conflict,5,optional" frugal:"5,optional,bool" json:"allow_type_conflict,omitempty"` + if len(p.DroppedPartitions) != len(src) { + return false + } + for i, v := range p.DroppedPartitions { + _src := src[i] + if v != _src { + return false + } + } + return true } +func (p *TGetMetaDBMeta) Field5DeepEqual(src []int64) bool { -func NewTAddColumnsRequest() *TAddColumnsRequest { - return &TAddColumnsRequest{} + if len(p.DroppedTables) != len(src) { + return false + } + for i, v := range 
p.DroppedTables { + _src := src[i] + if v != _src { + return false + } + } + return true } +func (p *TGetMetaDBMeta) Field6DeepEqual(src []int64) bool { -func (p *TAddColumnsRequest) InitDefault() { - *p = TAddColumnsRequest{} + if len(p.DroppedIndexes) != len(src) { + return false + } + for i, v := range p.DroppedIndexes { + _src := src[i] + if v != _src { + return false + } + } + return true } -var TAddColumnsRequest_TableId_DEFAULT int64 - -func (p *TAddColumnsRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TAddColumnsRequest_TableId_DEFAULT - } - return *p.TableId +type TGetMetaResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + DbMeta *TGetMetaDBMeta `thrift:"db_meta,2,optional" frugal:"2,optional,TGetMetaDBMeta" json:"db_meta,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,3,optional" frugal:"3,optional,types.TNetworkAddress" json:"master_address,omitempty"` } -var TAddColumnsRequest_AddColumns_DEFAULT []*TColumnDef +func NewTGetMetaResult_() *TGetMetaResult_ { + return &TGetMetaResult_{} +} -func (p *TAddColumnsRequest) GetAddColumns() (v []*TColumnDef) { - if !p.IsSetAddColumns() { - return TAddColumnsRequest_AddColumns_DEFAULT - } - return p.AddColumns +func (p *TGetMetaResult_) InitDefault() { } -var TAddColumnsRequest_TableName_DEFAULT string +var TGetMetaResult__Status_DEFAULT *status.TStatus -func (p *TAddColumnsRequest) GetTableName() (v string) { - if !p.IsSetTableName() { - return TAddColumnsRequest_TableName_DEFAULT +func (p *TGetMetaResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetMetaResult__Status_DEFAULT } - return *p.TableName + return p.Status } -var TAddColumnsRequest_DbName_DEFAULT string +var TGetMetaResult__DbMeta_DEFAULT *TGetMetaDBMeta -func (p *TAddColumnsRequest) GetDbName() (v string) { - if !p.IsSetDbName() { - return TAddColumnsRequest_DbName_DEFAULT +func (p *TGetMetaResult_) GetDbMeta() (v *TGetMetaDBMeta) { + if !p.IsSetDbMeta() { + return TGetMetaResult__DbMeta_DEFAULT } - return *p.DbName + return p.DbMeta } -var TAddColumnsRequest_AllowTypeConflict_DEFAULT bool +var TGetMetaResult__MasterAddress_DEFAULT *types.TNetworkAddress -func (p *TAddColumnsRequest) GetAllowTypeConflict() (v bool) { - if !p.IsSetAllowTypeConflict() { - return TAddColumnsRequest_AllowTypeConflict_DEFAULT +func (p *TGetMetaResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetMetaResult__MasterAddress_DEFAULT } - return *p.AllowTypeConflict -} -func (p *TAddColumnsRequest) SetTableId(val *int64) { - p.TableId = val + return p.MasterAddress } -func (p *TAddColumnsRequest) SetAddColumns(val []*TColumnDef) { - p.AddColumns = val -} -func (p *TAddColumnsRequest) SetTableName(val *string) { - p.TableName = val -} -func (p *TAddColumnsRequest) SetDbName(val *string) { - p.DbName = val -} -func (p *TAddColumnsRequest) SetAllowTypeConflict(val *bool) { - p.AllowTypeConflict = val +func (p *TGetMetaResult_) SetStatus(val *status.TStatus) { + p.Status = val } - -var fieldIDToName_TAddColumnsRequest = map[int16]string{ - 1: "table_id", - 2: "addColumns", - 3: "table_name", - 4: "db_name", - 5: "allow_type_conflict", +func (p *TGetMetaResult_) SetDbMeta(val *TGetMetaDBMeta) { + p.DbMeta = val } - -func (p *TAddColumnsRequest) IsSetTableId() bool { - return p.TableId != nil +func (p *TGetMetaResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val } -func (p *TAddColumnsRequest) 
IsSetAddColumns() bool { - return p.AddColumns != nil +var fieldIDToName_TGetMetaResult_ = map[int16]string{ + 1: "status", + 2: "db_meta", + 3: "master_address", } -func (p *TAddColumnsRequest) IsSetTableName() bool { - return p.TableName != nil +func (p *TGetMetaResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TAddColumnsRequest) IsSetDbName() bool { - return p.DbName != nil +func (p *TGetMetaResult_) IsSetDbMeta() bool { + return p.DbMeta != nil } -func (p *TAddColumnsRequest) IsSetAllowTypeConflict() bool { - return p.AllowTypeConflict != nil +func (p *TGetMetaResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil } -func (p *TAddColumnsRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetMetaResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -40567,144 +71936,93 @@ func (p *TAddColumnsRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddColumnsRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TAddColumnsRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TableId = &v - } - return 
nil -} - -func (p *TAddColumnsRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.AddColumns = make([]*TColumnDef, 0, size) - for i := 0; i < size; i++ { - _elem := NewTColumnDef() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.AddColumns = append(p.AddColumns, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError } return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetMetaResult_[fieldId])) } -func (p *TAddColumnsRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TGetMetaResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err - } else { - p.TableName = &v } + p.Status = _field return nil } - -func (p *TAddColumnsRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TGetMetaResult_) ReadField2(iprot thrift.TProtocol) error { + _field := NewTGetMetaDBMeta() + if err := _field.Read(iprot); err != nil { return err - } else { - p.DbName = &v } + p.DbMeta = _field return nil } - -func (p *TAddColumnsRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TGetMetaResult_) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err - } else { - p.AllowTypeConflict = &v } + p.MasterAddress = _field return nil } -func (p *TAddColumnsRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetMetaResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TAddColumnsRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetMetaResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -40720,15 +72038,6 @@ func (p *TAddColumnsRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -40747,17 +72056,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TAddColumnsRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 1); err != nil { - goto 
WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TableId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TGetMetaResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -40766,20 +72073,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TAddColumnsRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetAddColumns() { - if err = oprot.WriteFieldBegin("addColumns", thrift.LIST, 2); err != nil { +func (p *TGetMetaResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbMeta() { + if err = oprot.WriteFieldBegin("db_meta", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.AddColumns)); err != nil { - return err - } - for _, v := range p.AddColumns { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.DbMeta.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -40793,12 +72092,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TAddColumnsRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTableName() { - if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 3); err != nil { +func (p *TGetMetaResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.TableName); err != nil { + if err := p.MasterAddress.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -40812,224 +72111,176 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TAddColumnsRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDbName() { - if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DbName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TAddColumnsRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetAllowTypeConflict() { - if err = oprot.WriteFieldBegin("allow_type_conflict", thrift.BOOL, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.AllowTypeConflict); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TAddColumnsRequest) String() string { +func (p *TGetMetaResult_) String() string { if p == nil { return 
"" } - return fmt.Sprintf("TAddColumnsRequest(%+v)", *p) + return fmt.Sprintf("TGetMetaResult_(%+v)", *p) + } -func (p *TAddColumnsRequest) DeepEqual(ano *TAddColumnsRequest) bool { +func (p *TGetMetaResult_) DeepEqual(ano *TGetMetaResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TableId) { - return false - } - if !p.Field2DeepEqual(ano.AddColumns) { - return false - } - if !p.Field3DeepEqual(ano.TableName) { + if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field4DeepEqual(ano.DbName) { + if !p.Field2DeepEqual(ano.DbMeta) { return false } - if !p.Field5DeepEqual(ano.AllowTypeConflict) { + if !p.Field3DeepEqual(ano.MasterAddress) { return false } return true } -func (p *TAddColumnsRequest) Field1DeepEqual(src *int64) bool { +func (p *TGetMetaResult_) Field1DeepEqual(src *status.TStatus) bool { - if p.TableId == src { - return true - } else if p.TableId == nil || src == nil { - return false - } - if *p.TableId != *src { + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TAddColumnsRequest) Field2DeepEqual(src []*TColumnDef) bool { +func (p *TGetMetaResult_) Field2DeepEqual(src *TGetMetaDBMeta) bool { - if len(p.AddColumns) != len(src) { + if !p.DbMeta.DeepEqual(src) { return false } - for i, v := range p.AddColumns { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TAddColumnsRequest) Field3DeepEqual(src *string) bool { +func (p *TGetMetaResult_) Field3DeepEqual(src *types.TNetworkAddress) bool { - if p.TableName == src { - return true - } else if p.TableName == nil || src == nil { - return false - } - if strings.Compare(*p.TableName, *src) != 0 { + if !p.MasterAddress.DeepEqual(src) { return false } return true } -func (p *TAddColumnsRequest) Field4DeepEqual(src *string) bool { - if p.DbName == src { - return true - } else if p.DbName == nil || src == nil { - return false - } - if strings.Compare(*p.DbName, *src) != 0 { - return false - } - return true +type TGetBackendMetaRequest struct { + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + UserIp *string `thrift:"user_ip,4,optional" frugal:"4,optional,string" json:"user_ip,omitempty"` + Token *string `thrift:"token,5,optional" frugal:"5,optional,string" json:"token,omitempty"` + BackendId *int64 `thrift:"backend_id,6,optional" frugal:"6,optional,i64" json:"backend_id,omitempty"` } -func (p *TAddColumnsRequest) Field5DeepEqual(src *bool) bool { - if p.AllowTypeConflict == src { - return true - } else if p.AllowTypeConflict == nil || src == nil { - return false - } - if *p.AllowTypeConflict != *src { - return false - } - return true +func NewTGetBackendMetaRequest() *TGetBackendMetaRequest { + return &TGetBackendMetaRequest{} } -type TAddColumnsResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - TableId *int64 `thrift:"table_id,2,optional" frugal:"2,optional,i64" json:"table_id,omitempty"` - AllColumns []*descriptors.TColumn `thrift:"allColumns,3,optional" frugal:"3,optional,list" json:"allColumns,omitempty"` - SchemaVersion *int32 `thrift:"schema_version,4,optional" frugal:"4,optional,i32" json:"schema_version,omitempty"` +func (p *TGetBackendMetaRequest) InitDefault() { } -func 
NewTAddColumnsResult_() *TAddColumnsResult_ { - return &TAddColumnsResult_{} +var TGetBackendMetaRequest_Cluster_DEFAULT string + +func (p *TGetBackendMetaRequest) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGetBackendMetaRequest_Cluster_DEFAULT + } + return *p.Cluster } -func (p *TAddColumnsResult_) InitDefault() { - *p = TAddColumnsResult_{} +var TGetBackendMetaRequest_User_DEFAULT string + +func (p *TGetBackendMetaRequest) GetUser() (v string) { + if !p.IsSetUser() { + return TGetBackendMetaRequest_User_DEFAULT + } + return *p.User } -var TAddColumnsResult__Status_DEFAULT *status.TStatus +var TGetBackendMetaRequest_Passwd_DEFAULT string -func (p *TAddColumnsResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TAddColumnsResult__Status_DEFAULT +func (p *TGetBackendMetaRequest) GetPasswd() (v string) { + if !p.IsSetPasswd() { + return TGetBackendMetaRequest_Passwd_DEFAULT } - return p.Status + return *p.Passwd } -var TAddColumnsResult__TableId_DEFAULT int64 +var TGetBackendMetaRequest_UserIp_DEFAULT string -func (p *TAddColumnsResult_) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TAddColumnsResult__TableId_DEFAULT +func (p *TGetBackendMetaRequest) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TGetBackendMetaRequest_UserIp_DEFAULT } - return *p.TableId + return *p.UserIp } -var TAddColumnsResult__AllColumns_DEFAULT []*descriptors.TColumn +var TGetBackendMetaRequest_Token_DEFAULT string -func (p *TAddColumnsResult_) GetAllColumns() (v []*descriptors.TColumn) { - if !p.IsSetAllColumns() { - return TAddColumnsResult__AllColumns_DEFAULT +func (p *TGetBackendMetaRequest) GetToken() (v string) { + if !p.IsSetToken() { + return TGetBackendMetaRequest_Token_DEFAULT } - return p.AllColumns + return *p.Token } -var TAddColumnsResult__SchemaVersion_DEFAULT int32 +var TGetBackendMetaRequest_BackendId_DEFAULT int64 -func (p *TAddColumnsResult_) GetSchemaVersion() (v int32) { - if !p.IsSetSchemaVersion() { - return TAddColumnsResult__SchemaVersion_DEFAULT +func (p *TGetBackendMetaRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TGetBackendMetaRequest_BackendId_DEFAULT } - return *p.SchemaVersion + return *p.BackendId } -func (p *TAddColumnsResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TGetBackendMetaRequest) SetCluster(val *string) { + p.Cluster = val } -func (p *TAddColumnsResult_) SetTableId(val *int64) { - p.TableId = val +func (p *TGetBackendMetaRequest) SetUser(val *string) { + p.User = val +} +func (p *TGetBackendMetaRequest) SetPasswd(val *string) { + p.Passwd = val +} +func (p *TGetBackendMetaRequest) SetUserIp(val *string) { + p.UserIp = val } -func (p *TAddColumnsResult_) SetAllColumns(val []*descriptors.TColumn) { - p.AllColumns = val +func (p *TGetBackendMetaRequest) SetToken(val *string) { + p.Token = val } -func (p *TAddColumnsResult_) SetSchemaVersion(val *int32) { - p.SchemaVersion = val +func (p *TGetBackendMetaRequest) SetBackendId(val *int64) { + p.BackendId = val } -var fieldIDToName_TAddColumnsResult_ = map[int16]string{ - 1: "status", - 2: "table_id", - 3: "allColumns", - 4: "schema_version", +var fieldIDToName_TGetBackendMetaRequest = map[int16]string{ + 1: "cluster", + 2: "user", + 3: "passwd", + 4: "user_ip", + 5: "token", + 6: "backend_id", } -func (p *TAddColumnsResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TGetBackendMetaRequest) IsSetCluster() bool { + return p.Cluster != nil } -func (p *TAddColumnsResult_) IsSetTableId() bool { - return 
p.TableId != nil +func (p *TGetBackendMetaRequest) IsSetUser() bool { + return p.User != nil +} + +func (p *TGetBackendMetaRequest) IsSetPasswd() bool { + return p.Passwd != nil } -func (p *TAddColumnsResult_) IsSetAllColumns() bool { - return p.AllColumns != nil +func (p *TGetBackendMetaRequest) IsSetUserIp() bool { + return p.UserIp != nil +} + +func (p *TGetBackendMetaRequest) IsSetToken() bool { + return p.Token != nil } -func (p *TAddColumnsResult_) IsSetSchemaVersion() bool { - return p.SchemaVersion != nil +func (p *TGetBackendMetaRequest) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TAddColumnsResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetBackendMetaRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -41049,51 +72300,58 @@ func (p *TAddColumnsResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -41108,7 +72366,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddColumnsResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBackendMetaRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -41118,55 +72376,76 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAddColumnsResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p 
*TGetBackendMetaRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Cluster = _field return nil } +func (p *TGetBackendMetaRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TAddColumnsResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableId = &v + _field = &v } + p.User = _field return nil } +func (p *TGetBackendMetaRequest) ReadField3(iprot thrift.TProtocol) error { -func (p *TAddColumnsResult_) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } - p.AllColumns = make([]*descriptors.TColumn, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() - if err := _elem.Read(iprot); err != nil { - return err - } + p.Passwd = _field + return nil +} +func (p *TGetBackendMetaRequest) ReadField4(iprot thrift.TProtocol) error { - p.AllColumns = append(p.AllColumns, _elem) + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - if err := iprot.ReadListEnd(); err != nil { + p.UserIp = _field + return nil +} +func (p *TGetBackendMetaRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Token = _field return nil } +func (p *TGetBackendMetaRequest) ReadField6(iprot thrift.TProtocol) error { -func (p *TAddColumnsResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.SchemaVersion = &v + _field = &v } + p.BackendId = _field return nil } -func (p *TAddColumnsResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetBackendMetaRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TAddColumnsResult"); err != nil { + if err = oprot.WriteStructBegin("TGetBackendMetaRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -41186,7 +72465,14 @@ func (p *TAddColumnsResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -41205,12 +72491,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TAddColumnsResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TGetBackendMetaRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Cluster); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41224,12 +72510,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TAddColumnsResult_) 
writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 2); err != nil { +func (p *TGetBackendMetaRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUser() { + if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TableId); err != nil { + if err := oprot.WriteString(*p.User); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41243,20 +72529,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TAddColumnsResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAllColumns() { - if err = oprot.WriteFieldBegin("allColumns", thrift.LIST, 3); err != nil { +func (p *TGetBackendMetaRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPasswd() { + if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.AllColumns)); err != nil { - return err - } - for _, v := range p.AllColumns { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.Passwd); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41270,12 +72548,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TAddColumnsResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetSchemaVersion() { - if err = oprot.WriteFieldBegin("schema_version", thrift.I32, 4); err != nil { +func (p *TGetBackendMetaRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.SchemaVersion); err != nil { + if err := oprot.WriteString(*p.UserIp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41289,133 +72567,224 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TAddColumnsResult_) String() string { +func (p *TGetBackendMetaRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGetBackendMetaRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGetBackendMetaRequest) String() string { if p == nil { return "" } - return 
fmt.Sprintf("TAddColumnsResult_(%+v)", *p) + return fmt.Sprintf("TGetBackendMetaRequest(%+v)", *p) + } -func (p *TAddColumnsResult_) DeepEqual(ano *TAddColumnsResult_) bool { +func (p *TGetBackendMetaRequest) DeepEqual(ano *TGetBackendMetaRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Cluster) { return false } - if !p.Field2DeepEqual(ano.TableId) { + if !p.Field2DeepEqual(ano.User) { + return false + } + if !p.Field3DeepEqual(ano.Passwd) { return false } - if !p.Field3DeepEqual(ano.AllColumns) { + if !p.Field4DeepEqual(ano.UserIp) { + return false + } + if !p.Field5DeepEqual(ano.Token) { return false } - if !p.Field4DeepEqual(ano.SchemaVersion) { + if !p.Field6DeepEqual(ano.BackendId) { return false } return true } -func (p *TAddColumnsResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TGetBackendMetaRequest) Field1DeepEqual(src *string) bool { - if !p.Status.DeepEqual(src) { + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { return false } return true } -func (p *TAddColumnsResult_) Field2DeepEqual(src *int64) bool { +func (p *TGetBackendMetaRequest) Field2DeepEqual(src *string) bool { - if p.TableId == src { + if p.User == src { return true - } else if p.TableId == nil || src == nil { + } else if p.User == nil || src == nil { return false } - if *p.TableId != *src { + if strings.Compare(*p.User, *src) != 0 { return false } return true } -func (p *TAddColumnsResult_) Field3DeepEqual(src []*descriptors.TColumn) bool { +func (p *TGetBackendMetaRequest) Field3DeepEqual(src *string) bool { - if len(p.AllColumns) != len(src) { + if p.Passwd == src { + return true + } else if p.Passwd == nil || src == nil { return false } - for i, v := range p.AllColumns { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if strings.Compare(*p.Passwd, *src) != 0 { + return false } return true } -func (p *TAddColumnsResult_) Field4DeepEqual(src *int32) bool { +func (p *TGetBackendMetaRequest) Field4DeepEqual(src *string) bool { - if p.SchemaVersion == src { + if p.UserIp == src { return true - } else if p.SchemaVersion == nil || src == nil { + } else if p.UserIp == nil || src == nil { return false } - if *p.SchemaVersion != *src { + if strings.Compare(*p.UserIp, *src) != 0 { return false } return true } +func (p *TGetBackendMetaRequest) Field5DeepEqual(src *string) bool { -type TMySqlLoadAcquireTokenResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true } +func (p *TGetBackendMetaRequest) Field6DeepEqual(src *int64) bool { -func NewTMySqlLoadAcquireTokenResult_() *TMySqlLoadAcquireTokenResult_ { - return &TMySqlLoadAcquireTokenResult_{} + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true } -func (p *TMySqlLoadAcquireTokenResult_) InitDefault() { - *p = TMySqlLoadAcquireTokenResult_{} +type TGetBackendMetaResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + Backends 
[]*types.TBackend `thrift:"backends,2,optional" frugal:"2,optional,list" json:"backends,omitempty"` + MasterAddress *types.TNetworkAddress `thrift:"master_address,3,optional" frugal:"3,optional,types.TNetworkAddress" json:"master_address,omitempty"` } -var TMySqlLoadAcquireTokenResult__Status_DEFAULT *status.TStatus +func NewTGetBackendMetaResult_() *TGetBackendMetaResult_ { + return &TGetBackendMetaResult_{} +} -func (p *TMySqlLoadAcquireTokenResult_) GetStatus() (v *status.TStatus) { +func (p *TGetBackendMetaResult_) InitDefault() { +} + +var TGetBackendMetaResult__Status_DEFAULT *status.TStatus + +func (p *TGetBackendMetaResult_) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TMySqlLoadAcquireTokenResult__Status_DEFAULT + return TGetBackendMetaResult__Status_DEFAULT } return p.Status } -var TMySqlLoadAcquireTokenResult__Token_DEFAULT string +var TGetBackendMetaResult__Backends_DEFAULT []*types.TBackend -func (p *TMySqlLoadAcquireTokenResult_) GetToken() (v string) { - if !p.IsSetToken() { - return TMySqlLoadAcquireTokenResult__Token_DEFAULT +func (p *TGetBackendMetaResult_) GetBackends() (v []*types.TBackend) { + if !p.IsSetBackends() { + return TGetBackendMetaResult__Backends_DEFAULT } - return *p.Token + return p.Backends } -func (p *TMySqlLoadAcquireTokenResult_) SetStatus(val *status.TStatus) { + +var TGetBackendMetaResult__MasterAddress_DEFAULT *types.TNetworkAddress + +func (p *TGetBackendMetaResult_) GetMasterAddress() (v *types.TNetworkAddress) { + if !p.IsSetMasterAddress() { + return TGetBackendMetaResult__MasterAddress_DEFAULT + } + return p.MasterAddress +} +func (p *TGetBackendMetaResult_) SetStatus(val *status.TStatus) { p.Status = val } -func (p *TMySqlLoadAcquireTokenResult_) SetToken(val *string) { - p.Token = val +func (p *TGetBackendMetaResult_) SetBackends(val []*types.TBackend) { + p.Backends = val +} +func (p *TGetBackendMetaResult_) SetMasterAddress(val *types.TNetworkAddress) { + p.MasterAddress = val } -var fieldIDToName_TMySqlLoadAcquireTokenResult_ = map[int16]string{ +var fieldIDToName_TGetBackendMetaResult_ = map[int16]string{ 1: "status", - 2: "token", + 2: "backends", + 3: "master_address", } -func (p *TMySqlLoadAcquireTokenResult_) IsSetStatus() bool { +func (p *TGetBackendMetaResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TMySqlLoadAcquireTokenResult_) IsSetToken() bool { - return p.Token != nil +func (p *TGetBackendMetaResult_) IsSetBackends() bool { + return p.Backends != nil } -func (p *TMySqlLoadAcquireTokenResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetBackendMetaResult_) IsSetMasterAddress() bool { + return p.MasterAddress != nil +} + +func (p *TGetBackendMetaResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -41436,27 +72805,31 @@ func (p *TMySqlLoadAcquireTokenResult_) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -41465,13 +72838,17 @@ func (p *TMySqlLoadAcquireTokenResult_) Read(iprot thrift.TProtocol) (err error) goto ReadStructEndError } + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMySqlLoadAcquireTokenResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBackendMetaResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -41479,28 +72856,53 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetBackendMetaResult_[fieldId])) } -func (p *TMySqlLoadAcquireTokenResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TGetBackendMetaResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } +func (p *TGetBackendMetaResult_) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TBackend, 0, size) + values := make([]types.TBackend, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TMySqlLoadAcquireTokenResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Backends = _field + return nil +} +func (p *TGetBackendMetaResult_) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Token = &v } + p.MasterAddress = _field return nil } -func (p *TMySqlLoadAcquireTokenResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetBackendMetaResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TMySqlLoadAcquireTokenResult"); err != nil { + if err = oprot.WriteStructBegin("TGetBackendMetaResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -41512,7 +72914,10 @@ func (p *TMySqlLoadAcquireTokenResult_) Write(oprot thrift.TProtocol) (err error fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError 
@@ -41531,12 +72936,37 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TMySqlLoadAcquireTokenResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TGetBackendMetaResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetBackendMetaResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBackends() { + if err = oprot.WriteFieldBegin("backends", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Backends)); err != nil { + return err + } + for _, v := range p.Backends { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41545,17 +72975,17 @@ func (p *TMySqlLoadAcquireTokenResult_) writeField1(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TMySqlLoadAcquireTokenResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { +func (p *TGetBackendMetaResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMasterAddress() { + if err = oprot.WriteFieldBegin("master_address", thrift.STRUCT, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Token); err != nil { + if err := p.MasterAddress.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41564,19 +72994,20 @@ func (p *TMySqlLoadAcquireTokenResult_) writeField2(oprot thrift.TProtocol) (err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TMySqlLoadAcquireTokenResult_) String() string { +func (p *TGetBackendMetaResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TMySqlLoadAcquireTokenResult_(%+v)", *p) + return fmt.Sprintf("TGetBackendMetaResult_(%+v)", *p) + } -func (p *TMySqlLoadAcquireTokenResult_) DeepEqual(ano *TMySqlLoadAcquireTokenResult_) bool { +func (p *TGetBackendMetaResult_) DeepEqual(ano *TGetBackendMetaResult_) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -41585,101 +73016,93 @@ func (p 
*TMySqlLoadAcquireTokenResult_) DeepEqual(ano *TMySqlLoadAcquireTokenRes if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field2DeepEqual(ano.Token) { + if !p.Field2DeepEqual(ano.Backends) { + return false + } + if !p.Field3DeepEqual(ano.MasterAddress) { return false } return true } -func (p *TMySqlLoadAcquireTokenResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TGetBackendMetaResult_) Field1DeepEqual(src *status.TStatus) bool { if !p.Status.DeepEqual(src) { return false } return true } -func (p *TMySqlLoadAcquireTokenResult_) Field2DeepEqual(src *string) bool { +func (p *TGetBackendMetaResult_) Field2DeepEqual(src []*types.TBackend) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { + if len(p.Backends) != len(src) { return false } - if strings.Compare(*p.Token, *src) != 0 { - return false + for i, v := range p.Backends { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } +func (p *TGetBackendMetaResult_) Field3DeepEqual(src *types.TNetworkAddress) bool { -type TTabletCooldownInfo struct { - TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"` - CooldownReplicaId *types.TReplicaId `thrift:"cooldown_replica_id,2,optional" frugal:"2,optional,i64" json:"cooldown_replica_id,omitempty"` - CooldownMetaId *types.TUniqueId `thrift:"cooldown_meta_id,3,optional" frugal:"3,optional,types.TUniqueId" json:"cooldown_meta_id,omitempty"` -} - -func NewTTabletCooldownInfo() *TTabletCooldownInfo { - return &TTabletCooldownInfo{} + if !p.MasterAddress.DeepEqual(src) { + return false + } + return true } -func (p *TTabletCooldownInfo) InitDefault() { - *p = TTabletCooldownInfo{} +type TColumnInfo struct { + ColumnName *string `thrift:"column_name,1,optional" frugal:"1,optional,string" json:"column_name,omitempty"` + ColumnId *int64 `thrift:"column_id,2,optional" frugal:"2,optional,i64" json:"column_id,omitempty"` } -var TTabletCooldownInfo_TabletId_DEFAULT types.TTabletId +func NewTColumnInfo() *TColumnInfo { + return &TColumnInfo{} +} -func (p *TTabletCooldownInfo) GetTabletId() (v types.TTabletId) { - if !p.IsSetTabletId() { - return TTabletCooldownInfo_TabletId_DEFAULT - } - return *p.TabletId +func (p *TColumnInfo) InitDefault() { } -var TTabletCooldownInfo_CooldownReplicaId_DEFAULT types.TReplicaId +var TColumnInfo_ColumnName_DEFAULT string -func (p *TTabletCooldownInfo) GetCooldownReplicaId() (v types.TReplicaId) { - if !p.IsSetCooldownReplicaId() { - return TTabletCooldownInfo_CooldownReplicaId_DEFAULT +func (p *TColumnInfo) GetColumnName() (v string) { + if !p.IsSetColumnName() { + return TColumnInfo_ColumnName_DEFAULT } - return *p.CooldownReplicaId + return *p.ColumnName } -var TTabletCooldownInfo_CooldownMetaId_DEFAULT *types.TUniqueId +var TColumnInfo_ColumnId_DEFAULT int64 -func (p *TTabletCooldownInfo) GetCooldownMetaId() (v *types.TUniqueId) { - if !p.IsSetCooldownMetaId() { - return TTabletCooldownInfo_CooldownMetaId_DEFAULT +func (p *TColumnInfo) GetColumnId() (v int64) { + if !p.IsSetColumnId() { + return TColumnInfo_ColumnId_DEFAULT } - return p.CooldownMetaId -} -func (p *TTabletCooldownInfo) SetTabletId(val *types.TTabletId) { - p.TabletId = val -} -func (p *TTabletCooldownInfo) SetCooldownReplicaId(val *types.TReplicaId) { - p.CooldownReplicaId = val + return *p.ColumnId } -func (p *TTabletCooldownInfo) SetCooldownMetaId(val *types.TUniqueId) { - p.CooldownMetaId = val +func (p *TColumnInfo) SetColumnName(val *string) { + p.ColumnName = val } - 
-var fieldIDToName_TTabletCooldownInfo = map[int16]string{ - 1: "tablet_id", - 2: "cooldown_replica_id", - 3: "cooldown_meta_id", +func (p *TColumnInfo) SetColumnId(val *int64) { + p.ColumnId = val } -func (p *TTabletCooldownInfo) IsSetTabletId() bool { - return p.TabletId != nil +var fieldIDToName_TColumnInfo = map[int16]string{ + 1: "column_name", + 2: "column_id", } -func (p *TTabletCooldownInfo) IsSetCooldownReplicaId() bool { - return p.CooldownReplicaId != nil +func (p *TColumnInfo) IsSetColumnName() bool { + return p.ColumnName != nil } -func (p *TTabletCooldownInfo) IsSetCooldownMetaId() bool { - return p.CooldownMetaId != nil +func (p *TColumnInfo) IsSetColumnId() bool { + return p.ColumnId != nil } -func (p *TTabletCooldownInfo) Read(iprot thrift.TProtocol) (err error) { +func (p *TColumnInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -41699,41 +73122,26 @@ func (p *TTabletCooldownInfo) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -41748,7 +73156,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletCooldownInfo[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnInfo[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -41758,35 +73166,32 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTabletCooldownInfo) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TColumnInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.ColumnName = _field return nil } +func (p *TColumnInfo) ReadField2(iprot thrift.TProtocol) error { -func (p *TTabletCooldownInfo) ReadField2(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownReplicaId = &v - } - return nil -} - -func (p *TTabletCooldownInfo) ReadField3(iprot thrift.TProtocol) error { - p.CooldownMetaId = types.NewTUniqueId() - if err := p.CooldownMetaId.Read(iprot); err != nil { - return err + _field = &v } + p.ColumnId = _field return nil } -func (p *TTabletCooldownInfo) Write(oprot 
thrift.TProtocol) (err error) { +func (p *TColumnInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletCooldownInfo"); err != nil { + if err = oprot.WriteStructBegin("TColumnInfo"); err != nil { goto WriteStructBeginError } if p != nil { @@ -41798,11 +73203,6 @@ func (p *TTabletCooldownInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -41821,12 +73221,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletCooldownInfo) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTabletId() { - if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil { +func (p *TColumnInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnName() { + if err = oprot.WriteFieldBegin("column_name", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TabletId); err != nil { + if err := oprot.WriteString(*p.ColumnName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41840,12 +73240,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletCooldownInfo) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetCooldownReplicaId() { - if err = oprot.WriteFieldBegin("cooldown_replica_id", thrift.I64, 2); err != nil { +func (p *TColumnInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnId() { + if err = oprot.WriteFieldBegin("column_id", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.CooldownReplicaId); err != nil { + if err := oprot.WriteI64(*p.ColumnId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -41859,115 +73259,104 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTabletCooldownInfo) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetCooldownMetaId() { - if err = oprot.WriteFieldBegin("cooldown_meta_id", thrift.STRUCT, 3); err != nil { - goto WriteFieldBeginError - } - if err := p.CooldownMetaId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TTabletCooldownInfo) String() string { +func (p *TColumnInfo) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletCooldownInfo(%+v)", *p) + return fmt.Sprintf("TColumnInfo(%+v)", *p) + } -func (p *TTabletCooldownInfo) DeepEqual(ano *TTabletCooldownInfo) bool { +func (p *TColumnInfo) DeepEqual(ano *TColumnInfo) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TabletId) { - return false - } - if !p.Field2DeepEqual(ano.CooldownReplicaId) { + if !p.Field1DeepEqual(ano.ColumnName) { return false } - if !p.Field3DeepEqual(ano.CooldownMetaId) { + if !p.Field2DeepEqual(ano.ColumnId) { return false } return true } -func (p *TTabletCooldownInfo) Field1DeepEqual(src *types.TTabletId) bool { +func (p *TColumnInfo) Field1DeepEqual(src 
*string) bool { - if p.TabletId == src { + if p.ColumnName == src { return true - } else if p.TabletId == nil || src == nil { + } else if p.ColumnName == nil || src == nil { return false } - if *p.TabletId != *src { + if strings.Compare(*p.ColumnName, *src) != 0 { return false } return true } -func (p *TTabletCooldownInfo) Field2DeepEqual(src *types.TReplicaId) bool { +func (p *TColumnInfo) Field2DeepEqual(src *int64) bool { - if p.CooldownReplicaId == src { + if p.ColumnId == src { return true - } else if p.CooldownReplicaId == nil || src == nil { + } else if p.ColumnId == nil || src == nil { return false } - if *p.CooldownReplicaId != *src { + if *p.ColumnId != *src { return false } return true } -func (p *TTabletCooldownInfo) Field3DeepEqual(src *types.TUniqueId) bool { - if !p.CooldownMetaId.DeepEqual(src) { - return false - } - return true +type TGetColumnInfoRequest struct { + DbId *int64 `thrift:"db_id,1,optional" frugal:"1,optional,i64" json:"db_id,omitempty"` + TableId *int64 `thrift:"table_id,2,optional" frugal:"2,optional,i64" json:"table_id,omitempty"` } -type TConfirmUnusedRemoteFilesRequest struct { - ConfirmList []*TTabletCooldownInfo `thrift:"confirm_list,1,optional" frugal:"1,optional,list" json:"confirm_list,omitempty"` +func NewTGetColumnInfoRequest() *TGetColumnInfoRequest { + return &TGetColumnInfoRequest{} } -func NewTConfirmUnusedRemoteFilesRequest() *TConfirmUnusedRemoteFilesRequest { - return &TConfirmUnusedRemoteFilesRequest{} +func (p *TGetColumnInfoRequest) InitDefault() { } -func (p *TConfirmUnusedRemoteFilesRequest) InitDefault() { - *p = TConfirmUnusedRemoteFilesRequest{} +var TGetColumnInfoRequest_DbId_DEFAULT int64 + +func (p *TGetColumnInfoRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TGetColumnInfoRequest_DbId_DEFAULT + } + return *p.DbId } -var TConfirmUnusedRemoteFilesRequest_ConfirmList_DEFAULT []*TTabletCooldownInfo +var TGetColumnInfoRequest_TableId_DEFAULT int64 -func (p *TConfirmUnusedRemoteFilesRequest) GetConfirmList() (v []*TTabletCooldownInfo) { - if !p.IsSetConfirmList() { - return TConfirmUnusedRemoteFilesRequest_ConfirmList_DEFAULT +func (p *TGetColumnInfoRequest) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TGetColumnInfoRequest_TableId_DEFAULT } - return p.ConfirmList + return *p.TableId } -func (p *TConfirmUnusedRemoteFilesRequest) SetConfirmList(val []*TTabletCooldownInfo) { - p.ConfirmList = val +func (p *TGetColumnInfoRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TGetColumnInfoRequest) SetTableId(val *int64) { + p.TableId = val } -var fieldIDToName_TConfirmUnusedRemoteFilesRequest = map[int16]string{ - 1: "confirm_list", +var fieldIDToName_TGetColumnInfoRequest = map[int16]string{ + 1: "db_id", + 2: "table_id", } -func (p *TConfirmUnusedRemoteFilesRequest) IsSetConfirmList() bool { - return p.ConfirmList != nil +func (p *TGetColumnInfoRequest) IsSetDbId() bool { + return p.DbId != nil } -func (p *TConfirmUnusedRemoteFilesRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetColumnInfoRequest) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TGetColumnInfoRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -41987,21 +73376,26 @@ func (p *TConfirmUnusedRemoteFilesRequest) Read(iprot thrift.TProtocol) (err err switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil 
{ - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -42016,7 +73410,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetColumnInfoRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -42026,29 +73420,32 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesRequest) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { +func (p *TGetColumnInfoRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } - p.ConfirmList = make([]*TTabletCooldownInfo, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTabletCooldownInfo() - if err := _elem.Read(iprot); err != nil { - return err - } + p.DbId = _field + return nil +} +func (p *TGetColumnInfoRequest) ReadField2(iprot thrift.TProtocol) error { - p.ConfirmList = append(p.ConfirmList, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.TableId = _field return nil } -func (p *TConfirmUnusedRemoteFilesRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetColumnInfoRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TConfirmUnusedRemoteFilesRequest"); err != nil { + if err = oprot.WriteStructBegin("TGetColumnInfoRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -42056,7 +73453,10 @@ func (p *TConfirmUnusedRemoteFilesRequest) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -42075,20 +73475,31 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetConfirmList() { - if err = oprot.WriteFieldBegin("confirm_list", thrift.LIST, 1); err != nil { +func (p *TGetColumnInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ConfirmList)); err != nil { + if err := oprot.WriteI64(*p.DbId); err != nil { return err } - for _, v := range p.ConfirmList { - if err := v.Write(oprot); err != nil { - return err - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err := 
oprot.WriteListEnd(); err != nil { + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetColumnInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("table_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -42097,77 +73508,109 @@ func (p *TConfirmUnusedRemoteFilesRequest) writeField1(oprot thrift.TProtocol) ( } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesRequest) String() string { +func (p *TGetColumnInfoRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TConfirmUnusedRemoteFilesRequest(%+v)", *p) + return fmt.Sprintf("TGetColumnInfoRequest(%+v)", *p) + } -func (p *TConfirmUnusedRemoteFilesRequest) DeepEqual(ano *TConfirmUnusedRemoteFilesRequest) bool { +func (p *TGetColumnInfoRequest) DeepEqual(ano *TGetColumnInfoRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ConfirmList) { + if !p.Field1DeepEqual(ano.DbId) { + return false + } + if !p.Field2DeepEqual(ano.TableId) { return false } return true } -func (p *TConfirmUnusedRemoteFilesRequest) Field1DeepEqual(src []*TTabletCooldownInfo) bool { +func (p *TGetColumnInfoRequest) Field1DeepEqual(src *int64) bool { - if len(p.ConfirmList) != len(src) { + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { return false } - for i, v := range p.ConfirmList { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if *p.DbId != *src { + return false } return true } +func (p *TGetColumnInfoRequest) Field2DeepEqual(src *int64) bool { -type TConfirmUnusedRemoteFilesResult_ struct { - ConfirmedTablets []types.TTabletId `thrift:"confirmed_tablets,1,optional" frugal:"1,optional,list" json:"confirmed_tablets,omitempty"` + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true } -func NewTConfirmUnusedRemoteFilesResult_() *TConfirmUnusedRemoteFilesResult_ { - return &TConfirmUnusedRemoteFilesResult_{} +type TGetColumnInfoResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + Columns []*TColumnInfo `thrift:"columns,2,optional" frugal:"2,optional,list" json:"columns,omitempty"` } -func (p *TConfirmUnusedRemoteFilesResult_) InitDefault() { - *p = TConfirmUnusedRemoteFilesResult_{} +func NewTGetColumnInfoResult_() *TGetColumnInfoResult_ { + return &TGetColumnInfoResult_{} } -var TConfirmUnusedRemoteFilesResult__ConfirmedTablets_DEFAULT []types.TTabletId +func (p *TGetColumnInfoResult_) InitDefault() { +} -func (p *TConfirmUnusedRemoteFilesResult_) GetConfirmedTablets() (v []types.TTabletId) { - if !p.IsSetConfirmedTablets() { - return TConfirmUnusedRemoteFilesResult__ConfirmedTablets_DEFAULT +var 
TGetColumnInfoResult__Status_DEFAULT *status.TStatus + +func (p *TGetColumnInfoResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TGetColumnInfoResult__Status_DEFAULT } - return p.ConfirmedTablets + return p.Status } -func (p *TConfirmUnusedRemoteFilesResult_) SetConfirmedTablets(val []types.TTabletId) { - p.ConfirmedTablets = val + +var TGetColumnInfoResult__Columns_DEFAULT []*TColumnInfo + +func (p *TGetColumnInfoResult_) GetColumns() (v []*TColumnInfo) { + if !p.IsSetColumns() { + return TGetColumnInfoResult__Columns_DEFAULT + } + return p.Columns +} +func (p *TGetColumnInfoResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *TGetColumnInfoResult_) SetColumns(val []*TColumnInfo) { + p.Columns = val } -var fieldIDToName_TConfirmUnusedRemoteFilesResult_ = map[int16]string{ - 1: "confirmed_tablets", +var fieldIDToName_TGetColumnInfoResult_ = map[int16]string{ + 1: "status", + 2: "columns", } -func (p *TConfirmUnusedRemoteFilesResult_) IsSetConfirmedTablets() bool { - return p.ConfirmedTablets != nil +func (p *TGetColumnInfoResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TConfirmUnusedRemoteFilesResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TGetColumnInfoResult_) IsSetColumns() bool { + return p.Columns != nil +} + +func (p *TGetColumnInfoResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -42187,21 +73630,26 @@ func (p *TConfirmUnusedRemoteFilesResult_) Read(iprot thrift.TProtocol) (err err switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -42216,7 +73664,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetColumnInfoResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -42226,31 +73674,41 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesResult_) ReadField1(iprot thrift.TProtocol) error { +func (p *TGetColumnInfoResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *TGetColumnInfoResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConfirmedTablets = make([]types.TTabletId, 0, size) + _field := make([]*TColumnInfo, 0, size) + values := make([]TColumnInfo, size) for i := 0; i < size; i++ { - var _elem types.TTabletId - if 
v, err := iprot.ReadI64(); err != nil { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err - } else { - _elem = v } - p.ConfirmedTablets = append(p.ConfirmedTablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } -func (p *TConfirmUnusedRemoteFilesResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TGetColumnInfoResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TConfirmUnusedRemoteFilesResult"); err != nil { + if err = oprot.WriteStructBegin("TGetColumnInfoResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -42258,7 +73716,10 @@ func (p *TConfirmUnusedRemoteFilesResult_) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -42277,16 +73738,35 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetConfirmedTablets() { - if err = oprot.WriteFieldBegin("confirmed_tablets", thrift.LIST, 1); err != nil { +func (p *TGetColumnInfoResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.I64, len(p.ConfirmedTablets)); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } - for _, v := range p.ConfirmedTablets { - if err := oprot.WriteI64(v); err != nil { + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGetColumnInfoResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetColumns() { + if err = oprot.WriteFieldBegin("columns", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { + return err + } + for _, v := range p.Columns { + if err := v.Write(oprot); err != nil { return err } } @@ -42299,162 +73779,108 @@ func (p *TConfirmUnusedRemoteFilesResult_) writeField1(oprot thrift.TProtocol) ( } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesResult_) String() string { +func (p *TGetColumnInfoResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TConfirmUnusedRemoteFilesResult_(%+v)", *p) + return fmt.Sprintf("TGetColumnInfoResult_(%+v)", *p) + } -func (p *TConfirmUnusedRemoteFilesResult_) DeepEqual(ano *TConfirmUnusedRemoteFilesResult_) bool { +func (p *TGetColumnInfoResult_) DeepEqual(ano *TGetColumnInfoResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if 
!p.Field1DeepEqual(ano.ConfirmedTablets) { + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.Columns) { return false } return true } -func (p *TConfirmUnusedRemoteFilesResult_) Field1DeepEqual(src []types.TTabletId) bool { +func (p *TGetColumnInfoResult_) Field1DeepEqual(src *status.TStatus) bool { - if len(p.ConfirmedTablets) != len(src) { + if !p.Status.DeepEqual(src) { return false } - for i, v := range p.ConfirmedTablets { + return true +} +func (p *TGetColumnInfoResult_) Field2DeepEqual(src []*TColumnInfo) bool { + + if len(p.Columns) != len(src) { + return false + } + for i, v := range p.Columns { _src := src[i] - if v != _src { + if !v.DeepEqual(_src) { return false } } return true } -type TPrivilegeCtrl struct { - PrivHier TPrivilegeHier `thrift:"priv_hier,1,required" frugal:"1,required,TPrivilegeHier" json:"priv_hier"` - Ctl *string `thrift:"ctl,2,optional" frugal:"2,optional,string" json:"ctl,omitempty"` - Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` - Tbl *string `thrift:"tbl,4,optional" frugal:"4,optional,string" json:"tbl,omitempty"` - Cols []string `thrift:"cols,5,optional" frugal:"5,optional,set" json:"cols,omitempty"` - Res *string `thrift:"res,6,optional" frugal:"6,optional,string" json:"res,omitempty"` -} - -func NewTPrivilegeCtrl() *TPrivilegeCtrl { - return &TPrivilegeCtrl{} -} - -func (p *TPrivilegeCtrl) InitDefault() { - *p = TPrivilegeCtrl{} -} - -func (p *TPrivilegeCtrl) GetPrivHier() (v TPrivilegeHier) { - return p.PrivHier -} - -var TPrivilegeCtrl_Ctl_DEFAULT string - -func (p *TPrivilegeCtrl) GetCtl() (v string) { - if !p.IsSetCtl() { - return TPrivilegeCtrl_Ctl_DEFAULT - } - return *p.Ctl +type TShowProcessListRequest struct { + ShowFullSql *bool `thrift:"show_full_sql,1,optional" frugal:"1,optional,bool" json:"show_full_sql,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,2,optional" frugal:"2,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` } -var TPrivilegeCtrl_Db_DEFAULT string - -func (p *TPrivilegeCtrl) GetDb() (v string) { - if !p.IsSetDb() { - return TPrivilegeCtrl_Db_DEFAULT - } - return *p.Db +func NewTShowProcessListRequest() *TShowProcessListRequest { + return &TShowProcessListRequest{} } -var TPrivilegeCtrl_Tbl_DEFAULT string - -func (p *TPrivilegeCtrl) GetTbl() (v string) { - if !p.IsSetTbl() { - return TPrivilegeCtrl_Tbl_DEFAULT - } - return *p.Tbl +func (p *TShowProcessListRequest) InitDefault() { } -var TPrivilegeCtrl_Cols_DEFAULT []string +var TShowProcessListRequest_ShowFullSql_DEFAULT bool -func (p *TPrivilegeCtrl) GetCols() (v []string) { - if !p.IsSetCols() { - return TPrivilegeCtrl_Cols_DEFAULT +func (p *TShowProcessListRequest) GetShowFullSql() (v bool) { + if !p.IsSetShowFullSql() { + return TShowProcessListRequest_ShowFullSql_DEFAULT } - return p.Cols + return *p.ShowFullSql } -var TPrivilegeCtrl_Res_DEFAULT string +var TShowProcessListRequest_CurrentUserIdent_DEFAULT *types.TUserIdentity -func (p *TPrivilegeCtrl) GetRes() (v string) { - if !p.IsSetRes() { - return TPrivilegeCtrl_Res_DEFAULT +func (p *TShowProcessListRequest) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TShowProcessListRequest_CurrentUserIdent_DEFAULT } - return *p.Res -} -func (p *TPrivilegeCtrl) SetPrivHier(val TPrivilegeHier) { - p.PrivHier = val -} -func (p *TPrivilegeCtrl) SetCtl(val *string) { - p.Ctl = val -} -func (p *TPrivilegeCtrl) SetDb(val *string) { - p.Db = val -} -func (p *TPrivilegeCtrl) 
SetTbl(val *string) { - p.Tbl = val -} -func (p *TPrivilegeCtrl) SetCols(val []string) { - p.Cols = val -} -func (p *TPrivilegeCtrl) SetRes(val *string) { - p.Res = val -} - -var fieldIDToName_TPrivilegeCtrl = map[int16]string{ - 1: "priv_hier", - 2: "ctl", - 3: "db", - 4: "tbl", - 5: "cols", - 6: "res", + return p.CurrentUserIdent } - -func (p *TPrivilegeCtrl) IsSetCtl() bool { - return p.Ctl != nil +func (p *TShowProcessListRequest) SetShowFullSql(val *bool) { + p.ShowFullSql = val } - -func (p *TPrivilegeCtrl) IsSetDb() bool { - return p.Db != nil +func (p *TShowProcessListRequest) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val } -func (p *TPrivilegeCtrl) IsSetTbl() bool { - return p.Tbl != nil +var fieldIDToName_TShowProcessListRequest = map[int16]string{ + 1: "show_full_sql", + 2: "current_user_ident", } -func (p *TPrivilegeCtrl) IsSetCols() bool { - return p.Cols != nil +func (p *TShowProcessListRequest) IsSetShowFullSql() bool { + return p.ShowFullSql != nil } -func (p *TPrivilegeCtrl) IsSetRes() bool { - return p.Res != nil +func (p *TShowProcessListRequest) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil } -func (p *TPrivilegeCtrl) Read(iprot thrift.TProtocol) (err error) { +func (p *TShowProcessListRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetPrivHier bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -42471,72 +73897,26 @@ func (p *TPrivilegeCtrl) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetPrivHier = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.SET { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -42545,17 +73925,13 @@ func (p *TPrivilegeCtrl) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetPrivHier { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin 
error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPrivilegeCtrl[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowProcessListRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -42563,80 +73939,31 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPrivilegeCtrl[fieldId])) -} - -func (p *TPrivilegeCtrl) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.PrivHier = TPrivilegeHier(v) - } - return nil -} - -func (p *TPrivilegeCtrl) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Ctl = &v - } - return nil } -func (p *TPrivilegeCtrl) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v - } - return nil -} +func (p *TShowProcessListRequest) ReadField1(iprot thrift.TProtocol) error { -func (p *TPrivilegeCtrl) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Tbl = &v - } - return nil -} - -func (p *TPrivilegeCtrl) ReadField5(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadSetBegin() - if err != nil { - return err - } - p.Cols = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } - - p.Cols = append(p.Cols, _elem) - } - if err := iprot.ReadSetEnd(); err != nil { - return err + _field = &v } + p.ShowFullSql = _field return nil } - -func (p *TPrivilegeCtrl) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TShowProcessListRequest) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Res = &v } + p.CurrentUserIdent = _field return nil } -func (p *TPrivilegeCtrl) Write(oprot thrift.TProtocol) (err error) { +func (p *TShowProcessListRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TPrivilegeCtrl"); err != nil { + if err = oprot.WriteStructBegin("TShowProcessListRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -42648,23 +73975,6 @@ func (p *TPrivilegeCtrl) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -42683,29 +73993,12 @@ 
WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPrivilegeCtrl) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("priv_hier", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.PrivHier)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TPrivilegeCtrl) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetCtl() { - if err = oprot.WriteFieldBegin("ctl", thrift.STRING, 2); err != nil { +func (p *TShowProcessListRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetShowFullSql() { + if err = oprot.WriteFieldBegin("show_full_sql", thrift.BOOL, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Ctl); err != nil { + if err := oprot.WriteBool(*p.ShowFullSql); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -42714,17 +74007,17 @@ func (p *TPrivilegeCtrl) writeField2(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPrivilegeCtrl) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 3); err != nil { +func (p *TShowProcessListRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Db); err != nil { + if err := p.CurrentUserIdent.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -42733,75 +74026,227 @@ func (p *TPrivilegeCtrl) writeField3(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPrivilegeCtrl) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTbl() { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError +func (p *TShowProcessListRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TShowProcessListRequest(%+v)", *p) + +} + +func (p *TShowProcessListRequest) DeepEqual(ano *TShowProcessListRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ShowFullSql) { + return false + } + if !p.Field2DeepEqual(ano.CurrentUserIdent) { + return false + } + return true +} + +func (p *TShowProcessListRequest) Field1DeepEqual(src *bool) bool { + + if p.ShowFullSql == src { + return true + } else 
if p.ShowFullSql == nil || src == nil { + return false + } + if *p.ShowFullSql != *src { + return false + } + return true +} +func (p *TShowProcessListRequest) Field2DeepEqual(src *types.TUserIdentity) bool { + + if !p.CurrentUserIdent.DeepEqual(src) { + return false + } + return true +} + +type TShowProcessListResult_ struct { + ProcessList [][]string `thrift:"process_list,1,optional" frugal:"1,optional,list>" json:"process_list,omitempty"` +} + +func NewTShowProcessListResult_() *TShowProcessListResult_ { + return &TShowProcessListResult_{} +} + +func (p *TShowProcessListResult_) InitDefault() { +} + +var TShowProcessListResult__ProcessList_DEFAULT [][]string + +func (p *TShowProcessListResult_) GetProcessList() (v [][]string) { + if !p.IsSetProcessList() { + return TShowProcessListResult__ProcessList_DEFAULT + } + return p.ProcessList +} +func (p *TShowProcessListResult_) SetProcessList(val [][]string) { + p.ProcessList = val +} + +var fieldIDToName_TShowProcessListResult_ = map[int16]string{ + 1: "process_list", +} + +func (p *TShowProcessListResult_) IsSetProcessList() bool { + return p.ProcessList != nil +} + +func (p *TShowProcessListResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteString(*p.Tbl); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowProcessListResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPrivilegeCtrl) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetCols() { - if err = oprot.WriteFieldBegin("cols", thrift.SET, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteSetBegin(thrift.STRING, len(p.Cols)); err != nil { +func (p *TShowProcessListResult_) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([][]string, 0, size) + for i := 0; i < size; i++ { + _, size, err := 
iprot.ReadListBegin() + if err != nil { return err } - for i := 0; i < len(p.Cols); i++ { - for j := i + 1; j < len(p.Cols); j++ { - if func(tgt, src string) bool { - if strings.Compare(tgt, src) != 0 { - return false - } - return true - }(p.Cols[i], p.Cols[j]) { - return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) - } - } - } - for _, v := range p.Cols { - if err := oprot.WriteString(v); err != nil { + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem1 string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _elem1 = v } + + _elem = append(_elem, _elem1) } - if err := oprot.WriteSetEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ProcessList = _field + return nil +} + +func (p *TShowProcessListResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TShowProcessListResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPrivilegeCtrl) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetRes() { - if err = oprot.WriteFieldBegin("res", thrift.STRING, 6); err != nil { +func (p *TShowProcessListResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetProcessList() { + if err = oprot.WriteFieldBegin("process_list", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Res); err != nil { + if err := oprot.WriteListBegin(thrift.LIST, len(p.ProcessList)); err != nil { + return err + } + for _, v := range p.ProcessList { + if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { + return err + } + for _, v := range v { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -42810,242 +74255,178 @@ func (p *TPrivilegeCtrl) writeField6(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPrivilegeCtrl) String() 
string { +func (p *TShowProcessListResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TPrivilegeCtrl(%+v)", *p) + return fmt.Sprintf("TShowProcessListResult_(%+v)", *p) + } -func (p *TPrivilegeCtrl) DeepEqual(ano *TPrivilegeCtrl) bool { +func (p *TShowProcessListResult_) DeepEqual(ano *TShowProcessListResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.PrivHier) { - return false - } - if !p.Field2DeepEqual(ano.Ctl) { - return false - } - if !p.Field3DeepEqual(ano.Db) { - return false - } - if !p.Field4DeepEqual(ano.Tbl) { - return false - } - if !p.Field5DeepEqual(ano.Cols) { - return false - } - if !p.Field6DeepEqual(ano.Res) { - return false - } - return true -} - -func (p *TPrivilegeCtrl) Field1DeepEqual(src TPrivilegeHier) bool { - - if p.PrivHier != src { - return false - } - return true -} -func (p *TPrivilegeCtrl) Field2DeepEqual(src *string) bool { - - if p.Ctl == src { - return true - } else if p.Ctl == nil || src == nil { - return false - } - if strings.Compare(*p.Ctl, *src) != 0 { - return false - } - return true -} -func (p *TPrivilegeCtrl) Field3DeepEqual(src *string) bool { - - if p.Db == src { - return true - } else if p.Db == nil || src == nil { - return false - } - if strings.Compare(*p.Db, *src) != 0 { + if !p.Field1DeepEqual(ano.ProcessList) { return false } return true } -func (p *TPrivilegeCtrl) Field4DeepEqual(src *string) bool { - if p.Tbl == src { - return true - } else if p.Tbl == nil || src == nil { - return false - } - if strings.Compare(*p.Tbl, *src) != 0 { - return false - } - return true -} -func (p *TPrivilegeCtrl) Field5DeepEqual(src []string) bool { +func (p *TShowProcessListResult_) Field1DeepEqual(src [][]string) bool { - if len(p.Cols) != len(src) { + if len(p.ProcessList) != len(src) { return false } - for i, v := range p.Cols { + for i, v := range p.ProcessList { _src := src[i] - if strings.Compare(v, _src) != 0 { + if len(v) != len(_src) { return false } + for i, v := range v { + _src1 := _src[i] + if strings.Compare(v, _src1) != 0 { + return false + } + } } return true } -func (p *TPrivilegeCtrl) Field6DeepEqual(src *string) bool { - - if p.Res == src { - return true - } else if p.Res == nil || src == nil { - return false - } - if strings.Compare(*p.Res, *src) != 0 { - return false - } - return true -} - -type TCheckAuthRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User string `thrift:"user,2,required" frugal:"2,required,string" json:"user"` - Passwd string `thrift:"passwd,3,required" frugal:"3,required,string" json:"passwd"` - UserIp *string `thrift:"user_ip,4,optional" frugal:"4,optional,string" json:"user_ip,omitempty"` - PrivCtrl *TPrivilegeCtrl `thrift:"priv_ctrl,5,optional" frugal:"5,optional,TPrivilegeCtrl" json:"priv_ctrl,omitempty"` - PrivType *TPrivilegeType `thrift:"priv_type,6,optional" frugal:"6,optional,TPrivilegeType" json:"priv_type,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,7,optional" frugal:"7,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` -} -func NewTCheckAuthRequest() *TCheckAuthRequest { - return &TCheckAuthRequest{} +type TShowUserRequest struct { } -func (p *TCheckAuthRequest) InitDefault() { - *p = TCheckAuthRequest{} +func NewTShowUserRequest() *TShowUserRequest { + return &TShowUserRequest{} } -var TCheckAuthRequest_Cluster_DEFAULT string - -func (p *TCheckAuthRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - 
return TCheckAuthRequest_Cluster_DEFAULT - } - return *p.Cluster +func (p *TShowUserRequest) InitDefault() { } -func (p *TCheckAuthRequest) GetUser() (v string) { - return p.User -} +var fieldIDToName_TShowUserRequest = map[int16]string{} -func (p *TCheckAuthRequest) GetPasswd() (v string) { - return p.Passwd -} +func (p *TShowUserRequest) Read(iprot thrift.TProtocol) (err error) { -var TCheckAuthRequest_UserIp_DEFAULT string + var fieldTypeId thrift.TType + var fieldId int16 -func (p *TCheckAuthRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TCheckAuthRequest_UserIp_DEFAULT + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return *p.UserIp -} - -var TCheckAuthRequest_PrivCtrl_DEFAULT *TPrivilegeCtrl -func (p *TCheckAuthRequest) GetPrivCtrl() (v *TPrivilegeCtrl) { - if !p.IsSetPrivCtrl() { - return TCheckAuthRequest_PrivCtrl_DEFAULT + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return p.PrivCtrl -} -var TCheckAuthRequest_PrivType_DEFAULT TPrivilegeType + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) -func (p *TCheckAuthRequest) GetPrivType() (v TPrivilegeType) { - if !p.IsSetPrivType() { - return TCheckAuthRequest_PrivType_DEFAULT - } - return *p.PrivType +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -var TCheckAuthRequest_ThriftRpcTimeoutMs_DEFAULT int64 - -func (p *TCheckAuthRequest) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TCheckAuthRequest_ThriftRpcTimeoutMs_DEFAULT +func (p *TShowUserRequest) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TShowUserRequest"); err != nil { + goto WriteStructBeginError } - return *p.ThriftRpcTimeoutMs -} -func (p *TCheckAuthRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TCheckAuthRequest) SetUser(val string) { - p.User = val -} -func (p *TCheckAuthRequest) SetPasswd(val string) { - p.Passwd = val -} -func (p *TCheckAuthRequest) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TCheckAuthRequest) SetPrivCtrl(val *TPrivilegeCtrl) { - p.PrivCtrl = val + if p != nil { + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCheckAuthRequest) SetPrivType(val *TPrivilegeType) { - p.PrivType = val + +func (p *TShowUserRequest) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("TShowUserRequest(%+v)", *p) + } -func (p *TCheckAuthRequest) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val + +func (p *TShowUserRequest) DeepEqual(ano *TShowUserRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true } -var fieldIDToName_TCheckAuthRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "user_ip", - 5: "priv_ctrl", - 6: "priv_type", - 7: "thrift_rpc_timeout_ms", +type TShowUserResult_ struct { + UserinfoList [][]string `thrift:"userinfo_list,1,optional" frugal:"1,optional,list>" json:"userinfo_list,omitempty"` } -func (p *TCheckAuthRequest) IsSetCluster() bool { - return p.Cluster != nil +func NewTShowUserResult_() *TShowUserResult_ { + return &TShowUserResult_{} } -func (p *TCheckAuthRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TShowUserResult_) InitDefault() { } -func (p *TCheckAuthRequest) IsSetPrivCtrl() bool { - return p.PrivCtrl != nil +var TShowUserResult__UserinfoList_DEFAULT [][]string + +func (p *TShowUserResult_) GetUserinfoList() (v [][]string) { + if !p.IsSetUserinfoList() { + return TShowUserResult__UserinfoList_DEFAULT + } + return p.UserinfoList +} +func (p *TShowUserResult_) SetUserinfoList(val [][]string) { + p.UserinfoList = val } -func (p *TCheckAuthRequest) IsSetPrivType() bool { - return p.PrivType != nil +var fieldIDToName_TShowUserResult_ = map[int16]string{ + 1: "userinfo_list", } -func (p *TCheckAuthRequest) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil +func (p *TShowUserResult_) IsSetUserinfoList() bool { + return p.UserinfoList != nil } -func (p *TCheckAuthRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TShowUserResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -43062,83 +74443,18 @@ func (p *TCheckAuthRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I32 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -43147,22 +74463,13 @@ func (p *TCheckAuthRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowUserResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -43170,76 +74477,47 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthRequest[fieldId])) -} - -func (p *TCheckAuthRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil } -func (p *TCheckAuthRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = v - } - return nil -} - -func (p *TCheckAuthRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TShowUserResult_) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.Passwd = v } - return nil -} + _field := make([][]string, 0, size) + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { -func (p *TCheckAuthRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil -} + var _elem1 string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem1 = v + } -func (p *TCheckAuthRequest) ReadField5(iprot thrift.TProtocol) error { - p.PrivCtrl = NewTPrivilegeCtrl() - if err := p.PrivCtrl.Read(iprot); err != nil { - return err - } - return nil -} + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } -func (p *TCheckAuthRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := TPrivilegeType(v) - p.PrivType = &tmp + _field = append(_field, _elem) } - return nil -} - -func (p *TCheckAuthRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.ThriftRpcTimeoutMs = &v } + p.UserinfoList = _field return nil } -func (p 
*TCheckAuthRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TShowUserResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCheckAuthRequest"); err != nil { + if err = oprot.WriteStructBegin("TShowUserResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -43247,31 +74525,6 @@ func (p *TCheckAuthRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -43290,122 +74543,28 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCheckAuthRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Cluster); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField5(oprot 
thrift.TProtocol) (err error) { - if p.IsSetPrivCtrl() { - if err = oprot.WriteFieldBegin("priv_ctrl", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.PrivCtrl.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetPrivType() { - if err = oprot.WriteFieldBegin("priv_type", thrift.I32, 6); err != nil { +func (p *TShowUserResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetUserinfoList() { + if err = oprot.WriteFieldBegin("userinfo_list", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.PrivType)); err != nil { + if err := oprot.WriteListBegin(thrift.LIST, len(p.UserinfoList)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TCheckAuthRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 7); err != nil { - goto WriteFieldBeginError + for _, v := range p.UserinfoList { + if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { + return err + } + for _, v := range v { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -43414,155 +74573,140 @@ func (p *TCheckAuthRequest) writeField7(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCheckAuthRequest) String() string { +func (p *TShowUserResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TCheckAuthRequest(%+v)", *p) + return fmt.Sprintf("TShowUserResult_(%+v)", *p) + } -func (p *TCheckAuthRequest) DeepEqual(ano *TCheckAuthRequest) bool { +func (p *TShowUserResult_) DeepEqual(ano *TShowUserResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.UserIp) { - return false - } - if !p.Field5DeepEqual(ano.PrivCtrl) { - return false - } - if !p.Field6DeepEqual(ano.PrivType) { - return false - } - if !p.Field7DeepEqual(ano.ThriftRpcTimeoutMs) { + if !p.Field1DeepEqual(ano.UserinfoList) { return false } return true } -func (p *TCheckAuthRequest) Field1DeepEqual(src *string) bool { +func 
(p *TShowUserResult_) Field1DeepEqual(src [][]string) bool { - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { + if len(p.UserinfoList) != len(src) { return false } - if strings.Compare(*p.Cluster, *src) != 0 { - return false + for i, v := range p.UserinfoList { + _src := src[i] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if strings.Compare(v, _src1) != 0 { + return false + } + } } return true } -func (p *TCheckAuthRequest) Field2DeepEqual(src string) bool { - if strings.Compare(p.User, src) != 0 { - return false - } - return true +type TReportCommitTxnResultRequest struct { + DbId *int64 `thrift:"dbId,1,optional" frugal:"1,optional,i64" json:"dbId,omitempty"` + TxnId *int64 `thrift:"txnId,2,optional" frugal:"2,optional,i64" json:"txnId,omitempty"` + Label *string `thrift:"label,3,optional" frugal:"3,optional,string" json:"label,omitempty"` + Payload []byte `thrift:"payload,4,optional" frugal:"4,optional,binary" json:"payload,omitempty"` } -func (p *TCheckAuthRequest) Field3DeepEqual(src string) bool { - if strings.Compare(p.Passwd, src) != 0 { - return false - } - return true +func NewTReportCommitTxnResultRequest() *TReportCommitTxnResultRequest { + return &TReportCommitTxnResultRequest{} } -func (p *TCheckAuthRequest) Field4DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false - } - if strings.Compare(*p.UserIp, *src) != 0 { - return false - } - return true +func (p *TReportCommitTxnResultRequest) InitDefault() { } -func (p *TCheckAuthRequest) Field5DeepEqual(src *TPrivilegeCtrl) bool { - if !p.PrivCtrl.DeepEqual(src) { - return false +var TReportCommitTxnResultRequest_DbId_DEFAULT int64 + +func (p *TReportCommitTxnResultRequest) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TReportCommitTxnResultRequest_DbId_DEFAULT } - return true + return *p.DbId } -func (p *TCheckAuthRequest) Field6DeepEqual(src *TPrivilegeType) bool { - if p.PrivType == src { - return true - } else if p.PrivType == nil || src == nil { - return false - } - if *p.PrivType != *src { - return false +var TReportCommitTxnResultRequest_TxnId_DEFAULT int64 + +func (p *TReportCommitTxnResultRequest) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TReportCommitTxnResultRequest_TxnId_DEFAULT } - return true + return *p.TxnId } -func (p *TCheckAuthRequest) Field7DeepEqual(src *int64) bool { - if p.ThriftRpcTimeoutMs == src { - return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { - return false - } - if *p.ThriftRpcTimeoutMs != *src { - return false +var TReportCommitTxnResultRequest_Label_DEFAULT string + +func (p *TReportCommitTxnResultRequest) GetLabel() (v string) { + if !p.IsSetLabel() { + return TReportCommitTxnResultRequest_Label_DEFAULT } - return true + return *p.Label } -type TCheckAuthResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` -} +var TReportCommitTxnResultRequest_Payload_DEFAULT []byte -func NewTCheckAuthResult_() *TCheckAuthResult_ { - return &TCheckAuthResult_{} +func (p *TReportCommitTxnResultRequest) GetPayload() (v []byte) { + if !p.IsSetPayload() { + return TReportCommitTxnResultRequest_Payload_DEFAULT + } + return p.Payload } - -func (p *TCheckAuthResult_) InitDefault() { - *p = TCheckAuthResult_{} +func (p *TReportCommitTxnResultRequest) SetDbId(val *int64) { + p.DbId = val +} +func (p *TReportCommitTxnResultRequest) SetTxnId(val *int64) { + p.TxnId = val 
+} +func (p *TReportCommitTxnResultRequest) SetLabel(val *string) { + p.Label = val +} +func (p *TReportCommitTxnResultRequest) SetPayload(val []byte) { + p.Payload = val } -var TCheckAuthResult__Status_DEFAULT *status.TStatus +var fieldIDToName_TReportCommitTxnResultRequest = map[int16]string{ + 1: "dbId", + 2: "txnId", + 3: "label", + 4: "payload", +} -func (p *TCheckAuthResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TCheckAuthResult__Status_DEFAULT - } - return p.Status +func (p *TReportCommitTxnResultRequest) IsSetDbId() bool { + return p.DbId != nil } -func (p *TCheckAuthResult_) SetStatus(val *status.TStatus) { - p.Status = val + +func (p *TReportCommitTxnResultRequest) IsSetTxnId() bool { + return p.TxnId != nil } -var fieldIDToName_TCheckAuthResult_ = map[int16]string{ - 1: "status", +func (p *TReportCommitTxnResultRequest) IsSetLabel() bool { + return p.Label != nil } -func (p *TCheckAuthResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TReportCommitTxnResultRequest) IsSetPayload() bool { + return p.Payload != nil } -func (p *TCheckAuthResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TReportCommitTxnResultRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -43579,22 +74723,42 @@ func (p *TCheckAuthResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -43603,17 +74767,13 @@ func (p *TCheckAuthResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportCommitTxnResultRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -43621,21 +74781,56 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: 
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthResult_[fieldId])) } -func (p *TCheckAuthResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TReportCommitTxnResultRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil +} +func (p *TReportCommitTxnResultRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil +} +func (p *TReportCommitTxnResultRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Label = _field + return nil +} +func (p *TReportCommitTxnResultRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { return err + } else { + _field = []byte(v) } + p.Payload = _field return nil } -func (p *TCheckAuthResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TReportCommitTxnResultRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCheckAuthResult"); err != nil { + if err = oprot.WriteStructBegin("TReportCommitTxnResultRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -43643,7 +74838,18 @@ func (p *TCheckAuthResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -43662,15 +74868,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCheckAuthResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TReportCommitTxnResultRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -43679,156 +74887,222 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCheckAuthResult_) String() string { +func (p *TReportCommitTxnResultRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txnId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } 
+ return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TReportCommitTxnResultRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLabel() { + if err = oprot.WriteFieldBegin("label", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Label); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TReportCommitTxnResultRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPayload() { + if err = oprot.WriteFieldBegin("payload", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.Payload)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TReportCommitTxnResultRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TCheckAuthResult_(%+v)", *p) + return fmt.Sprintf("TReportCommitTxnResultRequest(%+v)", *p) + } -func (p *TCheckAuthResult_) DeepEqual(ano *TCheckAuthResult_) bool { +func (p *TReportCommitTxnResultRequest) DeepEqual(ano *TReportCommitTxnResultRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.DbId) { + return false + } + if !p.Field2DeepEqual(ano.TxnId) { + return false + } + if !p.Field3DeepEqual(ano.Label) { + return false + } + if !p.Field4DeepEqual(ano.Payload) { return false } return true } -func (p *TCheckAuthResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TReportCommitTxnResultRequest) Field1DeepEqual(src *int64) bool { - if !p.Status.DeepEqual(src) { + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if *p.DbId != *src { return false } return true } +func (p *TReportCommitTxnResultRequest) Field2DeepEqual(src *int64) bool { -type TGetQueryStatsRequest struct { - Type *TQueryStatsType `thrift:"type,1,optional" frugal:"1,optional,TQueryStatsType" json:"type,omitempty"` - Catalog *string `thrift:"catalog,2,optional" frugal:"2,optional,string" json:"catalog,omitempty"` - Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` - Tbl *string `thrift:"tbl,4,optional" frugal:"4,optional,string" json:"tbl,omitempty"` - ReplicaId *int64 `thrift:"replica_id,5,optional" frugal:"5,optional,i64" json:"replica_id,omitempty"` - ReplicaIds []int64 `thrift:"replica_ids,6,optional" frugal:"6,optional,list" json:"replica_ids,omitempty"` + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false + } + if *p.TxnId != *src { + return false + } + return true } +func (p *TReportCommitTxnResultRequest) Field3DeepEqual(src *string) bool { -func NewTGetQueryStatsRequest() *TGetQueryStatsRequest { - return &TGetQueryStatsRequest{} + if p.Label == src { + return true 
+ } else if p.Label == nil || src == nil { + return false + } + if strings.Compare(*p.Label, *src) != 0 { + return false + } + return true } +func (p *TReportCommitTxnResultRequest) Field4DeepEqual(src []byte) bool { -func (p *TGetQueryStatsRequest) InitDefault() { - *p = TGetQueryStatsRequest{} + if bytes.Compare(p.Payload, src) != 0 { + return false + } + return true } -var TGetQueryStatsRequest_Type_DEFAULT TQueryStatsType - -func (p *TGetQueryStatsRequest) GetType() (v TQueryStatsType) { - if !p.IsSetType() { - return TGetQueryStatsRequest_Type_DEFAULT - } - return *p.Type +type TQueryColumn struct { + CatalogId *string `thrift:"catalogId,1,optional" frugal:"1,optional,string" json:"catalogId,omitempty"` + DbId *string `thrift:"dbId,2,optional" frugal:"2,optional,string" json:"dbId,omitempty"` + TblId *string `thrift:"tblId,3,optional" frugal:"3,optional,string" json:"tblId,omitempty"` + ColName *string `thrift:"colName,4,optional" frugal:"4,optional,string" json:"colName,omitempty"` } -var TGetQueryStatsRequest_Catalog_DEFAULT string +func NewTQueryColumn() *TQueryColumn { + return &TQueryColumn{} +} -func (p *TGetQueryStatsRequest) GetCatalog() (v string) { - if !p.IsSetCatalog() { - return TGetQueryStatsRequest_Catalog_DEFAULT - } - return *p.Catalog +func (p *TQueryColumn) InitDefault() { } -var TGetQueryStatsRequest_Db_DEFAULT string +var TQueryColumn_CatalogId_DEFAULT string -func (p *TGetQueryStatsRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TGetQueryStatsRequest_Db_DEFAULT +func (p *TQueryColumn) GetCatalogId() (v string) { + if !p.IsSetCatalogId() { + return TQueryColumn_CatalogId_DEFAULT } - return *p.Db + return *p.CatalogId } -var TGetQueryStatsRequest_Tbl_DEFAULT string +var TQueryColumn_DbId_DEFAULT string -func (p *TGetQueryStatsRequest) GetTbl() (v string) { - if !p.IsSetTbl() { - return TGetQueryStatsRequest_Tbl_DEFAULT +func (p *TQueryColumn) GetDbId() (v string) { + if !p.IsSetDbId() { + return TQueryColumn_DbId_DEFAULT } - return *p.Tbl + return *p.DbId } -var TGetQueryStatsRequest_ReplicaId_DEFAULT int64 +var TQueryColumn_TblId_DEFAULT string -func (p *TGetQueryStatsRequest) GetReplicaId() (v int64) { - if !p.IsSetReplicaId() { - return TGetQueryStatsRequest_ReplicaId_DEFAULT +func (p *TQueryColumn) GetTblId() (v string) { + if !p.IsSetTblId() { + return TQueryColumn_TblId_DEFAULT } - return *p.ReplicaId + return *p.TblId } -var TGetQueryStatsRequest_ReplicaIds_DEFAULT []int64 +var TQueryColumn_ColName_DEFAULT string -func (p *TGetQueryStatsRequest) GetReplicaIds() (v []int64) { - if !p.IsSetReplicaIds() { - return TGetQueryStatsRequest_ReplicaIds_DEFAULT +func (p *TQueryColumn) GetColName() (v string) { + if !p.IsSetColName() { + return TQueryColumn_ColName_DEFAULT } - return p.ReplicaIds -} -func (p *TGetQueryStatsRequest) SetType(val *TQueryStatsType) { - p.Type = val -} -func (p *TGetQueryStatsRequest) SetCatalog(val *string) { - p.Catalog = val -} -func (p *TGetQueryStatsRequest) SetDb(val *string) { - p.Db = val + return *p.ColName } -func (p *TGetQueryStatsRequest) SetTbl(val *string) { - p.Tbl = val -} -func (p *TGetQueryStatsRequest) SetReplicaId(val *int64) { - p.ReplicaId = val +func (p *TQueryColumn) SetCatalogId(val *string) { + p.CatalogId = val } -func (p *TGetQueryStatsRequest) SetReplicaIds(val []int64) { - p.ReplicaIds = val +func (p *TQueryColumn) SetDbId(val *string) { + p.DbId = val } - -var fieldIDToName_TGetQueryStatsRequest = map[int16]string{ - 1: "type", - 2: "catalog", - 3: "db", - 4: "tbl", - 5: "replica_id", - 6: 
"replica_ids", +func (p *TQueryColumn) SetTblId(val *string) { + p.TblId = val } - -func (p *TGetQueryStatsRequest) IsSetType() bool { - return p.Type != nil +func (p *TQueryColumn) SetColName(val *string) { + p.ColName = val } -func (p *TGetQueryStatsRequest) IsSetCatalog() bool { - return p.Catalog != nil +var fieldIDToName_TQueryColumn = map[int16]string{ + 1: "catalogId", + 2: "dbId", + 3: "tblId", + 4: "colName", } -func (p *TGetQueryStatsRequest) IsSetDb() bool { - return p.Db != nil +func (p *TQueryColumn) IsSetCatalogId() bool { + return p.CatalogId != nil } -func (p *TGetQueryStatsRequest) IsSetTbl() bool { - return p.Tbl != nil +func (p *TQueryColumn) IsSetDbId() bool { + return p.DbId != nil } -func (p *TGetQueryStatsRequest) IsSetReplicaId() bool { - return p.ReplicaId != nil +func (p *TQueryColumn) IsSetTblId() bool { + return p.TblId != nil } -func (p *TGetQueryStatsRequest) IsSetReplicaIds() bool { - return p.ReplicaIds != nil +func (p *TQueryColumn) IsSetColName() bool { + return p.ColName != nil } -func (p *TGetQueryStatsRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *TQueryColumn) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -43848,71 +75122,42 @@ func (p *TGetQueryStatsRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -43927,7 +75172,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetQueryStatsRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryColumn[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: 
", p, fieldId, fieldTypeId), err) @@ -43937,77 +75182,54 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetQueryStatsRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := TQueryStatsType(v) - p.Type = &tmp - } - return nil -} +func (p *TQueryColumn) ReadField1(iprot thrift.TProtocol) error { -func (p *TGetQueryStatsRequest) ReadField2(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.CatalogId = _field return nil } +func (p *TQueryColumn) ReadField2(iprot thrift.TProtocol) error { -func (p *TGetQueryStatsRequest) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.DbId = _field return nil } +func (p *TQueryColumn) ReadField3(iprot thrift.TProtocol) error { -func (p *TGetQueryStatsRequest) ReadField4(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Tbl = &v + _field = &v } + p.TblId = _field return nil } +func (p *TQueryColumn) ReadField4(iprot thrift.TProtocol) error { -func (p *TGetQueryStatsRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.ReplicaId = &v - } - return nil -} - -func (p *TGetQueryStatsRequest) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ReplicaIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem = v - } - - p.ReplicaIds = append(p.ReplicaIds, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + _field = &v } + p.ColName = _field return nil } -func (p *TGetQueryStatsRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *TQueryColumn) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TGetQueryStatsRequest"); err != nil { + if err = oprot.WriteStructBegin("TQueryColumn"); err != nil { goto WriteStructBeginError } if p != nil { @@ -44027,15 +75249,6 @@ func (p *TGetQueryStatsRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -44054,12 +75267,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetType() { - if err = oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { +func (p *TQueryColumn) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalogId", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.Type)); err != nil { + if err := oprot.WriteString(*p.CatalogId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44073,12 +75286,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write 
field 1 end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetCatalog() { - if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 2); err != nil { +func (p *TQueryColumn) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("dbId", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Catalog); err != nil { + if err := oprot.WriteString(*p.DbId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44092,50 +75305,328 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 3); err != nil { +func (p *TQueryColumn) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTblId() { + if err = oprot.WriteFieldBegin("tblId", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Db); err != nil { + if err := oprot.WriteString(*p.TblId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueryColumn) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetColName() { + if err = oprot.WriteFieldBegin("colName", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ColName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryColumn) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryColumn(%+v)", *p) + +} + +func (p *TQueryColumn) DeepEqual(ano *TQueryColumn) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.CatalogId) { + return false + } + if !p.Field2DeepEqual(ano.DbId) { + return false + } + if !p.Field3DeepEqual(ano.TblId) { + return false + } + if !p.Field4DeepEqual(ano.ColName) { + return false + } + return true +} + +func (p *TQueryColumn) Field1DeepEqual(src *string) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if strings.Compare(*p.CatalogId, *src) != 0 { + return false + } + return true +} +func (p *TQueryColumn) Field2DeepEqual(src *string) bool { + + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false + } + if strings.Compare(*p.DbId, *src) != 0 { + return false + } + return true +} +func (p *TQueryColumn) Field3DeepEqual(src *string) bool { + + if p.TblId == src { + return true + } else if p.TblId == nil || src == nil { + return false + } + if strings.Compare(*p.TblId, *src) != 0 { + return false + } + return true +} +func (p *TQueryColumn) Field4DeepEqual(src *string) bool { + + if p.ColName == src { + return true + } else if p.ColName == nil || src == nil { + return false + } + if strings.Compare(*p.ColName, *src) != 
0 { + return false + } + return true +} + +type TSyncQueryColumns struct { + HighPriorityColumns []*TQueryColumn `thrift:"highPriorityColumns,1,optional" frugal:"1,optional,list" json:"highPriorityColumns,omitempty"` + MidPriorityColumns []*TQueryColumn `thrift:"midPriorityColumns,2,optional" frugal:"2,optional,list" json:"midPriorityColumns,omitempty"` +} + +func NewTSyncQueryColumns() *TSyncQueryColumns { + return &TSyncQueryColumns{} +} + +func (p *TSyncQueryColumns) InitDefault() { +} + +var TSyncQueryColumns_HighPriorityColumns_DEFAULT []*TQueryColumn + +func (p *TSyncQueryColumns) GetHighPriorityColumns() (v []*TQueryColumn) { + if !p.IsSetHighPriorityColumns() { + return TSyncQueryColumns_HighPriorityColumns_DEFAULT + } + return p.HighPriorityColumns +} + +var TSyncQueryColumns_MidPriorityColumns_DEFAULT []*TQueryColumn + +func (p *TSyncQueryColumns) GetMidPriorityColumns() (v []*TQueryColumn) { + if !p.IsSetMidPriorityColumns() { + return TSyncQueryColumns_MidPriorityColumns_DEFAULT + } + return p.MidPriorityColumns +} +func (p *TSyncQueryColumns) SetHighPriorityColumns(val []*TQueryColumn) { + p.HighPriorityColumns = val +} +func (p *TSyncQueryColumns) SetMidPriorityColumns(val []*TQueryColumn) { + p.MidPriorityColumns = val +} + +var fieldIDToName_TSyncQueryColumns = map[int16]string{ + 1: "highPriorityColumns", + 2: "midPriorityColumns", +} + +func (p *TSyncQueryColumns) IsSetHighPriorityColumns() bool { + return p.HighPriorityColumns != nil +} + +func (p *TSyncQueryColumns) IsSetMidPriorityColumns() bool { + return p.MidPriorityColumns != nil +} + +func (p *TSyncQueryColumns) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncQueryColumns[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSyncQueryColumns) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TQueryColumn, 0, size) + values := 
make([]TQueryColumn, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.HighPriorityColumns = _field + return nil +} +func (p *TSyncQueryColumns) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TQueryColumn, 0, size) + values := make([]TQueryColumn, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.MidPriorityColumns = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTbl() { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Tbl); err != nil { - return err +func (p *TSyncQueryColumns) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSyncQueryColumns"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetReplicaId() { - if err = oprot.WriteFieldBegin("replica_id", thrift.I64, 5); err != nil { +func (p *TSyncQueryColumns) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetHighPriorityColumns() { + if err = oprot.WriteFieldBegin("highPriorityColumns", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ReplicaId); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HighPriorityColumns)); err != nil { + return err + } + for _, v := range p.HighPriorityColumns { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44144,21 +75635,21 @@ func (p *TGetQueryStatsRequest) writeField5(oprot thrift.TProtocol) (err error) } return nil 
WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TGetQueryStatsRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetReplicaIds() { - if err = oprot.WriteFieldBegin("replica_ids", thrift.LIST, 6); err != nil { +func (p *TSyncQueryColumns) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMidPriorityColumns() { + if err = oprot.WriteFieldBegin("midPriorityColumns", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.I64, len(p.ReplicaIds)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.MidPriorityColumns)); err != nil { return err } - for _, v := range p.ReplicaIds { - if err := oprot.WriteI64(v); err != nil { + for _, v := range p.MidPriorityColumns { + if err := v.Write(oprot); err != nil { return err } } @@ -44171,188 +75662,111 @@ func (p *TGetQueryStatsRequest) writeField6(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TGetQueryStatsRequest) String() string { +func (p *TSyncQueryColumns) String() string { if p == nil { return "" } - return fmt.Sprintf("TGetQueryStatsRequest(%+v)", *p) + return fmt.Sprintf("TSyncQueryColumns(%+v)", *p) + } -func (p *TGetQueryStatsRequest) DeepEqual(ano *TGetQueryStatsRequest) bool { +func (p *TSyncQueryColumns) DeepEqual(ano *TSyncQueryColumns) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Type) { - return false - } - if !p.Field2DeepEqual(ano.Catalog) { - return false - } - if !p.Field3DeepEqual(ano.Db) { - return false - } - if !p.Field4DeepEqual(ano.Tbl) { - return false - } - if !p.Field5DeepEqual(ano.ReplicaId) { - return false - } - if !p.Field6DeepEqual(ano.ReplicaIds) { - return false - } - return true -} - -func (p *TGetQueryStatsRequest) Field1DeepEqual(src *TQueryStatsType) bool { - - if p.Type == src { - return true - } else if p.Type == nil || src == nil { - return false - } - if *p.Type != *src { - return false - } - return true -} -func (p *TGetQueryStatsRequest) Field2DeepEqual(src *string) bool { - - if p.Catalog == src { - return true - } else if p.Catalog == nil || src == nil { - return false - } - if strings.Compare(*p.Catalog, *src) != 0 { - return false - } - return true -} -func (p *TGetQueryStatsRequest) Field3DeepEqual(src *string) bool { - - if p.Db == src { - return true - } else if p.Db == nil || src == nil { + if !p.Field1DeepEqual(ano.HighPriorityColumns) { return false } - if strings.Compare(*p.Db, *src) != 0 { + if !p.Field2DeepEqual(ano.MidPriorityColumns) { return false } return true } -func (p *TGetQueryStatsRequest) Field4DeepEqual(src *string) bool { - if p.Tbl == src { - return true - } else if p.Tbl == nil || src == nil { - return false - } - if strings.Compare(*p.Tbl, *src) != 0 { - return false - } - return true -} -func (p 
*TGetQueryStatsRequest) Field5DeepEqual(src *int64) bool { +func (p *TSyncQueryColumns) Field1DeepEqual(src []*TQueryColumn) bool { - if p.ReplicaId == src { - return true - } else if p.ReplicaId == nil || src == nil { + if len(p.HighPriorityColumns) != len(src) { return false } - if *p.ReplicaId != *src { - return false + for i, v := range p.HighPriorityColumns { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TGetQueryStatsRequest) Field6DeepEqual(src []int64) bool { +func (p *TSyncQueryColumns) Field2DeepEqual(src []*TQueryColumn) bool { - if len(p.ReplicaIds) != len(src) { + if len(p.MidPriorityColumns) != len(src) { return false } - for i, v := range p.ReplicaIds { + for i, v := range p.MidPriorityColumns { _src := src[i] - if v != _src { + if !v.DeepEqual(_src) { return false } } return true } -type TTableQueryStats struct { - Field *string `thrift:"field,1,optional" frugal:"1,optional,string" json:"field,omitempty"` - QueryStats *int64 `thrift:"query_stats,2,optional" frugal:"2,optional,i64" json:"query_stats,omitempty"` - FilterStats *int64 `thrift:"filter_stats,3,optional" frugal:"3,optional,i64" json:"filter_stats,omitempty"` -} - -func NewTTableQueryStats() *TTableQueryStats { - return &TTableQueryStats{} +type TFetchSplitBatchRequest struct { + SplitSourceId *int64 `thrift:"split_source_id,1,optional" frugal:"1,optional,i64" json:"split_source_id,omitempty"` + MaxNumSplits *int32 `thrift:"max_num_splits,2,optional" frugal:"2,optional,i32" json:"max_num_splits,omitempty"` } -func (p *TTableQueryStats) InitDefault() { - *p = TTableQueryStats{} +func NewTFetchSplitBatchRequest() *TFetchSplitBatchRequest { + return &TFetchSplitBatchRequest{} } -var TTableQueryStats_Field_DEFAULT string - -func (p *TTableQueryStats) GetField() (v string) { - if !p.IsSetField() { - return TTableQueryStats_Field_DEFAULT - } - return *p.Field +func (p *TFetchSplitBatchRequest) InitDefault() { } -var TTableQueryStats_QueryStats_DEFAULT int64 +var TFetchSplitBatchRequest_SplitSourceId_DEFAULT int64 -func (p *TTableQueryStats) GetQueryStats() (v int64) { - if !p.IsSetQueryStats() { - return TTableQueryStats_QueryStats_DEFAULT +func (p *TFetchSplitBatchRequest) GetSplitSourceId() (v int64) { + if !p.IsSetSplitSourceId() { + return TFetchSplitBatchRequest_SplitSourceId_DEFAULT } - return *p.QueryStats + return *p.SplitSourceId } -var TTableQueryStats_FilterStats_DEFAULT int64 +var TFetchSplitBatchRequest_MaxNumSplits_DEFAULT int32 -func (p *TTableQueryStats) GetFilterStats() (v int64) { - if !p.IsSetFilterStats() { - return TTableQueryStats_FilterStats_DEFAULT +func (p *TFetchSplitBatchRequest) GetMaxNumSplits() (v int32) { + if !p.IsSetMaxNumSplits() { + return TFetchSplitBatchRequest_MaxNumSplits_DEFAULT } - return *p.FilterStats -} -func (p *TTableQueryStats) SetField(val *string) { - p.Field = val + return *p.MaxNumSplits } -func (p *TTableQueryStats) SetQueryStats(val *int64) { - p.QueryStats = val -} -func (p *TTableQueryStats) SetFilterStats(val *int64) { - p.FilterStats = val +func (p *TFetchSplitBatchRequest) SetSplitSourceId(val *int64) { + p.SplitSourceId = val } - -var fieldIDToName_TTableQueryStats = map[int16]string{ - 1: "field", - 2: "query_stats", - 3: "filter_stats", +func (p *TFetchSplitBatchRequest) SetMaxNumSplits(val *int32) { + p.MaxNumSplits = val } -func (p *TTableQueryStats) IsSetField() bool { - return p.Field != nil +var fieldIDToName_TFetchSplitBatchRequest = map[int16]string{ + 1: "split_source_id", + 2: "max_num_splits", } -func (p 
*TTableQueryStats) IsSetQueryStats() bool { - return p.QueryStats != nil +func (p *TFetchSplitBatchRequest) IsSetSplitSourceId() bool { + return p.SplitSourceId != nil } -func (p *TTableQueryStats) IsSetFilterStats() bool { - return p.FilterStats != nil +func (p *TFetchSplitBatchRequest) IsSetMaxNumSplits() bool { + return p.MaxNumSplits != nil } -func (p *TTableQueryStats) Read(iprot thrift.TProtocol) (err error) { +func (p *TFetchSplitBatchRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -44372,41 +75786,26 @@ func (p *TTableQueryStats) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -44421,7 +75820,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableQueryStats[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSplitBatchRequest[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -44431,36 +75830,32 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableQueryStats) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Field = &v - } - return nil -} +func (p *TFetchSplitBatchRequest) ReadField1(iprot thrift.TProtocol) error { -func (p *TTableQueryStats) ReadField2(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.QueryStats = &v + _field = &v } + p.SplitSourceId = _field return nil } +func (p *TFetchSplitBatchRequest) ReadField2(iprot thrift.TProtocol) error { -func (p *TTableQueryStats) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FilterStats = &v + _field = &v } + p.MaxNumSplits = _field return nil } -func (p *TTableQueryStats) Write(oprot thrift.TProtocol) (err error) { +func (p *TFetchSplitBatchRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTableQueryStats"); err != nil { + if err = oprot.WriteStructBegin("TFetchSplitBatchRequest"); err != nil { goto WriteStructBeginError } if p != nil { @@ -44472,11 +75867,6 @@ func (p 
*TTableQueryStats) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -44495,12 +75885,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTableQueryStats) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetField() { - if err = oprot.WriteFieldBegin("field", thrift.STRING, 1); err != nil { +func (p *TFetchSplitBatchRequest) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSplitSourceId() { + if err = oprot.WriteFieldBegin("split_source_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Field); err != nil { + if err := oprot.WriteI64(*p.SplitSourceId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44514,12 +75904,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTableQueryStats) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryStats() { - if err = oprot.WriteFieldBegin("query_stats", thrift.I64, 2); err != nil { +func (p *TFetchSplitBatchRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxNumSplits() { + if err = oprot.WriteFieldBegin("max_num_splits", thrift.I32, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.QueryStats); err != nil { + if err := oprot.WriteI32(*p.MaxNumSplits); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44533,138 +75923,104 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTableQueryStats) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetFilterStats() { - if err = oprot.WriteFieldBegin("filter_stats", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.FilterStats); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TTableQueryStats) String() string { +func (p *TFetchSplitBatchRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TTableQueryStats(%+v)", *p) + return fmt.Sprintf("TFetchSplitBatchRequest(%+v)", *p) + } -func (p *TTableQueryStats) DeepEqual(ano *TTableQueryStats) bool { +func (p *TFetchSplitBatchRequest) DeepEqual(ano *TFetchSplitBatchRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Field) { - return false - } - if !p.Field2DeepEqual(ano.QueryStats) { + if !p.Field1DeepEqual(ano.SplitSourceId) { return false } - if !p.Field3DeepEqual(ano.FilterStats) { + if !p.Field2DeepEqual(ano.MaxNumSplits) { return false } return true } -func (p *TTableQueryStats) Field1DeepEqual(src *string) bool { - - if p.Field == src { - return true - } else if p.Field == nil || src == nil { - return false - } - if strings.Compare(*p.Field, *src) != 0 { - return false - } - return true -} -func (p *TTableQueryStats) Field2DeepEqual(src *int64) bool { +func (p *TFetchSplitBatchRequest) Field1DeepEqual(src *int64) bool { - if p.QueryStats == src { + if p.SplitSourceId == 
src { return true - } else if p.QueryStats == nil || src == nil { + } else if p.SplitSourceId == nil || src == nil { return false } - if *p.QueryStats != *src { + if *p.SplitSourceId != *src { return false } return true } -func (p *TTableQueryStats) Field3DeepEqual(src *int64) bool { +func (p *TFetchSplitBatchRequest) Field2DeepEqual(src *int32) bool { - if p.FilterStats == src { + if p.MaxNumSplits == src { return true - } else if p.FilterStats == nil || src == nil { + } else if p.MaxNumSplits == nil || src == nil { return false } - if *p.FilterStats != *src { + if *p.MaxNumSplits != *src { return false } return true } -type TTableIndexQueryStats struct { - IndexName *string `thrift:"index_name,1,optional" frugal:"1,optional,string" json:"index_name,omitempty"` - TableStats []*TTableQueryStats `thrift:"table_stats,2,optional" frugal:"2,optional,list" json:"table_stats,omitempty"` +type TFetchSplitBatchResult_ struct { + Splits []*planner.TScanRangeLocations `thrift:"splits,1,optional" frugal:"1,optional,list" json:"splits,omitempty"` + Status *status.TStatus `thrift:"status,2,optional" frugal:"2,optional,status.TStatus" json:"status,omitempty"` } -func NewTTableIndexQueryStats() *TTableIndexQueryStats { - return &TTableIndexQueryStats{} +func NewTFetchSplitBatchResult_() *TFetchSplitBatchResult_ { + return &TFetchSplitBatchResult_{} } -func (p *TTableIndexQueryStats) InitDefault() { - *p = TTableIndexQueryStats{} +func (p *TFetchSplitBatchResult_) InitDefault() { } -var TTableIndexQueryStats_IndexName_DEFAULT string +var TFetchSplitBatchResult__Splits_DEFAULT []*planner.TScanRangeLocations -func (p *TTableIndexQueryStats) GetIndexName() (v string) { - if !p.IsSetIndexName() { - return TTableIndexQueryStats_IndexName_DEFAULT +func (p *TFetchSplitBatchResult_) GetSplits() (v []*planner.TScanRangeLocations) { + if !p.IsSetSplits() { + return TFetchSplitBatchResult__Splits_DEFAULT } - return *p.IndexName + return p.Splits } -var TTableIndexQueryStats_TableStats_DEFAULT []*TTableQueryStats +var TFetchSplitBatchResult__Status_DEFAULT *status.TStatus -func (p *TTableIndexQueryStats) GetTableStats() (v []*TTableQueryStats) { - if !p.IsSetTableStats() { - return TTableIndexQueryStats_TableStats_DEFAULT +func (p *TFetchSplitBatchResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TFetchSplitBatchResult__Status_DEFAULT } - return p.TableStats + return p.Status } -func (p *TTableIndexQueryStats) SetIndexName(val *string) { - p.IndexName = val +func (p *TFetchSplitBatchResult_) SetSplits(val []*planner.TScanRangeLocations) { + p.Splits = val } -func (p *TTableIndexQueryStats) SetTableStats(val []*TTableQueryStats) { - p.TableStats = val +func (p *TFetchSplitBatchResult_) SetStatus(val *status.TStatus) { + p.Status = val } -var fieldIDToName_TTableIndexQueryStats = map[int16]string{ - 1: "index_name", - 2: "table_stats", +var fieldIDToName_TFetchSplitBatchResult_ = map[int16]string{ + 1: "splits", + 2: "status", } -func (p *TTableIndexQueryStats) IsSetIndexName() bool { - return p.IndexName != nil +func (p *TFetchSplitBatchResult_) IsSetSplits() bool { + return p.Splits != nil } -func (p *TTableIndexQueryStats) IsSetTableStats() bool { - return p.TableStats != nil +func (p *TFetchSplitBatchResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TTableIndexQueryStats) Read(iprot thrift.TProtocol) (err error) { +func (p *TFetchSplitBatchResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -44684,31 +76040,26 @@ func (p 
*TTableIndexQueryStats) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -44723,7 +76074,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableIndexQueryStats[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSplitBatchResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -44733,38 +76084,41 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableIndexQueryStats) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.IndexName = &v - } - return nil -} - -func (p *TTableIndexQueryStats) ReadField2(iprot thrift.TProtocol) error { +func (p *TFetchSplitBatchResult_) ReadField1(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TableStats = make([]*TTableQueryStats, 0, size) + _field := make([]*planner.TScanRangeLocations, 0, size) + values := make([]planner.TScanRangeLocations, size) for i := 0; i < size; i++ { - _elem := NewTTableQueryStats() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TableStats = append(p.TableStats, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Splits = _field + return nil +} +func (p *TFetchSplitBatchResult_) ReadField2(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field return nil } -func (p *TTableIndexQueryStats) Write(oprot thrift.TProtocol) (err error) { +func (p *TFetchSplitBatchResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTableIndexQueryStats"); err != nil { + if err = oprot.WriteStructBegin("TFetchSplitBatchResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -44776,7 +76130,6 @@ func (p *TTableIndexQueryStats) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -44795,12 +76148,20 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTableIndexQueryStats) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIndexName() { - if err = oprot.WriteFieldBegin("index_name", thrift.STRING, 1); err != nil 
{ +func (p *TFetchSplitBatchResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSplits() { + if err = oprot.WriteFieldBegin("splits", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.IndexName); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Splits)); err != nil { + return err + } + for _, v := range p.Splits { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44814,20 +76175,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTableIndexQueryStats) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTableStats() { - if err = oprot.WriteFieldBegin("table_stats", thrift.LIST, 2); err != nil { +func (p *TFetchSplitBatchResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableStats)); err != nil { - return err - } - for _, v := range p.TableStats { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -44841,159 +76194,100 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTableIndexQueryStats) String() string { +func (p *TFetchSplitBatchResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TTableIndexQueryStats(%+v)", *p) + return fmt.Sprintf("TFetchSplitBatchResult_(%+v)", *p) + } -func (p *TTableIndexQueryStats) DeepEqual(ano *TTableIndexQueryStats) bool { +func (p *TFetchSplitBatchResult_) DeepEqual(ano *TFetchSplitBatchResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.IndexName) { + if !p.Field1DeepEqual(ano.Splits) { return false } - if !p.Field2DeepEqual(ano.TableStats) { + if !p.Field2DeepEqual(ano.Status) { return false } return true } -func (p *TTableIndexQueryStats) Field1DeepEqual(src *string) bool { +func (p *TFetchSplitBatchResult_) Field1DeepEqual(src []*planner.TScanRangeLocations) bool { - if p.IndexName == src { - return true - } else if p.IndexName == nil || src == nil { + if len(p.Splits) != len(src) { return false } - if strings.Compare(*p.IndexName, *src) != 0 { - return false + for i, v := range p.Splits { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TTableIndexQueryStats) Field2DeepEqual(src []*TTableQueryStats) bool { +func (p *TFetchSplitBatchResult_) Field2DeepEqual(src *status.TStatus) bool { - if len(p.TableStats) != len(src) { + if !p.Status.DeepEqual(src) { return false } - for i, v := range p.TableStats { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -type TQueryStatsResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - SimpleResult_ map[string]int64 `thrift:"simple_result,2,optional" frugal:"2,optional,map" json:"simple_result,omitempty"` - TableStats []*TTableQueryStats `thrift:"table_stats,3,optional" frugal:"3,optional,list" json:"table_stats,omitempty"` - TableVerbosStats []*TTableIndexQueryStats 
`thrift:"table_verbos_stats,4,optional" frugal:"4,optional,list" json:"table_verbos_stats,omitempty"` - TabletStats map[int64]int64 `thrift:"tablet_stats,5,optional" frugal:"5,optional,map" json:"tablet_stats,omitempty"` +type TFetchRunningQueriesResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + RunningQueries []*types.TUniqueId `thrift:"running_queries,2,optional" frugal:"2,optional,list" json:"running_queries,omitempty"` } -func NewTQueryStatsResult_() *TQueryStatsResult_ { - return &TQueryStatsResult_{} +func NewTFetchRunningQueriesResult_() *TFetchRunningQueriesResult_ { + return &TFetchRunningQueriesResult_{} } -func (p *TQueryStatsResult_) InitDefault() { - *p = TQueryStatsResult_{} +func (p *TFetchRunningQueriesResult_) InitDefault() { } -var TQueryStatsResult__Status_DEFAULT *status.TStatus +var TFetchRunningQueriesResult__Status_DEFAULT *status.TStatus -func (p *TQueryStatsResult_) GetStatus() (v *status.TStatus) { +func (p *TFetchRunningQueriesResult_) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TQueryStatsResult__Status_DEFAULT + return TFetchRunningQueriesResult__Status_DEFAULT } return p.Status } -var TQueryStatsResult__SimpleResult__DEFAULT map[string]int64 - -func (p *TQueryStatsResult_) GetSimpleResult_() (v map[string]int64) { - if !p.IsSetSimpleResult_() { - return TQueryStatsResult__SimpleResult__DEFAULT - } - return p.SimpleResult_ -} - -var TQueryStatsResult__TableStats_DEFAULT []*TTableQueryStats - -func (p *TQueryStatsResult_) GetTableStats() (v []*TTableQueryStats) { - if !p.IsSetTableStats() { - return TQueryStatsResult__TableStats_DEFAULT - } - return p.TableStats -} - -var TQueryStatsResult__TableVerbosStats_DEFAULT []*TTableIndexQueryStats - -func (p *TQueryStatsResult_) GetTableVerbosStats() (v []*TTableIndexQueryStats) { - if !p.IsSetTableVerbosStats() { - return TQueryStatsResult__TableVerbosStats_DEFAULT - } - return p.TableVerbosStats -} - -var TQueryStatsResult__TabletStats_DEFAULT map[int64]int64 +var TFetchRunningQueriesResult__RunningQueries_DEFAULT []*types.TUniqueId -func (p *TQueryStatsResult_) GetTabletStats() (v map[int64]int64) { - if !p.IsSetTabletStats() { - return TQueryStatsResult__TabletStats_DEFAULT +func (p *TFetchRunningQueriesResult_) GetRunningQueries() (v []*types.TUniqueId) { + if !p.IsSetRunningQueries() { + return TFetchRunningQueriesResult__RunningQueries_DEFAULT } - return p.TabletStats + return p.RunningQueries } -func (p *TQueryStatsResult_) SetStatus(val *status.TStatus) { +func (p *TFetchRunningQueriesResult_) SetStatus(val *status.TStatus) { p.Status = val } -func (p *TQueryStatsResult_) SetSimpleResult_(val map[string]int64) { - p.SimpleResult_ = val -} -func (p *TQueryStatsResult_) SetTableStats(val []*TTableQueryStats) { - p.TableStats = val -} -func (p *TQueryStatsResult_) SetTableVerbosStats(val []*TTableIndexQueryStats) { - p.TableVerbosStats = val -} -func (p *TQueryStatsResult_) SetTabletStats(val map[int64]int64) { - p.TabletStats = val +func (p *TFetchRunningQueriesResult_) SetRunningQueries(val []*types.TUniqueId) { + p.RunningQueries = val } -var fieldIDToName_TQueryStatsResult_ = map[int16]string{ +var fieldIDToName_TFetchRunningQueriesResult_ = map[int16]string{ 1: "status", - 2: "simple_result", - 3: "table_stats", - 4: "table_verbos_stats", - 5: "tablet_stats", + 2: "running_queries", } -func (p *TQueryStatsResult_) IsSetStatus() bool { +func (p *TFetchRunningQueriesResult_) IsSetStatus() bool { return 
p.Status != nil } -func (p *TQueryStatsResult_) IsSetSimpleResult_() bool { - return p.SimpleResult_ != nil -} - -func (p *TQueryStatsResult_) IsSetTableStats() bool { - return p.TableStats != nil -} - -func (p *TQueryStatsResult_) IsSetTableVerbosStats() bool { - return p.TableVerbosStats != nil -} - -func (p *TQueryStatsResult_) IsSetTabletStats() bool { - return p.TabletStats != nil +func (p *TFetchRunningQueriesResult_) IsSetRunningQueries() bool { + return p.RunningQueries != nil } -func (p *TQueryStatsResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TFetchRunningQueriesResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -45012,62 +76306,27 @@ func (p *TQueryStatsResult_) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.MAP { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err = p.ReadField4(iprot); err != nil { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 5: - if fieldTypeId == thrift.MAP { - if err = p.ReadField5(iprot); err != nil { + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -45082,7 +76341,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatsResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchRunningQueriesResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -45092,115 +76351,41 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TQueryStatsResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TQueryStatsResult_) ReadField2(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.SimpleResult_ = make(map[string]int64, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - - var _val int64 
- if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _val = v - } - - p.SimpleResult_[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { +func (p *TFetchRunningQueriesResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } - -func (p *TQueryStatsResult_) ReadField3(iprot thrift.TProtocol) error { +func (p *TFetchRunningQueriesResult_) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TableStats = make([]*TTableQueryStats, 0, size) + _field := make([]*types.TUniqueId, 0, size) + values := make([]types.TUniqueId, size) for i := 0; i < size; i++ { - _elem := NewTTableQueryStats() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.TableStats = append(p.TableStats, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} + _elem := &values[i] + _elem.InitDefault() -func (p *TQueryStatsResult_) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.TableVerbosStats = make([]*TTableIndexQueryStats, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTableIndexQueryStats() if err := _elem.Read(iprot); err != nil { return err } - p.TableVerbosStats = append(p.TableVerbosStats, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RunningQueries = _field return nil } -func (p *TQueryStatsResult_) ReadField5(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.TabletStats = make(map[int64]int64, size) - for i := 0; i < size; i++ { - var _key int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _key = v - } - - var _val int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _val = v - } - - p.TabletStats[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TQueryStatsResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TFetchRunningQueriesResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TQueryStatsResult"); err != nil { + if err = oprot.WriteStructBegin("TFetchRunningQueriesResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -45212,19 +76397,6 @@ func (p *TQueryStatsResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -45243,7 +76415,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TQueryStatsResult_) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TFetchRunningQueriesResult_) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetStatus() { if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError @@ -45262,25 +76434,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TQueryStatsResult_) writeField2(oprot thrift.TProtocol) (err error) 
{ - if p.IsSetSimpleResult_() { - if err = oprot.WriteFieldBegin("simple_result", thrift.MAP, 2); err != nil { +func (p *TFetchRunningQueriesResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRunningQueries() { + if err = oprot.WriteFieldBegin("running_queries", thrift.LIST, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.I64, len(p.SimpleResult_)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RunningQueries)); err != nil { return err } - for k, v := range p.SimpleResult_ { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteI64(v); err != nil { + for _, v := range p.RunningQueries { + if err := v.Write(oprot); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -45294,3691 +76461,3901 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TQueryStatsResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTableStats() { - if err = oprot.WriteFieldBegin("table_stats", thrift.LIST, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableStats)); err != nil { - return err - } - for _, v := range p.TableStats { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TFetchRunningQueriesResult_) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return fmt.Sprintf("TFetchRunningQueriesResult_(%+v)", *p) + } -func (p *TQueryStatsResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTableVerbosStats() { - if err = oprot.WriteFieldBegin("table_verbos_stats", thrift.LIST, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableVerbosStats)); err != nil { - return err - } - for _, v := range p.TableVerbosStats { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TFetchRunningQueriesResult_) DeepEqual(ano *TFetchRunningQueriesResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.RunningQueries) { + return false + } + return true } -func (p *TQueryStatsResult_) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTabletStats() { - if err = oprot.WriteFieldBegin("tablet_stats", thrift.MAP, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.TabletStats)); err != nil { - return err +func (p *TFetchRunningQueriesResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return 
false + } + return true +} +func (p *TFetchRunningQueriesResult_) Field2DeepEqual(src []*types.TUniqueId) bool { + + if len(p.RunningQueries) != len(src) { + return false + } + for i, v := range p.RunningQueries { + _src := src[i] + if !v.DeepEqual(_src) { + return false } - for k, v := range p.TabletStats { + } + return true +} - if err := oprot.WriteI64(k); err != nil { - return err - } +type TFetchRunningQueriesRequest struct { +} - if err := oprot.WriteI64(v); err != nil { - return err - } +func NewTFetchRunningQueriesRequest() *TFetchRunningQueriesRequest { + return &TFetchRunningQueriesRequest{} +} + +func (p *TFetchRunningQueriesRequest) InitDefault() { +} + +var fieldIDToName_TFetchRunningQueriesRequest = map[int16]string{} + +func (p *TFetchRunningQueriesRequest) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteMapEnd(); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TQueryStatsResult_) String() string { +func (p *TFetchRunningQueriesRequest) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TFetchRunningQueriesRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFetchRunningQueriesRequest) String() string { if p == nil { return "" } - return fmt.Sprintf("TQueryStatsResult_(%+v)", *p) + return fmt.Sprintf("TFetchRunningQueriesRequest(%+v)", *p) + } -func (p *TQueryStatsResult_) DeepEqual(ano *TQueryStatsResult_) bool { +func (p *TFetchRunningQueriesRequest) DeepEqual(ano *TFetchRunningQueriesRequest) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.SimpleResult_) { - return false - 
} - if !p.Field3DeepEqual(ano.TableStats) { - return false - } - if !p.Field4DeepEqual(ano.TableVerbosStats) { - return false - } - if !p.Field5DeepEqual(ano.TabletStats) { - return false - } return true } -func (p *TQueryStatsResult_) Field1DeepEqual(src *status.TStatus) bool { +type FrontendService interface { + GetDbNames(ctx context.Context, params *TGetDbsParams) (r *TGetDbsResult_, err error) - if !p.Status.DeepEqual(src) { - return false + GetTableNames(ctx context.Context, params *TGetTablesParams) (r *TGetTablesResult_, err error) + + DescribeTable(ctx context.Context, params *TDescribeTableParams) (r *TDescribeTableResult_, err error) + + DescribeTables(ctx context.Context, params *TDescribeTablesParams) (r *TDescribeTablesResult_, err error) + + ShowVariables(ctx context.Context, params *TShowVariableRequest) (r *TShowVariableResult_, err error) + + ReportExecStatus(ctx context.Context, params *TReportExecStatusParams) (r *TReportExecStatusResult_, err error) + + FinishTask(ctx context.Context, request *masterservice.TFinishTaskRequest) (r *masterservice.TMasterResult_, err error) + + Report(ctx context.Context, request *masterservice.TReportRequest) (r *masterservice.TMasterResult_, err error) + + FetchResource(ctx context.Context) (r *masterservice.TFetchResourceResult_, err error) + + Forward(ctx context.Context, params *TMasterOpRequest) (r *TMasterOpResult_, err error) + + ListTableStatus(ctx context.Context, params *TGetTablesParams) (r *TListTableStatusResult_, err error) + + ListTableMetadataNameIds(ctx context.Context, params *TGetTablesParams) (r *TListTableMetadataNameIdsResult_, err error) + + ListTablePrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) + + ListSchemaPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) + + ListUserPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) + + UpdateExportTaskStatus(ctx context.Context, request *TUpdateExportTaskStatusRequest) (r *TFeResult_, err error) + + LoadTxnBegin(ctx context.Context, request *TLoadTxnBeginRequest) (r *TLoadTxnBeginResult_, err error) + + LoadTxnPreCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) + + LoadTxn2PC(ctx context.Context, request *TLoadTxn2PCRequest) (r *TLoadTxn2PCResult_, err error) + + LoadTxnCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) + + LoadTxnRollback(ctx context.Context, request *TLoadTxnRollbackRequest) (r *TLoadTxnRollbackResult_, err error) + + BeginTxn(ctx context.Context, request *TBeginTxnRequest) (r *TBeginTxnResult_, err error) + + CommitTxn(ctx context.Context, request *TCommitTxnRequest) (r *TCommitTxnResult_, err error) + + RollbackTxn(ctx context.Context, request *TRollbackTxnRequest) (r *TRollbackTxnResult_, err error) + + GetBinlog(ctx context.Context, request *TGetBinlogRequest) (r *TGetBinlogResult_, err error) + + GetSnapshot(ctx context.Context, request *TGetSnapshotRequest) (r *TGetSnapshotResult_, err error) + + RestoreSnapshot(ctx context.Context, request *TRestoreSnapshotRequest) (r *TRestoreSnapshotResult_, err error) + + WaitingTxnStatus(ctx context.Context, request *TWaitingTxnStatusRequest) (r *TWaitingTxnStatusResult_, err error) + + StreamLoadPut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadPutResult_, err error) + + StreamLoadMultiTablePut(ctx context.Context, request 
*TStreamLoadPutRequest) (r *TStreamLoadMultiTablePutResult_, err error) + + SnapshotLoaderReport(ctx context.Context, request *TSnapshotLoaderReportRequest) (r *status.TStatus, err error) + + Ping(ctx context.Context, request *TFrontendPingFrontendRequest) (r *TFrontendPingFrontendResult_, err error) + + InitExternalCtlMeta(ctx context.Context, request *TInitExternalCtlMetaRequest) (r *TInitExternalCtlMetaResult_, err error) + + FetchSchemaTableData(ctx context.Context, request *TFetchSchemaTableDataRequest) (r *TFetchSchemaTableDataResult_, err error) + + AcquireToken(ctx context.Context) (r *TMySqlLoadAcquireTokenResult_, err error) + + CheckToken(ctx context.Context, token string) (r bool, err error) + + ConfirmUnusedRemoteFiles(ctx context.Context, request *TConfirmUnusedRemoteFilesRequest) (r *TConfirmUnusedRemoteFilesResult_, err error) + + CheckAuth(ctx context.Context, request *TCheckAuthRequest) (r *TCheckAuthResult_, err error) + + GetQueryStats(ctx context.Context, request *TGetQueryStatsRequest) (r *TQueryStatsResult_, err error) + + GetTabletReplicaInfos(ctx context.Context, request *TGetTabletReplicaInfosRequest) (r *TGetTabletReplicaInfosResult_, err error) + + AddPlsqlStoredProcedure(ctx context.Context, request *TAddPlsqlStoredProcedureRequest) (r *TPlsqlStoredProcedureResult_, err error) + + DropPlsqlStoredProcedure(ctx context.Context, request *TDropPlsqlStoredProcedureRequest) (r *TPlsqlStoredProcedureResult_, err error) + + AddPlsqlPackage(ctx context.Context, request *TAddPlsqlPackageRequest) (r *TPlsqlPackageResult_, err error) + + DropPlsqlPackage(ctx context.Context, request *TDropPlsqlPackageRequest) (r *TPlsqlPackageResult_, err error) + + GetMasterToken(ctx context.Context, request *TGetMasterTokenRequest) (r *TGetMasterTokenResult_, err error) + + GetBinlogLag(ctx context.Context, request *TGetBinlogLagRequest) (r *TGetBinlogLagResult_, err error) + + UpdateStatsCache(ctx context.Context, request *TUpdateFollowerStatsCacheRequest) (r *status.TStatus, err error) + + GetAutoIncrementRange(ctx context.Context, request *TAutoIncrementRangeRequest) (r *TAutoIncrementRangeResult_, err error) + + CreatePartition(ctx context.Context, request *TCreatePartitionRequest) (r *TCreatePartitionResult_, err error) + + ReplacePartition(ctx context.Context, request *TReplacePartitionRequest) (r *TReplacePartitionResult_, err error) + + GetMeta(ctx context.Context, request *TGetMetaRequest) (r *TGetMetaResult_, err error) + + GetBackendMeta(ctx context.Context, request *TGetBackendMetaRequest) (r *TGetBackendMetaResult_, err error) + + GetColumnInfo(ctx context.Context, request *TGetColumnInfoRequest) (r *TGetColumnInfoResult_, err error) + + InvalidateStatsCache(ctx context.Context, request *TInvalidateFollowerStatsCacheRequest) (r *status.TStatus, err error) + + ShowProcessList(ctx context.Context, request *TShowProcessListRequest) (r *TShowProcessListResult_, err error) + + ReportCommitTxnResult_(ctx context.Context, request *TReportCommitTxnResultRequest) (r *status.TStatus, err error) + + ShowUser(ctx context.Context, request *TShowUserRequest) (r *TShowUserResult_, err error) + + SyncQueryColumns(ctx context.Context, request *TSyncQueryColumns) (r *status.TStatus, err error) + + FetchSplitBatch(ctx context.Context, request *TFetchSplitBatchRequest) (r *TFetchSplitBatchResult_, err error) + + UpdatePartitionStatsCache(ctx context.Context, request *TUpdateFollowerPartitionStatsCacheRequest) (r *status.TStatus, err error) + + FetchRunningQueries(ctx context.Context, request 
*TFetchRunningQueriesRequest) (r *TFetchRunningQueriesResult_, err error) +} + +type FrontendServiceClient struct { + c thrift.TClient +} + +func NewFrontendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *FrontendServiceClient { + return &FrontendServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), } - return true } -func (p *TQueryStatsResult_) Field2DeepEqual(src map[string]int64) bool { - if len(p.SimpleResult_) != len(src) { - return false +func NewFrontendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *FrontendServiceClient { + return &FrontendServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), } - for k, v := range p.SimpleResult_ { - _src := src[k] - if v != _src { - return false - } +} + +func NewFrontendServiceClient(c thrift.TClient) *FrontendServiceClient { + return &FrontendServiceClient{ + c: c, } - return true } -func (p *TQueryStatsResult_) Field3DeepEqual(src []*TTableQueryStats) bool { - if len(p.TableStats) != len(src) { - return false +func (p *FrontendServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *FrontendServiceClient) GetDbNames(ctx context.Context, params *TGetDbsParams) (r *TGetDbsResult_, err error) { + var _args FrontendServiceGetDbNamesArgs + _args.Params = params + var _result FrontendServiceGetDbNamesResult + if err = p.Client_().Call(ctx, "getDbNames", &_args, &_result); err != nil { + return } - for i, v := range p.TableStats { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) GetTableNames(ctx context.Context, params *TGetTablesParams) (r *TGetTablesResult_, err error) { + var _args FrontendServiceGetTableNamesArgs + _args.Params = params + var _result FrontendServiceGetTableNamesResult + if err = p.Client_().Call(ctx, "getTableNames", &_args, &_result); err != nil { + return } - return true + return _result.GetSuccess(), nil } -func (p *TQueryStatsResult_) Field4DeepEqual(src []*TTableIndexQueryStats) bool { - - if len(p.TableVerbosStats) != len(src) { - return false +func (p *FrontendServiceClient) DescribeTable(ctx context.Context, params *TDescribeTableParams) (r *TDescribeTableResult_, err error) { + var _args FrontendServiceDescribeTableArgs + _args.Params = params + var _result FrontendServiceDescribeTableResult + if err = p.Client_().Call(ctx, "describeTable", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) DescribeTables(ctx context.Context, params *TDescribeTablesParams) (r *TDescribeTablesResult_, err error) { + var _args FrontendServiceDescribeTablesArgs + _args.Params = params + var _result FrontendServiceDescribeTablesResult + if err = p.Client_().Call(ctx, "describeTables", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) ShowVariables(ctx context.Context, params *TShowVariableRequest) (r *TShowVariableResult_, err error) { + var _args FrontendServiceShowVariablesArgs + _args.Params = params + var _result FrontendServiceShowVariablesResult + if err = p.Client_().Call(ctx, "showVariables", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) ReportExecStatus(ctx context.Context, params *TReportExecStatusParams) (r *TReportExecStatusResult_, err error) { + var _args FrontendServiceReportExecStatusArgs + _args.Params = params + var _result 
FrontendServiceReportExecStatusResult + if err = p.Client_().Call(ctx, "reportExecStatus", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) FinishTask(ctx context.Context, request *masterservice.TFinishTaskRequest) (r *masterservice.TMasterResult_, err error) { + var _args FrontendServiceFinishTaskArgs + _args.Request = request + var _result FrontendServiceFinishTaskResult + if err = p.Client_().Call(ctx, "finishTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) Report(ctx context.Context, request *masterservice.TReportRequest) (r *masterservice.TMasterResult_, err error) { + var _args FrontendServiceReportArgs + _args.Request = request + var _result FrontendServiceReportResult + if err = p.Client_().Call(ctx, "report", &_args, &_result); err != nil { + return } - for i, v := range p.TableVerbosStats { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) FetchResource(ctx context.Context) (r *masterservice.TFetchResourceResult_, err error) { + var _args FrontendServiceFetchResourceArgs + var _result FrontendServiceFetchResourceResult + if err = p.Client_().Call(ctx, "fetchResource", &_args, &_result); err != nil { + return } - return true + return _result.GetSuccess(), nil } -func (p *TQueryStatsResult_) Field5DeepEqual(src map[int64]int64) bool { - - if len(p.TabletStats) != len(src) { - return false +func (p *FrontendServiceClient) Forward(ctx context.Context, params *TMasterOpRequest) (r *TMasterOpResult_, err error) { + var _args FrontendServiceForwardArgs + _args.Params = params + var _result FrontendServiceForwardResult + if err = p.Client_().Call(ctx, "forward", &_args, &_result); err != nil { + return } - for k, v := range p.TabletStats { - _src := src[k] - if v != _src { - return false - } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) ListTableStatus(ctx context.Context, params *TGetTablesParams) (r *TListTableStatusResult_, err error) { + var _args FrontendServiceListTableStatusArgs + _args.Params = params + var _result FrontendServiceListTableStatusResult + if err = p.Client_().Call(ctx, "listTableStatus", &_args, &_result); err != nil { + return } - return true + return _result.GetSuccess(), nil } - -type TGetBinlogRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` - TableId *int64 `thrift:"table_id,6,optional" frugal:"6,optional,i64" json:"table_id,omitempty"` - UserIp *string `thrift:"user_ip,7,optional" frugal:"7,optional,string" json:"user_ip,omitempty"` - Token *string `thrift:"token,8,optional" frugal:"8,optional,string" json:"token,omitempty"` - PrevCommitSeq *int64 `thrift:"prev_commit_seq,9,optional" frugal:"9,optional,i64" json:"prev_commit_seq,omitempty"` +func (p *FrontendServiceClient) ListTableMetadataNameIds(ctx context.Context, params *TGetTablesParams) (r *TListTableMetadataNameIdsResult_, err error) { + var _args FrontendServiceListTableMetadataNameIdsArgs + _args.Params = params + var _result 
FrontendServiceListTableMetadataNameIdsResult + if err = p.Client_().Call(ctx, "listTableMetadataNameIds", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func NewTGetBinlogRequest() *TGetBinlogRequest { - return &TGetBinlogRequest{} +func (p *FrontendServiceClient) ListTablePrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { + var _args FrontendServiceListTablePrivilegeStatusArgs + _args.Params = params + var _result FrontendServiceListTablePrivilegeStatusResult + if err = p.Client_().Call(ctx, "listTablePrivilegeStatus", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) InitDefault() { - *p = TGetBinlogRequest{} +func (p *FrontendServiceClient) ListSchemaPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { + var _args FrontendServiceListSchemaPrivilegeStatusArgs + _args.Params = params + var _result FrontendServiceListSchemaPrivilegeStatusResult + if err = p.Client_().Call(ctx, "listSchemaPrivilegeStatus", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_Cluster_DEFAULT string - -func (p *TGetBinlogRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TGetBinlogRequest_Cluster_DEFAULT +func (p *FrontendServiceClient) ListUserPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { + var _args FrontendServiceListUserPrivilegeStatusArgs + _args.Params = params + var _result FrontendServiceListUserPrivilegeStatusResult + if err = p.Client_().Call(ctx, "listUserPrivilegeStatus", &_args, &_result); err != nil { + return } - return *p.Cluster + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_User_DEFAULT string - -func (p *TGetBinlogRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TGetBinlogRequest_User_DEFAULT +func (p *FrontendServiceClient) UpdateExportTaskStatus(ctx context.Context, request *TUpdateExportTaskStatusRequest) (r *TFeResult_, err error) { + var _args FrontendServiceUpdateExportTaskStatusArgs + _args.Request = request + var _result FrontendServiceUpdateExportTaskStatusResult + if err = p.Client_().Call(ctx, "updateExportTaskStatus", &_args, &_result); err != nil { + return } - return *p.User + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_Passwd_DEFAULT string - -func (p *TGetBinlogRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TGetBinlogRequest_Passwd_DEFAULT +func (p *FrontendServiceClient) LoadTxnBegin(ctx context.Context, request *TLoadTxnBeginRequest) (r *TLoadTxnBeginResult_, err error) { + var _args FrontendServiceLoadTxnBeginArgs + _args.Request = request + var _result FrontendServiceLoadTxnBeginResult + if err = p.Client_().Call(ctx, "loadTxnBegin", &_args, &_result); err != nil { + return } - return *p.Passwd + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_Db_DEFAULT string - -func (p *TGetBinlogRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TGetBinlogRequest_Db_DEFAULT +func (p *FrontendServiceClient) LoadTxnPreCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) { + var _args FrontendServiceLoadTxnPreCommitArgs + _args.Request = request + var _result FrontendServiceLoadTxnPreCommitResult + if err = p.Client_().Call(ctx, "loadTxnPreCommit", &_args, &_result); err != nil { + return } - return *p.Db + return 
_result.GetSuccess(), nil } - -var TGetBinlogRequest_Table_DEFAULT string - -func (p *TGetBinlogRequest) GetTable() (v string) { - if !p.IsSetTable() { - return TGetBinlogRequest_Table_DEFAULT +func (p *FrontendServiceClient) LoadTxn2PC(ctx context.Context, request *TLoadTxn2PCRequest) (r *TLoadTxn2PCResult_, err error) { + var _args FrontendServiceLoadTxn2PCArgs + _args.Request = request + var _result FrontendServiceLoadTxn2PCResult + if err = p.Client_().Call(ctx, "loadTxn2PC", &_args, &_result); err != nil { + return } - return *p.Table + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_TableId_DEFAULT int64 - -func (p *TGetBinlogRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TGetBinlogRequest_TableId_DEFAULT +func (p *FrontendServiceClient) LoadTxnCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) { + var _args FrontendServiceLoadTxnCommitArgs + _args.Request = request + var _result FrontendServiceLoadTxnCommitResult + if err = p.Client_().Call(ctx, "loadTxnCommit", &_args, &_result); err != nil { + return } - return *p.TableId + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_UserIp_DEFAULT string - -func (p *TGetBinlogRequest) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TGetBinlogRequest_UserIp_DEFAULT +func (p *FrontendServiceClient) LoadTxnRollback(ctx context.Context, request *TLoadTxnRollbackRequest) (r *TLoadTxnRollbackResult_, err error) { + var _args FrontendServiceLoadTxnRollbackArgs + _args.Request = request + var _result FrontendServiceLoadTxnRollbackResult + if err = p.Client_().Call(ctx, "loadTxnRollback", &_args, &_result); err != nil { + return } - return *p.UserIp + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_Token_DEFAULT string - -func (p *TGetBinlogRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TGetBinlogRequest_Token_DEFAULT +func (p *FrontendServiceClient) BeginTxn(ctx context.Context, request *TBeginTxnRequest) (r *TBeginTxnResult_, err error) { + var _args FrontendServiceBeginTxnArgs + _args.Request = request + var _result FrontendServiceBeginTxnResult + if err = p.Client_().Call(ctx, "beginTxn", &_args, &_result); err != nil { + return } - return *p.Token + return _result.GetSuccess(), nil } - -var TGetBinlogRequest_PrevCommitSeq_DEFAULT int64 - -func (p *TGetBinlogRequest) GetPrevCommitSeq() (v int64) { - if !p.IsSetPrevCommitSeq() { - return TGetBinlogRequest_PrevCommitSeq_DEFAULT +func (p *FrontendServiceClient) CommitTxn(ctx context.Context, request *TCommitTxnRequest) (r *TCommitTxnResult_, err error) { + var _args FrontendServiceCommitTxnArgs + _args.Request = request + var _result FrontendServiceCommitTxnResult + if err = p.Client_().Call(ctx, "commitTxn", &_args, &_result); err != nil { + return } - return *p.PrevCommitSeq + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetCluster(val *string) { - p.Cluster = val +func (p *FrontendServiceClient) RollbackTxn(ctx context.Context, request *TRollbackTxnRequest) (r *TRollbackTxnResult_, err error) { + var _args FrontendServiceRollbackTxnArgs + _args.Request = request + var _result FrontendServiceRollbackTxnResult + if err = p.Client_().Call(ctx, "rollbackTxn", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetUser(val *string) { - p.User = val +func (p *FrontendServiceClient) GetBinlog(ctx context.Context, request *TGetBinlogRequest) (r *TGetBinlogResult_, err error) { + var _args 
FrontendServiceGetBinlogArgs + _args.Request = request + var _result FrontendServiceGetBinlogResult + if err = p.Client_().Call(ctx, "getBinlog", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetPasswd(val *string) { - p.Passwd = val +func (p *FrontendServiceClient) GetSnapshot(ctx context.Context, request *TGetSnapshotRequest) (r *TGetSnapshotResult_, err error) { + var _args FrontendServiceGetSnapshotArgs + _args.Request = request + var _result FrontendServiceGetSnapshotResult + if err = p.Client_().Call(ctx, "getSnapshot", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetDb(val *string) { - p.Db = val +func (p *FrontendServiceClient) RestoreSnapshot(ctx context.Context, request *TRestoreSnapshotRequest) (r *TRestoreSnapshotResult_, err error) { + var _args FrontendServiceRestoreSnapshotArgs + _args.Request = request + var _result FrontendServiceRestoreSnapshotResult + if err = p.Client_().Call(ctx, "restoreSnapshot", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetTable(val *string) { - p.Table = val +func (p *FrontendServiceClient) WaitingTxnStatus(ctx context.Context, request *TWaitingTxnStatusRequest) (r *TWaitingTxnStatusResult_, err error) { + var _args FrontendServiceWaitingTxnStatusArgs + _args.Request = request + var _result FrontendServiceWaitingTxnStatusResult + if err = p.Client_().Call(ctx, "waitingTxnStatus", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetTableId(val *int64) { - p.TableId = val +func (p *FrontendServiceClient) StreamLoadPut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadPutResult_, err error) { + var _args FrontendServiceStreamLoadPutArgs + _args.Request = request + var _result FrontendServiceStreamLoadPutResult + if err = p.Client_().Call(ctx, "streamLoadPut", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetUserIp(val *string) { - p.UserIp = val +func (p *FrontendServiceClient) StreamLoadMultiTablePut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadMultiTablePutResult_, err error) { + var _args FrontendServiceStreamLoadMultiTablePutArgs + _args.Request = request + var _result FrontendServiceStreamLoadMultiTablePutResult + if err = p.Client_().Call(ctx, "streamLoadMultiTablePut", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetToken(val *string) { - p.Token = val +func (p *FrontendServiceClient) SnapshotLoaderReport(ctx context.Context, request *TSnapshotLoaderReportRequest) (r *status.TStatus, err error) { + var _args FrontendServiceSnapshotLoaderReportArgs + _args.Request = request + var _result FrontendServiceSnapshotLoaderReportResult + if err = p.Client_().Call(ctx, "snapshotLoaderReport", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) SetPrevCommitSeq(val *int64) { - p.PrevCommitSeq = val +func (p *FrontendServiceClient) Ping(ctx context.Context, request *TFrontendPingFrontendRequest) (r *TFrontendPingFrontendResult_, err error) { + var _args FrontendServicePingArgs + _args.Request = request + var _result FrontendServicePingResult + if err = p.Client_().Call(ctx, "ping", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -var 
fieldIDToName_TGetBinlogRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "table", - 6: "table_id", - 7: "user_ip", - 8: "token", - 9: "prev_commit_seq", +func (p *FrontendServiceClient) InitExternalCtlMeta(ctx context.Context, request *TInitExternalCtlMetaRequest) (r *TInitExternalCtlMetaResult_, err error) { + var _args FrontendServiceInitExternalCtlMetaArgs + _args.Request = request + var _result FrontendServiceInitExternalCtlMetaResult + if err = p.Client_().Call(ctx, "initExternalCtlMeta", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetCluster() bool { - return p.Cluster != nil +func (p *FrontendServiceClient) FetchSchemaTableData(ctx context.Context, request *TFetchSchemaTableDataRequest) (r *TFetchSchemaTableDataResult_, err error) { + var _args FrontendServiceFetchSchemaTableDataArgs + _args.Request = request + var _result FrontendServiceFetchSchemaTableDataResult + if err = p.Client_().Call(ctx, "fetchSchemaTableData", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetUser() bool { - return p.User != nil +func (p *FrontendServiceClient) AcquireToken(ctx context.Context) (r *TMySqlLoadAcquireTokenResult_, err error) { + var _args FrontendServiceAcquireTokenArgs + var _result FrontendServiceAcquireTokenResult + if err = p.Client_().Call(ctx, "acquireToken", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetPasswd() bool { - return p.Passwd != nil +func (p *FrontendServiceClient) CheckToken(ctx context.Context, token string) (r bool, err error) { + var _args FrontendServiceCheckTokenArgs + _args.Token = token + var _result FrontendServiceCheckTokenResult + if err = p.Client_().Call(ctx, "checkToken", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetDb() bool { - return p.Db != nil +func (p *FrontendServiceClient) ConfirmUnusedRemoteFiles(ctx context.Context, request *TConfirmUnusedRemoteFilesRequest) (r *TConfirmUnusedRemoteFilesResult_, err error) { + var _args FrontendServiceConfirmUnusedRemoteFilesArgs + _args.Request = request + var _result FrontendServiceConfirmUnusedRemoteFilesResult + if err = p.Client_().Call(ctx, "confirmUnusedRemoteFiles", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetTable() bool { - return p.Table != nil +func (p *FrontendServiceClient) CheckAuth(ctx context.Context, request *TCheckAuthRequest) (r *TCheckAuthResult_, err error) { + var _args FrontendServiceCheckAuthArgs + _args.Request = request + var _result FrontendServiceCheckAuthResult + if err = p.Client_().Call(ctx, "checkAuth", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetTableId() bool { - return p.TableId != nil +func (p *FrontendServiceClient) GetQueryStats(ctx context.Context, request *TGetQueryStatsRequest) (r *TQueryStatsResult_, err error) { + var _args FrontendServiceGetQueryStatsArgs + _args.Request = request + var _result FrontendServiceGetQueryStatsResult + if err = p.Client_().Call(ctx, "getQueryStats", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetUserIp() bool { - return p.UserIp != nil +func (p *FrontendServiceClient) GetTabletReplicaInfos(ctx context.Context, 
request *TGetTabletReplicaInfosRequest) (r *TGetTabletReplicaInfosResult_, err error) { + var _args FrontendServiceGetTabletReplicaInfosArgs + _args.Request = request + var _result FrontendServiceGetTabletReplicaInfosResult + if err = p.Client_().Call(ctx, "getTabletReplicaInfos", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetToken() bool { - return p.Token != nil +func (p *FrontendServiceClient) AddPlsqlStoredProcedure(ctx context.Context, request *TAddPlsqlStoredProcedureRequest) (r *TPlsqlStoredProcedureResult_, err error) { + var _args FrontendServiceAddPlsqlStoredProcedureArgs + _args.Request = request + var _result FrontendServiceAddPlsqlStoredProcedureResult + if err = p.Client_().Call(ctx, "addPlsqlStoredProcedure", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) IsSetPrevCommitSeq() bool { - return p.PrevCommitSeq != nil +func (p *FrontendServiceClient) DropPlsqlStoredProcedure(ctx context.Context, request *TDropPlsqlStoredProcedureRequest) (r *TPlsqlStoredProcedureResult_, err error) { + var _args FrontendServiceDropPlsqlStoredProcedureArgs + _args.Request = request + var _result FrontendServiceDropPlsqlStoredProcedureResult + if err = p.Client_().Call(ctx, "dropPlsqlStoredProcedure", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *FrontendServiceClient) AddPlsqlPackage(ctx context.Context, request *TAddPlsqlPackageRequest) (r *TPlsqlPackageResult_, err error) { + var _args FrontendServiceAddPlsqlPackageArgs + _args.Request = request + var _result FrontendServiceAddPlsqlPackageResult + if err = p.Client_().Call(ctx, "addPlsqlPackage", &_args, &_result); err != nil { + return } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - 
if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) DropPlsqlPackage(ctx context.Context, request *TDropPlsqlPackageRequest) (r *TPlsqlPackageResult_, err error) { + var _args FrontendServiceDropPlsqlPackageArgs + _args.Request = request + var _result FrontendServiceDropPlsqlPackageResult + if err = p.Client_().Call(ctx, "dropPlsqlPackage", &_args, &_result); err != nil { + return } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) GetMasterToken(ctx context.Context, request *TGetMasterTokenRequest) (r *TGetMasterTokenResult_, err error) { + var _args FrontendServiceGetMasterTokenArgs + _args.Request = request + var _result FrontendServiceGetMasterTokenResult + if err = p.Client_().Call(ctx, "getMasterToken", &_args, &_result); err != nil { + return } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v +func (p *FrontendServiceClient) GetBinlogLag(ctx context.Context, request *TGetBinlogLagRequest) (r *TGetBinlogLagResult_, err error) { + var _args FrontendServiceGetBinlogLagArgs + _args.Request = request + var _result FrontendServiceGetBinlogLagResult + if err = p.Client_().Call(ctx, "getBinlogLag", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = &v +func (p *FrontendServiceClient) UpdateStatsCache(ctx context.Context, request *TUpdateFollowerStatsCacheRequest) (r *status.TStatus, err error) { + var _args FrontendServiceUpdateStatsCacheArgs + _args.Request = request + var _result FrontendServiceUpdateStatsCacheResult + if err = p.Client_().Call(ctx, "updateStatsCache", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - 
} else { - p.Passwd = &v +func (p *FrontendServiceClient) GetAutoIncrementRange(ctx context.Context, request *TAutoIncrementRangeRequest) (r *TAutoIncrementRangeResult_, err error) { + var _args FrontendServiceGetAutoIncrementRangeArgs + _args.Request = request + var _result FrontendServiceGetAutoIncrementRangeResult + if err = p.Client_().Call(ctx, "getAutoIncrementRange", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v +func (p *FrontendServiceClient) CreatePartition(ctx context.Context, request *TCreatePartitionRequest) (r *TCreatePartitionResult_, err error) { + var _args FrontendServiceCreatePartitionArgs + _args.Request = request + var _result FrontendServiceCreatePartitionResult + if err = p.Client_().Call(ctx, "createPartition", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Table = &v +func (p *FrontendServiceClient) ReplacePartition(ctx context.Context, request *TReplacePartitionRequest) (r *TReplacePartitionResult_, err error) { + var _args FrontendServiceReplacePartitionArgs + _args.Request = request + var _result FrontendServiceReplacePartitionResult + if err = p.Client_().Call(ctx, "replacePartition", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TableId = &v +func (p *FrontendServiceClient) GetMeta(ctx context.Context, request *TGetMetaRequest) (r *TGetMetaResult_, err error) { + var _args FrontendServiceGetMetaArgs + _args.Request = request + var _result FrontendServiceGetMetaResult + if err = p.Client_().Call(ctx, "getMeta", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v +func (p *FrontendServiceClient) GetBackendMeta(ctx context.Context, request *TGetBackendMetaRequest) (r *TGetBackendMetaResult_, err error) { + var _args FrontendServiceGetBackendMetaArgs + _args.Request = request + var _result FrontendServiceGetBackendMetaResult + if err = p.Client_().Call(ctx, "getBackendMeta", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v +func (p *FrontendServiceClient) GetColumnInfo(ctx context.Context, request *TGetColumnInfoRequest) (r *TGetColumnInfoResult_, err error) { + var _args FrontendServiceGetColumnInfoArgs + _args.Request = request + var _result FrontendServiceGetColumnInfoResult + if err = p.Client_().Call(ctx, "getColumnInfo", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.PrevCommitSeq = &v +func (p *FrontendServiceClient) InvalidateStatsCache(ctx context.Context, request 
*TInvalidateFollowerStatsCacheRequest) (r *status.TStatus, err error) { + var _args FrontendServiceInvalidateStatsCacheArgs + _args.Request = request + var _result FrontendServiceInvalidateStatsCacheResult + if err = p.Client_().Call(ctx, "invalidateStatsCache", &_args, &_result); err != nil { + return } - return nil + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGetBinlogRequest"); err != nil { - goto WriteStructBeginError +func (p *FrontendServiceClient) ShowProcessList(ctx context.Context, request *TShowProcessListRequest) (r *TShowProcessListResult_, err error) { + var _args FrontendServiceShowProcessListArgs + _args.Request = request + var _result FrontendServiceShowProcessListResult + if err = p.Client_().Call(ctx, "showProcessList", &_args, &_result); err != nil { + return } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) ReportCommitTxnResult_(ctx context.Context, request *TReportCommitTxnResultRequest) (r *status.TStatus, err error) { + var _args FrontendServiceReportCommitTxnResultArgs + _args.Request = request + var _result FrontendServiceReportCommitTxnResultResult + if err = p.Client_().Call(ctx, "reportCommitTxnResult", &_args, &_result); err != nil { + return } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) ShowUser(ctx context.Context, request *TShowUserRequest) (r *TShowUserResult_, err error) { + var _args FrontendServiceShowUserArgs + _args.Request = request + var _result FrontendServiceShowUserResult + if err = p.Client_().Call(ctx, "showUser", &_args, &_result); err != nil { + return } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + return _result.GetSuccess(), nil +} +func (p *FrontendServiceClient) SyncQueryColumns(ctx context.Context, request *TSyncQueryColumns) (r *status.TStatus, err error) { + var _args FrontendServiceSyncQueryColumnsArgs + _args.Request = request + var _result FrontendServiceSyncQueryColumnsResult + if err = p.Client_().Call(ctx, "syncQueryColumns", &_args, &_result); err != nil { + return } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) 
writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Cluster); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceClient) FetchSplitBatch(ctx context.Context, request *TFetchSplitBatchRequest) (r *TFetchSplitBatchResult_, err error) { + var _args FrontendServiceFetchSplitBatchArgs + _args.Request = request + var _result FrontendServiceFetchSplitBatchResult + if err = p.Client_().Call(ctx, "fetchSplitBatch", &_args, &_result); err != nil { + return } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceClient) UpdatePartitionStatsCache(ctx context.Context, request *TUpdateFollowerPartitionStatsCacheRequest) (r *status.TStatus, err error) { + var _args FrontendServiceUpdatePartitionStatsCacheArgs + _args.Request = request + var _result FrontendServiceUpdatePartitionStatsCacheResult + if err = p.Client_().Call(ctx, "updatePartitionStatsCache", &_args, &_result); err != nil { + return } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return _result.GetSuccess(), nil } - -func (p *TGetBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceClient) FetchRunningQueries(ctx context.Context, request *TFetchRunningQueriesRequest) (r *TFetchRunningQueriesResult_, err error) { + var _args FrontendServiceFetchRunningQueriesArgs + _args.Request = request + var _result FrontendServiceFetchRunningQueriesResult + if err = p.Client_().Call(ctx, "fetchRunningQueries", &_args, &_result); err != nil { + return } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return _result.GetSuccess(), nil } -func (p *TGetBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: 
", p), err) +type FrontendServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler FrontendService } -func (p *TGetBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTable() { - if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Table); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +func (p *FrontendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor } -func (p *TGetBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TableId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +func (p *FrontendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok } -func (p *TGetBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +func (p *FrontendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap } -func (p *TGetBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func NewFrontendServiceProcessor(handler FrontendService) *FrontendServiceProcessor { + self := &FrontendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self.AddToProcessorMap("getDbNames", &frontendServiceProcessorGetDbNames{handler: handler}) + self.AddToProcessorMap("getTableNames", &frontendServiceProcessorGetTableNames{handler: handler}) + self.AddToProcessorMap("describeTable", &frontendServiceProcessorDescribeTable{handler: handler}) + self.AddToProcessorMap("describeTables", &frontendServiceProcessorDescribeTables{handler: handler}) + self.AddToProcessorMap("showVariables", &frontendServiceProcessorShowVariables{handler: handler}) + self.AddToProcessorMap("reportExecStatus", &frontendServiceProcessorReportExecStatus{handler: handler}) + self.AddToProcessorMap("finishTask", 
&frontendServiceProcessorFinishTask{handler: handler}) + self.AddToProcessorMap("report", &frontendServiceProcessorReport{handler: handler}) + self.AddToProcessorMap("fetchResource", &frontendServiceProcessorFetchResource{handler: handler}) + self.AddToProcessorMap("forward", &frontendServiceProcessorForward{handler: handler}) + self.AddToProcessorMap("listTableStatus", &frontendServiceProcessorListTableStatus{handler: handler}) + self.AddToProcessorMap("listTableMetadataNameIds", &frontendServiceProcessorListTableMetadataNameIds{handler: handler}) + self.AddToProcessorMap("listTablePrivilegeStatus", &frontendServiceProcessorListTablePrivilegeStatus{handler: handler}) + self.AddToProcessorMap("listSchemaPrivilegeStatus", &frontendServiceProcessorListSchemaPrivilegeStatus{handler: handler}) + self.AddToProcessorMap("listUserPrivilegeStatus", &frontendServiceProcessorListUserPrivilegeStatus{handler: handler}) + self.AddToProcessorMap("updateExportTaskStatus", &frontendServiceProcessorUpdateExportTaskStatus{handler: handler}) + self.AddToProcessorMap("loadTxnBegin", &frontendServiceProcessorLoadTxnBegin{handler: handler}) + self.AddToProcessorMap("loadTxnPreCommit", &frontendServiceProcessorLoadTxnPreCommit{handler: handler}) + self.AddToProcessorMap("loadTxn2PC", &frontendServiceProcessorLoadTxn2PC{handler: handler}) + self.AddToProcessorMap("loadTxnCommit", &frontendServiceProcessorLoadTxnCommit{handler: handler}) + self.AddToProcessorMap("loadTxnRollback", &frontendServiceProcessorLoadTxnRollback{handler: handler}) + self.AddToProcessorMap("beginTxn", &frontendServiceProcessorBeginTxn{handler: handler}) + self.AddToProcessorMap("commitTxn", &frontendServiceProcessorCommitTxn{handler: handler}) + self.AddToProcessorMap("rollbackTxn", &frontendServiceProcessorRollbackTxn{handler: handler}) + self.AddToProcessorMap("getBinlog", &frontendServiceProcessorGetBinlog{handler: handler}) + self.AddToProcessorMap("getSnapshot", &frontendServiceProcessorGetSnapshot{handler: handler}) + self.AddToProcessorMap("restoreSnapshot", &frontendServiceProcessorRestoreSnapshot{handler: handler}) + self.AddToProcessorMap("waitingTxnStatus", &frontendServiceProcessorWaitingTxnStatus{handler: handler}) + self.AddToProcessorMap("streamLoadPut", &frontendServiceProcessorStreamLoadPut{handler: handler}) + self.AddToProcessorMap("streamLoadMultiTablePut", &frontendServiceProcessorStreamLoadMultiTablePut{handler: handler}) + self.AddToProcessorMap("snapshotLoaderReport", &frontendServiceProcessorSnapshotLoaderReport{handler: handler}) + self.AddToProcessorMap("ping", &frontendServiceProcessorPing{handler: handler}) + self.AddToProcessorMap("initExternalCtlMeta", &frontendServiceProcessorInitExternalCtlMeta{handler: handler}) + self.AddToProcessorMap("fetchSchemaTableData", &frontendServiceProcessorFetchSchemaTableData{handler: handler}) + self.AddToProcessorMap("acquireToken", &frontendServiceProcessorAcquireToken{handler: handler}) + self.AddToProcessorMap("checkToken", &frontendServiceProcessorCheckToken{handler: handler}) + self.AddToProcessorMap("confirmUnusedRemoteFiles", &frontendServiceProcessorConfirmUnusedRemoteFiles{handler: handler}) + self.AddToProcessorMap("checkAuth", &frontendServiceProcessorCheckAuth{handler: handler}) + self.AddToProcessorMap("getQueryStats", &frontendServiceProcessorGetQueryStats{handler: handler}) + self.AddToProcessorMap("getTabletReplicaInfos", &frontendServiceProcessorGetTabletReplicaInfos{handler: handler}) + self.AddToProcessorMap("addPlsqlStoredProcedure", 
&frontendServiceProcessorAddPlsqlStoredProcedure{handler: handler}) + self.AddToProcessorMap("dropPlsqlStoredProcedure", &frontendServiceProcessorDropPlsqlStoredProcedure{handler: handler}) + self.AddToProcessorMap("addPlsqlPackage", &frontendServiceProcessorAddPlsqlPackage{handler: handler}) + self.AddToProcessorMap("dropPlsqlPackage", &frontendServiceProcessorDropPlsqlPackage{handler: handler}) + self.AddToProcessorMap("getMasterToken", &frontendServiceProcessorGetMasterToken{handler: handler}) + self.AddToProcessorMap("getBinlogLag", &frontendServiceProcessorGetBinlogLag{handler: handler}) + self.AddToProcessorMap("updateStatsCache", &frontendServiceProcessorUpdateStatsCache{handler: handler}) + self.AddToProcessorMap("getAutoIncrementRange", &frontendServiceProcessorGetAutoIncrementRange{handler: handler}) + self.AddToProcessorMap("createPartition", &frontendServiceProcessorCreatePartition{handler: handler}) + self.AddToProcessorMap("replacePartition", &frontendServiceProcessorReplacePartition{handler: handler}) + self.AddToProcessorMap("getMeta", &frontendServiceProcessorGetMeta{handler: handler}) + self.AddToProcessorMap("getBackendMeta", &frontendServiceProcessorGetBackendMeta{handler: handler}) + self.AddToProcessorMap("getColumnInfo", &frontendServiceProcessorGetColumnInfo{handler: handler}) + self.AddToProcessorMap("invalidateStatsCache", &frontendServiceProcessorInvalidateStatsCache{handler: handler}) + self.AddToProcessorMap("showProcessList", &frontendServiceProcessorShowProcessList{handler: handler}) + self.AddToProcessorMap("reportCommitTxnResult", &frontendServiceProcessorReportCommitTxnResult_{handler: handler}) + self.AddToProcessorMap("showUser", &frontendServiceProcessorShowUser{handler: handler}) + self.AddToProcessorMap("syncQueryColumns", &frontendServiceProcessorSyncQueryColumns{handler: handler}) + self.AddToProcessorMap("fetchSplitBatch", &frontendServiceProcessorFetchSplitBatch{handler: handler}) + self.AddToProcessorMap("updatePartitionStatsCache", &frontendServiceProcessorUpdatePartitionStatsCache{handler: handler}) + self.AddToProcessorMap("fetchRunningQueries", &frontendServiceProcessorFetchRunningQueries{handler: handler}) + return self +} +func (p *FrontendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x } -func (p *TGetBinlogRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetPrevCommitSeq() { - if err = oprot.WriteFieldBegin("prev_commit_seq", thrift.I64, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.PrevCommitSeq); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) 
-WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +type frontendServiceProcessorGetDbNames struct { + handler FrontendService } -func (p *TGetBinlogRequest) String() string { - if p == nil { - return "" +func (p *frontendServiceProcessorGetDbNames) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetDbNamesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getDbNames", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return fmt.Sprintf("TGetBinlogRequest(%+v)", *p) -} -func (p *TGetBinlogRequest) DeepEqual(ano *TGetBinlogRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetDbNamesResult{} + var retval *TGetDbsResult_ + if retval, err2 = p.handler.GetDbNames(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getDbNames: "+err2.Error()) + oprot.WriteMessageBegin("getDbNames", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field1DeepEqual(ano.Cluster) { - return false + if err2 = oprot.WriteMessageBegin("getDbNames", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field2DeepEqual(ano.User) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field3DeepEqual(ano.Passwd) { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if !p.Field4DeepEqual(ano.Db) { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if !p.Field5DeepEqual(ano.Table) { - return false + if err != nil { + return + } + return true, err +} + +type frontendServiceProcessorGetTableNames struct { + handler FrontendService +} + +func (p *frontendServiceProcessorGetTableNames) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetTableNamesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getTableNames", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetTableNamesResult{} + var retval *TGetTablesResult_ + if retval, err2 = p.handler.GetTableNames(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getTableNames: "+err2.Error()) + oprot.WriteMessageBegin("getTableNames", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getTableNames", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field6DeepEqual(ano.TableId) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field7DeepEqual(ano.UserIp) { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = 
err2 } - if !p.Field8DeepEqual(ano.Token) { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if !p.Field9DeepEqual(ano.PrevCommitSeq) { - return false + if err != nil { + return } - return true + return true, err } -func (p *TGetBinlogRequest) Field1DeepEqual(src *string) bool { +type frontendServiceProcessorDescribeTable struct { + handler FrontendService +} - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { - return false +func (p *frontendServiceProcessorDescribeTable) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceDescribeTableArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("describeTable", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetBinlogRequest) Field2DeepEqual(src *string) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceDescribeTableResult{} + var retval *TDescribeTableResult_ + if retval, err2 = p.handler.DescribeTable(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing describeTable: "+err2.Error()) + oprot.WriteMessageBegin("describeTable", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if strings.Compare(*p.User, *src) != 0 { - return false + if err2 = oprot.WriteMessageBegin("describeTable", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogRequest) Field3DeepEqual(src *string) bool { - - if p.Passwd == src { - return true - } else if p.Passwd == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.Passwd, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogRequest) Field4DeepEqual(src *string) bool { - - if p.Db == src { - return true - } else if p.Db == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.Db, *src) != 0 { - return false + if err != nil { + return } - return true + return true, err } -func (p *TGetBinlogRequest) Field5DeepEqual(src *string) bool { - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false - } - if strings.Compare(*p.Table, *src) != 0 { - return false - } - return true +type frontendServiceProcessorDescribeTables struct { + handler FrontendService } -func (p *TGetBinlogRequest) Field6DeepEqual(src *int64) bool { - if p.TableId == src { - return true - } else if p.TableId == nil || src == nil { - return false - } - if *p.TableId != *src { - return false +func (p *frontendServiceProcessorDescribeTables) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceDescribeTablesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, 
err.Error()) + oprot.WriteMessageBegin("describeTables", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetBinlogRequest) Field7DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceDescribeTablesResult{} + var retval *TDescribeTablesResult_ + if retval, err2 = p.handler.DescribeTables(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing describeTables: "+err2.Error()) + oprot.WriteMessageBegin("describeTables", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if strings.Compare(*p.UserIp, *src) != 0 { - return false + if err2 = oprot.WriteMessageBegin("describeTables", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogRequest) Field8DeepEqual(src *string) bool { - - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.Token, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogRequest) Field9DeepEqual(src *int64) bool { - - if p.PrevCommitSeq == src { - return true - } else if p.PrevCommitSeq == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if *p.PrevCommitSeq != *src { - return false + if err != nil { + return } - return true -} - -type TBinlog struct { - CommitSeq *int64 `thrift:"commit_seq,1,optional" frugal:"1,optional,i64" json:"commit_seq,omitempty"` - Timestamp *int64 `thrift:"timestamp,2,optional" frugal:"2,optional,i64" json:"timestamp,omitempty"` - Type *TBinlogType `thrift:"type,3,optional" frugal:"3,optional,TBinlogType" json:"type,omitempty"` - DbId *int64 `thrift:"db_id,4,optional" frugal:"4,optional,i64" json:"db_id,omitempty"` - TableIds []int64 `thrift:"table_ids,5,optional" frugal:"5,optional,list" json:"table_ids,omitempty"` - Data *string `thrift:"data,6,optional" frugal:"6,optional,string" json:"data,omitempty"` - Belong *int64 `thrift:"belong,7,optional" frugal:"7,optional,i64" json:"belong,omitempty"` - TableRef *int64 `thrift:"table_ref,8,optional" frugal:"8,optional,i64" json:"table_ref,omitempty"` - RemoveEnableCache *bool `thrift:"remove_enable_cache,9,optional" frugal:"9,optional,bool" json:"remove_enable_cache,omitempty"` -} - -func NewTBinlog() *TBinlog { - return &TBinlog{} + return true, err } -func (p *TBinlog) InitDefault() { - *p = TBinlog{} +type frontendServiceProcessorShowVariables struct { + handler FrontendService } -var TBinlog_CommitSeq_DEFAULT int64 - -func (p *TBinlog) GetCommitSeq() (v int64) { - if !p.IsSetCommitSeq() { - return TBinlog_CommitSeq_DEFAULT +func (p *frontendServiceProcessorShowVariables) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceShowVariablesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("showVariables", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + 
return false, err } - return *p.CommitSeq -} - -var TBinlog_Timestamp_DEFAULT int64 -func (p *TBinlog) GetTimestamp() (v int64) { - if !p.IsSetTimestamp() { - return TBinlog_Timestamp_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceShowVariablesResult{} + var retval *TShowVariableResult_ + if retval, err2 = p.handler.ShowVariables(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing showVariables: "+err2.Error()) + oprot.WriteMessageBegin("showVariables", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.Timestamp -} - -var TBinlog_Type_DEFAULT TBinlogType - -func (p *TBinlog) GetType() (v TBinlogType) { - if !p.IsSetType() { - return TBinlog_Type_DEFAULT + if err2 = oprot.WriteMessageBegin("showVariables", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return *p.Type -} - -var TBinlog_DbId_DEFAULT int64 - -func (p *TBinlog) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TBinlog_DbId_DEFAULT + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return *p.DbId -} - -var TBinlog_TableIds_DEFAULT []int64 - -func (p *TBinlog) GetTableIds() (v []int64) { - if !p.IsSetTableIds() { - return TBinlog_TableIds_DEFAULT + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return p.TableIds + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var TBinlog_Data_DEFAULT string - -func (p *TBinlog) GetData() (v string) { - if !p.IsSetData() { - return TBinlog_Data_DEFAULT - } - return *p.Data +type frontendServiceProcessorReportExecStatus struct { + handler FrontendService } -var TBinlog_Belong_DEFAULT int64 +func (p *frontendServiceProcessorReportExecStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceReportExecStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("reportExecStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TBinlog) GetBelong() (v int64) { - if !p.IsSetBelong() { - return TBinlog_Belong_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceReportExecStatusResult{} + var retval *TReportExecStatusResult_ + if retval, err2 = p.handler.ReportExecStatus(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing reportExecStatus: "+err2.Error()) + oprot.WriteMessageBegin("reportExecStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.Belong + if err2 = oprot.WriteMessageBegin("reportExecStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var TBinlog_TableRef_DEFAULT int64 - -func (p *TBinlog) GetTableRef() (v int64) { - if !p.IsSetTableRef() { - return 
TBinlog_TableRef_DEFAULT - } - return *p.TableRef +type frontendServiceProcessorFinishTask struct { + handler FrontendService } -var TBinlog_RemoveEnableCache_DEFAULT bool +func (p *frontendServiceProcessorFinishTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceFinishTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("finishTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TBinlog) GetRemoveEnableCache() (v bool) { - if !p.IsSetRemoveEnableCache() { - return TBinlog_RemoveEnableCache_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceFinishTaskResult{} + var retval *masterservice.TMasterResult_ + if retval, err2 = p.handler.FinishTask(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing finishTask: "+err2.Error()) + oprot.WriteMessageBegin("finishTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.RemoveEnableCache -} -func (p *TBinlog) SetCommitSeq(val *int64) { - p.CommitSeq = val -} -func (p *TBinlog) SetTimestamp(val *int64) { - p.Timestamp = val -} -func (p *TBinlog) SetType(val *TBinlogType) { - p.Type = val -} -func (p *TBinlog) SetDbId(val *int64) { - p.DbId = val -} -func (p *TBinlog) SetTableIds(val []int64) { - p.TableIds = val -} -func (p *TBinlog) SetData(val *string) { - p.Data = val -} -func (p *TBinlog) SetBelong(val *int64) { - p.Belong = val -} -func (p *TBinlog) SetTableRef(val *int64) { - p.TableRef = val -} -func (p *TBinlog) SetRemoveEnableCache(val *bool) { - p.RemoveEnableCache = val + if err2 = oprot.WriteMessageBegin("finishTask", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var fieldIDToName_TBinlog = map[int16]string{ - 1: "commit_seq", - 2: "timestamp", - 3: "type", - 4: "db_id", - 5: "table_ids", - 6: "data", - 7: "belong", - 8: "table_ref", - 9: "remove_enable_cache", +type frontendServiceProcessorReport struct { + handler FrontendService } -func (p *TBinlog) IsSetCommitSeq() bool { - return p.CommitSeq != nil -} +func (p *frontendServiceProcessorReport) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceReportArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("report", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TBinlog) IsSetTimestamp() bool { - return p.Timestamp != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceReportResult{} + var retval *masterservice.TMasterResult_ + if retval, err2 = p.handler.Report(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing report: "+err2.Error()) + 
oprot.WriteMessageBegin("report", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("report", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) IsSetType() bool { - return p.Type != nil +type frontendServiceProcessorFetchResource struct { + handler FrontendService } -func (p *TBinlog) IsSetDbId() bool { - return p.DbId != nil -} +func (p *frontendServiceProcessorFetchResource) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceFetchResourceArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("fetchResource", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TBinlog) IsSetTableIds() bool { - return p.TableIds != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceFetchResourceResult{} + var retval *masterservice.TFetchResourceResult_ + if retval, err2 = p.handler.FetchResource(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchResource: "+err2.Error()) + oprot.WriteMessageBegin("fetchResource", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("fetchResource", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) IsSetData() bool { - return p.Data != nil +type frontendServiceProcessorForward struct { + handler FrontendService } -func (p *TBinlog) IsSetBelong() bool { - return p.Belong != nil -} +func (p *frontendServiceProcessorForward) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceForwardArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("forward", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TBinlog) IsSetTableRef() bool { - return p.TableRef != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceForwardResult{} + var retval *TMasterOpResult_ + if retval, err2 = p.handler.Forward(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing forward: "+err2.Error()) + oprot.WriteMessageBegin("forward", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("forward", 
thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) IsSetRemoveEnableCache() bool { - return p.RemoveEnableCache != nil +type frontendServiceProcessorListTableStatus struct { + handler FrontendService } -func (p *TBinlog) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *frontendServiceProcessorListTableStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceListTableStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("listTableStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.LIST { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceListTableStatusResult{} + var retval *TListTableStatusResult_ + if retval, err2 = 
p.handler.ListTableStatus(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTableStatus: "+err2.Error()) + oprot.WriteMessageBegin("listTableStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err2 = oprot.WriteMessageBegin("listTableStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlog[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.CommitSeq = &v - } - return nil +type frontendServiceProcessorListTableMetadataNameIds struct { + handler FrontendService } -func (p *TBinlog) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Timestamp = &v +func (p *frontendServiceProcessorListTableMetadataNameIds) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceListTableMetadataNameIdsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TBinlog) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceListTableMetadataNameIdsResult{} + var retval *TListTableMetadataNameIdsResult_ + if retval, err2 = p.handler.ListTableMetadataNameIds(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTableMetadataNameIds: "+err2.Error()) + oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - tmp := TBinlogType(v) - p.Type = &tmp + result.Success = retval } - return nil + if err2 = oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil 
&& err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DbId = &v - } - return nil +type frontendServiceProcessorListTablePrivilegeStatus struct { + handler FrontendService } -func (p *TBinlog) ReadField5(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *frontendServiceProcessorListTablePrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceListTablePrivilegeStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - p.TableIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem = v - } - p.TableIds = append(p.TableIds, _elem) + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceListTablePrivilegeStatusResult{} + var retval *TListPrivilegesResult_ + if retval, err2 = p.handler.ListTablePrivilegeStatus(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTablePrivilegeStatus: "+err2.Error()) + oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err := iprot.ReadListEnd(); err != nil { - return err + if err2 = oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return nil + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Data = &v - } - return nil +type frontendServiceProcessorListSchemaPrivilegeStatus struct { + handler FrontendService } -func (p *TBinlog) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Belong = &v +func (p *frontendServiceProcessorListSchemaPrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceListSchemaPrivilegeStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TBinlog) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceListSchemaPrivilegeStatusResult{} + var retval *TListPrivilegesResult_ + if retval, err2 = p.handler.ListSchemaPrivilegeStatus(ctx, args.Params); err2 != 
nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listSchemaPrivilegeStatus: "+err2.Error()) + oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - p.TableRef = &v + result.Success = retval } - return nil + if err2 = oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.RemoveEnableCache = &v - } - return nil +type frontendServiceProcessorListUserPrivilegeStatus struct { + handler FrontendService } -func (p *TBinlog) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TBinlog"); err != nil { - goto WriteStructBeginError +func (p *frontendServiceProcessorListUserPrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceListUserPrivilegeStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceListUserPrivilegeStatusResult{} + var retval *TListPrivilegesResult_ + if retval, err2 = p.handler.ListUserPrivilegeStatus(ctx, args.Params); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listUserPrivilegeStatus: "+err2.Error()) + oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err2 = oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) 
-WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *TBinlog) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCommitSeq() { - if err = oprot.WriteFieldBegin("commit_seq", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.CommitSeq); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TBinlog) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err = oprot.WriteFieldBegin("timestamp", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Timestamp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + if err != nil { + return + } + return true, err } -func (p *TBinlog) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetType() { - if err = oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.Type)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +type frontendServiceProcessorUpdateExportTaskStatus struct { + handler FrontendService } -func (p *TBinlog) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorUpdateExportTaskStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceUpdateExportTaskStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("updateExportTaskStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} -func (p *TBinlog) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTableIds() { - if err = 
oprot.WriteFieldBegin("table_ids", thrift.LIST, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.I64, len(p.TableIds)); err != nil { - return err - } - for _, v := range p.TableIds { - if err := oprot.WriteI64(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceUpdateExportTaskStatusResult{} + var retval *TFeResult_ + if retval, err2 = p.handler.UpdateExportTaskStatus(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing updateExportTaskStatus: "+err2.Error()) + oprot.WriteMessageBegin("updateExportTaskStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + if err2 = oprot.WriteMessageBegin("updateExportTaskStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetData() { - if err = oprot.WriteFieldBegin("data", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Data); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +type frontendServiceProcessorLoadTxnBegin struct { + handler FrontendService } -func (p *TBinlog) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetBelong() { - if err = oprot.WriteFieldBegin("belong", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Belong); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorLoadTxnBegin) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceLoadTxnBeginArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("loadTxnBegin", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} -func (p *TBinlog) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetTableRef() { - if err = oprot.WriteFieldBegin("table_ref", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TableRef); err 
!= nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceLoadTxnBeginResult{} + var retval *TLoadTxnBeginResult_ + if retval, err2 = p.handler.LoadTxnBegin(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnBegin: "+err2.Error()) + oprot.WriteMessageBegin("loadTxnBegin", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + if err2 = oprot.WriteMessageBegin("loadTxnBegin", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetRemoveEnableCache() { - if err = oprot.WriteFieldBegin("remove_enable_cache", thrift.BOOL, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.RemoveEnableCache); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +type frontendServiceProcessorLoadTxnPreCommit struct { + handler FrontendService } -func (p *TBinlog) String() string { - if p == nil { - return "" +func (p *frontendServiceProcessorLoadTxnPreCommit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceLoadTxnPreCommitArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("loadTxnPreCommit", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return fmt.Sprintf("TBinlog(%+v)", *p) -} -func (p *TBinlog) DeepEqual(ano *TBinlog) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceLoadTxnPreCommitResult{} + var retval *TLoadTxnCommitResult_ + if retval, err2 = p.handler.LoadTxnPreCommit(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnPreCommit: "+err2.Error()) + oprot.WriteMessageBegin("loadTxnPreCommit", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field1DeepEqual(ano.CommitSeq) { - return false + if err2 = oprot.WriteMessageBegin("loadTxnPreCommit", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field2DeepEqual(ano.Timestamp) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field3DeepEqual(ano.Type) { - return false + if err2 = 
oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if !p.Field4DeepEqual(ano.DbId) { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if !p.Field5DeepEqual(ano.TableIds) { - return false + if err != nil { + return } - if !p.Field6DeepEqual(ano.Data) { - return false + return true, err +} + +type frontendServiceProcessorLoadTxn2PC struct { + handler FrontendService +} + +func (p *frontendServiceProcessorLoadTxn2PC) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceLoadTxn2PCArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("loadTxn2PC", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if !p.Field7DeepEqual(ano.Belong) { - return false + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceLoadTxn2PCResult{} + var retval *TLoadTxn2PCResult_ + if retval, err2 = p.handler.LoadTxn2PC(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxn2PC: "+err2.Error()) + oprot.WriteMessageBegin("loadTxn2PC", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field8DeepEqual(ano.TableRef) { - return false + if err2 = oprot.WriteMessageBegin("loadTxn2PC", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field9DeepEqual(ano.RemoveEnableCache) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return true + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) Field1DeepEqual(src *int64) bool { +type frontendServiceProcessorLoadTxnCommit struct { + handler FrontendService +} - if p.CommitSeq == src { - return true - } else if p.CommitSeq == nil || src == nil { - return false - } - if *p.CommitSeq != *src { - return false +func (p *frontendServiceProcessorLoadTxnCommit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceLoadTxnCommitArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("loadTxnCommit", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TBinlog) Field2DeepEqual(src *int64) bool { - if p.Timestamp == src { - return true - } else if p.Timestamp == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceLoadTxnCommitResult{} + var retval *TLoadTxnCommitResult_ + if retval, err2 = p.handler.LoadTxnCommit(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnCommit: "+err2.Error()) + oprot.WriteMessageBegin("loadTxnCommit", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if *p.Timestamp != *src { - return false + if err2 = 
oprot.WriteMessageBegin("loadTxnCommit", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TBinlog) Field3DeepEqual(src *TBinlogType) bool { - - if p.Type == src { - return true - } else if p.Type == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if *p.Type != *src { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TBinlog) Field4DeepEqual(src *int64) bool { - - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if *p.DbId != *src { - return false + if err != nil { + return } - return true + return true, err } -func (p *TBinlog) Field5DeepEqual(src []int64) bool { - if len(p.TableIds) != len(src) { - return false - } - for i, v := range p.TableIds { - _src := src[i] - if v != _src { - return false - } - } - return true +type frontendServiceProcessorLoadTxnRollback struct { + handler FrontendService } -func (p *TBinlog) Field6DeepEqual(src *string) bool { - if p.Data == src { - return true - } else if p.Data == nil || src == nil { - return false - } - if strings.Compare(*p.Data, *src) != 0 { - return false +func (p *frontendServiceProcessorLoadTxnRollback) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceLoadTxnRollbackArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("loadTxnRollback", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TBinlog) Field7DeepEqual(src *int64) bool { - if p.Belong == src { - return true - } else if p.Belong == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceLoadTxnRollbackResult{} + var retval *TLoadTxnRollbackResult_ + if retval, err2 = p.handler.LoadTxnRollback(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnRollback: "+err2.Error()) + oprot.WriteMessageBegin("loadTxnRollback", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if *p.Belong != *src { - return false + if err2 = oprot.WriteMessageBegin("loadTxnRollback", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TBinlog) Field8DeepEqual(src *int64) bool { - - if p.TableRef == src { - return true - } else if p.TableRef == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if *p.TableRef != *src { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TBinlog) Field9DeepEqual(src *bool) bool { - if p.RemoveEnableCache == src { - return true - } else if p.RemoveEnableCache == nil || src == nil { - return false +type frontendServiceProcessorBeginTxn struct { + handler FrontendService +} + +func (p *frontendServiceProcessorBeginTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err 
thrift.TException) { + args := FrontendServiceBeginTxnArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("beginTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceBeginTxnResult{} + var retval *TBeginTxnResult_ + if retval, err2 = p.handler.BeginTxn(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing beginTxn: "+err2.Error()) + oprot.WriteMessageBegin("beginTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("beginTxn", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if *p.RemoveEnableCache != *src { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return true -} - -type TGetBinlogResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - NextCommitSeq *int64 `thrift:"next_commit_seq,2,optional" frugal:"2,optional,i64" json:"next_commit_seq,omitempty"` - Binlogs []*TBinlog `thrift:"binlogs,3,optional" frugal:"3,optional,list" json:"binlogs,omitempty"` - FeVersion *string `thrift:"fe_version,4,optional" frugal:"4,optional,string" json:"fe_version,omitempty"` - FeMetaVersion *int64 `thrift:"fe_meta_version,5,optional" frugal:"5,optional,i64" json:"fe_meta_version,omitempty"` -} - -func NewTGetBinlogResult_() *TGetBinlogResult_ { - return &TGetBinlogResult_{} + if err != nil { + return + } + return true, err } -func (p *TGetBinlogResult_) InitDefault() { - *p = TGetBinlogResult_{} +type frontendServiceProcessorCommitTxn struct { + handler FrontendService } -var TGetBinlogResult__Status_DEFAULT *status.TStatus - -func (p *TGetBinlogResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TGetBinlogResult__Status_DEFAULT +func (p *frontendServiceProcessorCommitTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceCommitTxnArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("commitTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return p.Status -} - -var TGetBinlogResult__NextCommitSeq_DEFAULT int64 -func (p *TGetBinlogResult_) GetNextCommitSeq() (v int64) { - if !p.IsSetNextCommitSeq() { - return TGetBinlogResult__NextCommitSeq_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceCommitTxnResult{} + var retval *TCommitTxnResult_ + if retval, err2 = p.handler.CommitTxn(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing commitTxn: "+err2.Error()) + oprot.WriteMessageBegin("commitTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.NextCommitSeq -} - -var 
TGetBinlogResult__Binlogs_DEFAULT []*TBinlog - -func (p *TGetBinlogResult_) GetBinlogs() (v []*TBinlog) { - if !p.IsSetBinlogs() { - return TGetBinlogResult__Binlogs_DEFAULT + if err2 = oprot.WriteMessageBegin("commitTxn", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return p.Binlogs -} - -var TGetBinlogResult__FeVersion_DEFAULT string - -func (p *TGetBinlogResult_) GetFeVersion() (v string) { - if !p.IsSetFeVersion() { - return TGetBinlogResult__FeVersion_DEFAULT + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return *p.FeVersion -} - -var TGetBinlogResult__FeMetaVersion_DEFAULT int64 - -func (p *TGetBinlogResult_) GetFeMetaVersion() (v int64) { - if !p.IsSetFeMetaVersion() { - return TGetBinlogResult__FeMetaVersion_DEFAULT + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return *p.FeMetaVersion -} -func (p *TGetBinlogResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TGetBinlogResult_) SetNextCommitSeq(val *int64) { - p.NextCommitSeq = val -} -func (p *TGetBinlogResult_) SetBinlogs(val []*TBinlog) { - p.Binlogs = val -} -func (p *TGetBinlogResult_) SetFeVersion(val *string) { - p.FeVersion = val -} -func (p *TGetBinlogResult_) SetFeMetaVersion(val *int64) { - p.FeMetaVersion = val -} - -var fieldIDToName_TGetBinlogResult_ = map[int16]string{ - 1: "status", - 2: "next_commit_seq", - 3: "binlogs", - 4: "fe_version", - 5: "fe_meta_version", -} - -func (p *TGetBinlogResult_) IsSetStatus() bool { - return p.Status != nil -} - -func (p *TGetBinlogResult_) IsSetNextCommitSeq() bool { - return p.NextCommitSeq != nil -} - -func (p *TGetBinlogResult_) IsSetBinlogs() bool { - return p.Binlogs != nil -} - -func (p *TGetBinlogResult_) IsSetFeVersion() bool { - return p.FeVersion != nil + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetBinlogResult_) IsSetFeMetaVersion() bool { - return p.FeMetaVersion != nil +type frontendServiceProcessorRollbackTxn struct { + handler FrontendService } -func (p *TGetBinlogResult_) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *frontendServiceProcessorRollbackTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceRollbackTxnArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("rollbackTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != 
nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceRollbackTxnResult{} + var retval *TRollbackTxnResult_ + if retval, err2 = p.handler.RollbackTxn(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing rollbackTxn: "+err2.Error()) + oprot.WriteMessageBegin("rollbackTxn", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err2 = oprot.WriteMessageBegin("rollbackTxn", thrift.REPLY, seqId); err2 != nil { + err = err2 } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +type frontendServiceProcessorGetBinlog struct { + handler FrontendService } -func (p *TGetBinlogResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err +func (p *frontendServiceProcessorGetBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetBinlogArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getBinlog", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TGetBinlogResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetBinlogResult{} + var retval *TGetBinlogResult_ + if retval, err2 = p.handler.GetBinlog(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBinlog: "+err2.Error()) + oprot.WriteMessageBegin("getBinlog", thrift.EXCEPTION, seqId) + 
x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - p.NextCommitSeq = &v + result.Success = retval } - return nil -} - -func (p *TGetBinlogResult_) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if err2 = oprot.WriteMessageBegin("getBinlog", thrift.REPLY, seqId); err2 != nil { + err = err2 } - p.Binlogs = make([]*TBinlog, 0, size) - for i := 0; i < size; i++ { - _elem := NewTBinlog() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.Binlogs = append(p.Binlogs, _elem) + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if err := iprot.ReadListEnd(); err != nil { - return err + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return nil -} - -func (p *TGetBinlogResult_) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.FeVersion = &v + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return nil + if err != nil { + return + } + return true, err } -func (p *TGetBinlogResult_) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.FeMetaVersion = &v - } - return nil +type frontendServiceProcessorGetSnapshot struct { + handler FrontendService } -func (p *TGetBinlogResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGetBinlogResult"); err != nil { - goto WriteStructBeginError +func (p *frontendServiceProcessorGetSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetSnapshotArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getSnapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetSnapshotResult{} + var retval *TGetSnapshotResult_ + if retval, err2 = p.handler.GetSnapshot(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSnapshot: "+err2.Error()) + oprot.WriteMessageBegin("getSnapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err2 = oprot.WriteMessageBegin("getSnapshot", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field 
%d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *TGetBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetNextCommitSeq() { - if err = oprot.WriteFieldBegin("next_commit_seq", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.NextCommitSeq); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +type frontendServiceProcessorRestoreSnapshot struct { + handler FrontendService } -func (p *TGetBinlogResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetBinlogs() { - if err = oprot.WriteFieldBegin("binlogs", thrift.LIST, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Binlogs)); err != nil { - return err - } - for _, v := range p.Binlogs { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorRestoreSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceRestoreSnapshotArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("restoreSnapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} -func (p *TGetBinlogResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetFeVersion() { - if err = oprot.WriteFieldBegin("fe_version", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.FeVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceRestoreSnapshotResult{} + var retval *TRestoreSnapshotResult_ + if retval, err2 = p.handler.RestoreSnapshot(ctx, args.Request); err2 != 
nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing restoreSnapshot: "+err2.Error()) + oprot.WriteMessageBegin("restoreSnapshot", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("restoreSnapshot", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + if err != nil { + return + } + return true, err } -func (p *TGetBinlogResult_) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetFeMetaVersion() { - if err = oprot.WriteFieldBegin("fe_meta_version", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.FeMetaVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +type frontendServiceProcessorWaitingTxnStatus struct { + handler FrontendService } -func (p *TGetBinlogResult_) String() string { - if p == nil { - return "" +func (p *frontendServiceProcessorWaitingTxnStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceWaitingTxnStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("waitingTxnStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return fmt.Sprintf("TGetBinlogResult_(%+v)", *p) -} -func (p *TGetBinlogResult_) DeepEqual(ano *TGetBinlogResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceWaitingTxnStatusResult{} + var retval *TWaitingTxnStatusResult_ + if retval, err2 = p.handler.WaitingTxnStatus(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing waitingTxnStatus: "+err2.Error()) + oprot.WriteMessageBegin("waitingTxnStatus", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field1DeepEqual(ano.Status) { - return false + if err2 = oprot.WriteMessageBegin("waitingTxnStatus", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field2DeepEqual(ano.NextCommitSeq) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field3DeepEqual(ano.Binlogs) { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if !p.Field4DeepEqual(ano.FeVersion) { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if !p.Field5DeepEqual(ano.FeMetaVersion) { - 
return false + if err != nil { + return } - return true + return true, err } -func (p *TGetBinlogResult_) Field1DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { - return false - } - return true +type frontendServiceProcessorStreamLoadPut struct { + handler FrontendService } -func (p *TGetBinlogResult_) Field2DeepEqual(src *int64) bool { - if p.NextCommitSeq == src { - return true - } else if p.NextCommitSeq == nil || src == nil { - return false - } - if *p.NextCommitSeq != *src { - return false +func (p *frontendServiceProcessorStreamLoadPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceStreamLoadPutArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("streamLoadPut", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetBinlogResult_) Field3DeepEqual(src []*TBinlog) bool { - if len(p.Binlogs) != len(src) { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceStreamLoadPutResult{} + var retval *TStreamLoadPutResult_ + if retval, err2 = p.handler.StreamLoadPut(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing streamLoadPut: "+err2.Error()) + oprot.WriteMessageBegin("streamLoadPut", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - for i, v := range p.Binlogs { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if err2 = oprot.WriteMessageBegin("streamLoadPut", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogResult_) Field4DeepEqual(src *string) bool { - - if p.FeVersion == src { - return true - } else if p.FeVersion == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.FeVersion, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TGetBinlogResult_) Field5DeepEqual(src *int64) bool { - - if p.FeMetaVersion == src { - return true - } else if p.FeMetaVersion == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if *p.FeMetaVersion != *src { - return false + if err != nil { + return } - return true -} - -type TGetTabletReplicaInfosRequest struct { - TabletIds []int64 `thrift:"tablet_ids,1,required" frugal:"1,required,list" json:"tablet_ids"` + return true, err } -func NewTGetTabletReplicaInfosRequest() *TGetTabletReplicaInfosRequest { - return &TGetTabletReplicaInfosRequest{} +type frontendServiceProcessorStreamLoadMultiTablePut struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosRequest) InitDefault() { - *p = TGetTabletReplicaInfosRequest{} -} +func (p *frontendServiceProcessorStreamLoadMultiTablePut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceStreamLoadMultiTablePutArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.EXCEPTION, seqId) + 
x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetTabletReplicaInfosRequest) GetTabletIds() (v []int64) { - return p.TabletIds -} -func (p *TGetTabletReplicaInfosRequest) SetTabletIds(val []int64) { - p.TabletIds = val + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceStreamLoadMultiTablePutResult{} + var retval *TStreamLoadMultiTablePutResult_ + if retval, err2 = p.handler.StreamLoadMultiTablePut(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing streamLoadMultiTablePut: "+err2.Error()) + oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var fieldIDToName_TGetTabletReplicaInfosRequest = map[int16]string{ - 1: "tablet_ids", +type frontendServiceProcessorSnapshotLoaderReport struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosRequest) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetTabletIds bool = false +func (p *frontendServiceProcessorSnapshotLoaderReport) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceSnapshotLoaderReportArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("snapshotLoaderReport", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceSnapshotLoaderReportResult{} + var retval *status.TStatus + if retval, err2 = p.handler.SnapshotLoaderReport(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing snapshotLoaderReport: "+err2.Error()) + oprot.WriteMessageBegin("snapshotLoaderReport", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("snapshotLoaderReport", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +type frontendServiceProcessorPing struct { + handler FrontendService +} - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetTabletIds = true - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +func (p *frontendServiceProcessorPing) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServicePingArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ping", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServicePingResult{} + var retval *TFrontendPingFrontendResult_ + if retval, err2 = p.handler.Ping(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ping: "+err2.Error()) + oprot.WriteMessageBegin("ping", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err2 = oprot.WriteMessageBegin("ping", thrift.REPLY, seqId); err2 != nil { + err = err2 } - - if !issetTabletIds { - fieldId = 1 - goto RequiredFieldNotSetError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTabletReplicaInfosRequest[fieldId])) +type frontendServiceProcessorInitExternalCtlMeta struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosRequest) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *frontendServiceProcessorInitExternalCtlMeta) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceInitExternalCtlMetaArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("initExternalCtlMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - p.TabletIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem 
= v - } - p.TabletIds = append(p.TabletIds, _elem) + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceInitExternalCtlMetaResult{} + var retval *TInitExternalCtlMetaResult_ + if retval, err2 = p.handler.InitExternalCtlMeta(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing initExternalCtlMeta: "+err2.Error()) + oprot.WriteMessageBegin("initExternalCtlMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err := iprot.ReadListEnd(); err != nil { - return err + if err2 = oprot.WriteMessageBegin("initExternalCtlMeta", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return nil + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetTabletReplicaInfosRequest) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGetTabletReplicaInfosRequest"); err != nil { - goto WriteStructBeginError +type frontendServiceProcessorFetchSchemaTableData struct { + handler FrontendService +} + +func (p *frontendServiceProcessorFetchSchemaTableData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceFetchSchemaTableDataArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("fetchSchemaTableData", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceFetchSchemaTableDataResult{} + var retval *TFetchSchemaTableDataResult_ + if retval, err2 = p.handler.FetchSchemaTableData(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchSchemaTableData: "+err2.Error()) + oprot.WriteMessageBegin("fetchSchemaTableData", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err2 = oprot.WriteMessageBegin("fetchSchemaTableData", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p 
*TGetTabletReplicaInfosRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 1); err != nil { - goto WriteFieldBeginError +type frontendServiceProcessorAcquireToken struct { + handler FrontendService +} + +func (p *frontendServiceProcessorAcquireToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceAcquireTokenArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("acquireToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceAcquireTokenResult{} + var retval *TMySqlLoadAcquireTokenResult_ + if retval, err2 = p.handler.AcquireToken(ctx); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing acquireToken: "+err2.Error()) + oprot.WriteMessageBegin("acquireToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("acquireToken", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { - return err + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - for _, v := range p.TabletIds { - if err := oprot.WriteI64(v); err != nil { - return err - } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if err := oprot.WriteListEnd(); err != nil { - return err + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err != nil { + return } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return true, err } -func (p *TGetTabletReplicaInfosRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TGetTabletReplicaInfosRequest(%+v)", *p) +type frontendServiceProcessorCheckToken struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosRequest) DeepEqual(ano *TGetTabletReplicaInfosRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.TabletIds) { - return false +func (p *frontendServiceProcessorCheckToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceCheckTokenArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("checkToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetTabletReplicaInfosRequest) Field1DeepEqual(src []int64) bool { - - if len(p.TabletIds) != len(src) { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceCheckTokenResult{} + var retval bool + if retval, err2 = p.handler.CheckToken(ctx, args.Token); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, 
"Internal error processing checkToken: "+err2.Error()) + oprot.WriteMessageBegin("checkToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = &retval } - for i, v := range p.TabletIds { - _src := src[i] - if v != _src { - return false - } + if err2 = oprot.WriteMessageBegin("checkToken", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} - -type TGetTabletReplicaInfosResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - TabletReplicaInfos map[int64][]*types.TReplicaInfo `thrift:"tablet_replica_infos,2,optional" frugal:"2,optional,map>" json:"tablet_replica_infos,omitempty"` - Token *string `thrift:"token,3,optional" frugal:"3,optional,string" json:"token,omitempty"` -} - -func NewTGetTabletReplicaInfosResult_() *TGetTabletReplicaInfosResult_ { - return &TGetTabletReplicaInfosResult_{} + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetTabletReplicaInfosResult_) InitDefault() { - *p = TGetTabletReplicaInfosResult_{} +type frontendServiceProcessorConfirmUnusedRemoteFiles struct { + handler FrontendService } -var TGetTabletReplicaInfosResult__Status_DEFAULT *status.TStatus - -func (p *TGetTabletReplicaInfosResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TGetTabletReplicaInfosResult__Status_DEFAULT +func (p *frontendServiceProcessorConfirmUnusedRemoteFiles) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceConfirmUnusedRemoteFilesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return p.Status -} - -var TGetTabletReplicaInfosResult__TabletReplicaInfos_DEFAULT map[int64][]*types.TReplicaInfo -func (p *TGetTabletReplicaInfosResult_) GetTabletReplicaInfos() (v map[int64][]*types.TReplicaInfo) { - if !p.IsSetTabletReplicaInfos() { - return TGetTabletReplicaInfosResult__TabletReplicaInfos_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceConfirmUnusedRemoteFilesResult{} + var retval *TConfirmUnusedRemoteFilesResult_ + if retval, err2 = p.handler.ConfirmUnusedRemoteFiles(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing confirmUnusedRemoteFiles: "+err2.Error()) + oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return p.TabletReplicaInfos -} - -var TGetTabletReplicaInfosResult__Token_DEFAULT string - -func (p *TGetTabletReplicaInfosResult_) GetToken() (v string) { - if !p.IsSetToken() { - return TGetTabletReplicaInfosResult__Token_DEFAULT + if err2 = oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return *p.Token -} -func (p *TGetTabletReplicaInfosResult_) SetStatus(val *status.TStatus) { - 
p.Status = val -} -func (p *TGetTabletReplicaInfosResult_) SetTabletReplicaInfos(val map[int64][]*types.TReplicaInfo) { - p.TabletReplicaInfos = val -} -func (p *TGetTabletReplicaInfosResult_) SetToken(val *string) { - p.Token = val + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var fieldIDToName_TGetTabletReplicaInfosResult_ = map[int16]string{ - 1: "status", - 2: "tablet_replica_infos", - 3: "token", +type frontendServiceProcessorCheckAuth struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosResult_) IsSetStatus() bool { - return p.Status != nil -} +func (p *frontendServiceProcessorCheckAuth) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceCheckAuthArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("checkAuth", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetTabletReplicaInfosResult_) IsSetTabletReplicaInfos() bool { - return p.TabletReplicaInfos != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceCheckAuthResult{} + var retval *TCheckAuthResult_ + if retval, err2 = p.handler.CheckAuth(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing checkAuth: "+err2.Error()) + oprot.WriteMessageBegin("checkAuth", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("checkAuth", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetTabletReplicaInfosResult_) IsSetToken() bool { - return p.Token != nil +type frontendServiceProcessorGetQueryStats struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosResult_) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *frontendServiceProcessorGetQueryStats) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetQueryStatsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getQueryStats", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.MAP { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetQueryStatsResult{} + var retval *TQueryStatsResult_ + if retval, err2 = p.handler.GetQueryStats(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getQueryStats: "+err2.Error()) + oprot.WriteMessageBegin("getQueryStats", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err2 = oprot.WriteMessageBegin("getQueryStats", thrift.REPLY, seqId); err2 != nil { + err = err2 } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TGetTabletReplicaInfosResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -} - -func (p *TGetTabletReplicaInfosResult_) ReadField2(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - p.TabletReplicaInfos = make(map[int64][]*types.TReplicaInfo, size) - for i := 0; i < size; i++ { - var _key int64 - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _key = v - } - - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _val := make([]*types.TReplicaInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTReplicaInfo() - if err := _elem.Read(iprot); err != nil { - return err - } - - _val = append(_val, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - - p.TabletReplicaInfos[_key] = _val + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if err := iprot.ReadMapEnd(); err != nil { - return err + if err != nil { + return } - return nil + return true, err } -func (p *TGetTabletReplicaInfosResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v - } - return nil +type frontendServiceProcessorGetTabletReplicaInfos struct { 
+ handler FrontendService } -func (p *TGetTabletReplicaInfosResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGetTabletReplicaInfosResult"); err != nil { - goto WriteStructBeginError +func (p *frontendServiceProcessorGetTabletReplicaInfos) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetTabletReplicaInfosArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetTabletReplicaInfosResult{} + var retval *TGetTabletReplicaInfosResult_ + if retval, err2 = p.handler.GetTabletReplicaInfos(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getTabletReplicaInfos: "+err2.Error()) + oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err2 = oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *TGetTabletReplicaInfosResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TGetTabletReplicaInfosResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTabletReplicaInfos() { - if err = oprot.WriteFieldBegin("tablet_replica_infos", thrift.MAP, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I64, thrift.LIST, len(p.TabletReplicaInfos)); err != nil { - return err - } - for k, v := range p.TabletReplicaInfos { - - if err := oprot.WriteI64(k); err != nil { - return err - } - - if err := 
oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + if err != nil { + return + } + return true, err } -func (p *TGetTabletReplicaInfosResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +type frontendServiceProcessorAddPlsqlStoredProcedure struct { + handler FrontendService } -func (p *TGetTabletReplicaInfosResult_) String() string { - if p == nil { - return "" +func (p *frontendServiceProcessorAddPlsqlStoredProcedure) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceAddPlsqlStoredProcedureArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("addPlsqlStoredProcedure", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return fmt.Sprintf("TGetTabletReplicaInfosResult_(%+v)", *p) -} -func (p *TGetTabletReplicaInfosResult_) DeepEqual(ano *TGetTabletReplicaInfosResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceAddPlsqlStoredProcedureResult{} + var retval *TPlsqlStoredProcedureResult_ + if retval, err2 = p.handler.AddPlsqlStoredProcedure(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing addPlsqlStoredProcedure: "+err2.Error()) + oprot.WriteMessageBegin("addPlsqlStoredProcedure", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field1DeepEqual(ano.Status) { - return false + if err2 = oprot.WriteMessageBegin("addPlsqlStoredProcedure", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field2DeepEqual(ano.TabletReplicaInfos) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field3DeepEqual(ano.Token) { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetTabletReplicaInfosResult_) Field1DeepEqual(src *status.TStatus) bool { +type frontendServiceProcessorDropPlsqlStoredProcedure struct { + 
handler FrontendService +} - if !p.Status.DeepEqual(src) { - return false +func (p *frontendServiceProcessorDropPlsqlStoredProcedure) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceDropPlsqlStoredProcedureArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("dropPlsqlStoredProcedure", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetTabletReplicaInfosResult_) Field2DeepEqual(src map[int64][]*types.TReplicaInfo) bool { - if len(p.TabletReplicaInfos) != len(src) { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceDropPlsqlStoredProcedureResult{} + var retval *TPlsqlStoredProcedureResult_ + if retval, err2 = p.handler.DropPlsqlStoredProcedure(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing dropPlsqlStoredProcedure: "+err2.Error()) + oprot.WriteMessageBegin("dropPlsqlStoredProcedure", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - for k, v := range p.TabletReplicaInfos { - _src := src[k] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } + if err2 = oprot.WriteMessageBegin("dropPlsqlStoredProcedure", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetTabletReplicaInfosResult_) Field3DeepEqual(src *string) bool { - - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.Token, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} - -type TGetSnapshotRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` - Token *string `thrift:"token,6,optional" frugal:"6,optional,string" json:"token,omitempty"` - LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` - SnapshotName *string `thrift:"snapshot_name,8,optional" frugal:"8,optional,string" json:"snapshot_name,omitempty"` - SnapshotType *TSnapshotType `thrift:"snapshot_type,9,optional" frugal:"9,optional,TSnapshotType" json:"snapshot_type,omitempty"` -} - -func NewTGetSnapshotRequest() *TGetSnapshotRequest { - return &TGetSnapshotRequest{} + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) InitDefault() { - *p = TGetSnapshotRequest{} +type frontendServiceProcessorAddPlsqlPackage struct { + handler FrontendService } -var TGetSnapshotRequest_Cluster_DEFAULT string - -func (p *TGetSnapshotRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - 
return TGetSnapshotRequest_Cluster_DEFAULT +func (p *frontendServiceProcessorAddPlsqlPackage) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceAddPlsqlPackageArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("addPlsqlPackage", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return *p.Cluster -} - -var TGetSnapshotRequest_User_DEFAULT string -func (p *TGetSnapshotRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TGetSnapshotRequest_User_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceAddPlsqlPackageResult{} + var retval *TPlsqlPackageResult_ + if retval, err2 = p.handler.AddPlsqlPackage(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing addPlsqlPackage: "+err2.Error()) + oprot.WriteMessageBegin("addPlsqlPackage", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.User -} - -var TGetSnapshotRequest_Passwd_DEFAULT string - -func (p *TGetSnapshotRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TGetSnapshotRequest_Passwd_DEFAULT + if err2 = oprot.WriteMessageBegin("addPlsqlPackage", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return *p.Passwd -} - -var TGetSnapshotRequest_Db_DEFAULT string - -func (p *TGetSnapshotRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TGetSnapshotRequest_Db_DEFAULT + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return *p.Db -} - -var TGetSnapshotRequest_Table_DEFAULT string - -func (p *TGetSnapshotRequest) GetTable() (v string) { - if !p.IsSetTable() { - return TGetSnapshotRequest_Table_DEFAULT + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return *p.Table -} - -var TGetSnapshotRequest_Token_DEFAULT string - -func (p *TGetSnapshotRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TGetSnapshotRequest_Token_DEFAULT + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - return *p.Token -} - -var TGetSnapshotRequest_LabelName_DEFAULT string - -func (p *TGetSnapshotRequest) GetLabelName() (v string) { - if !p.IsSetLabelName() { - return TGetSnapshotRequest_LabelName_DEFAULT + if err != nil { + return } - return *p.LabelName + return true, err } -var TGetSnapshotRequest_SnapshotName_DEFAULT string - -func (p *TGetSnapshotRequest) GetSnapshotName() (v string) { - if !p.IsSetSnapshotName() { - return TGetSnapshotRequest_SnapshotName_DEFAULT - } - return *p.SnapshotName +type frontendServiceProcessorDropPlsqlPackage struct { + handler FrontendService } -var TGetSnapshotRequest_SnapshotType_DEFAULT TSnapshotType +func (p *frontendServiceProcessorDropPlsqlPackage) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceDropPlsqlPackageArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("dropPlsqlPackage", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotRequest) 
GetSnapshotType() (v TSnapshotType) { - if !p.IsSetSnapshotType() { - return TGetSnapshotRequest_SnapshotType_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceDropPlsqlPackageResult{} + var retval *TPlsqlPackageResult_ + if retval, err2 = p.handler.DropPlsqlPackage(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing dropPlsqlPackage: "+err2.Error()) + oprot.WriteMessageBegin("dropPlsqlPackage", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return *p.SnapshotType -} -func (p *TGetSnapshotRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TGetSnapshotRequest) SetUser(val *string) { - p.User = val -} -func (p *TGetSnapshotRequest) SetPasswd(val *string) { - p.Passwd = val -} -func (p *TGetSnapshotRequest) SetDb(val *string) { - p.Db = val -} -func (p *TGetSnapshotRequest) SetTable(val *string) { - p.Table = val -} -func (p *TGetSnapshotRequest) SetToken(val *string) { - p.Token = val -} -func (p *TGetSnapshotRequest) SetLabelName(val *string) { - p.LabelName = val -} -func (p *TGetSnapshotRequest) SetSnapshotName(val *string) { - p.SnapshotName = val -} -func (p *TGetSnapshotRequest) SetSnapshotType(val *TSnapshotType) { - p.SnapshotType = val + if err2 = oprot.WriteMessageBegin("dropPlsqlPackage", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var fieldIDToName_TGetSnapshotRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "table", - 6: "token", - 7: "label_name", - 8: "snapshot_name", - 9: "snapshot_type", +type frontendServiceProcessorGetMasterToken struct { + handler FrontendService } -func (p *TGetSnapshotRequest) IsSetCluster() bool { - return p.Cluster != nil -} +func (p *frontendServiceProcessorGetMasterToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetMasterTokenArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getMasterToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotRequest) IsSetUser() bool { - return p.User != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetMasterTokenResult{} + var retval *TGetMasterTokenResult_ + if retval, err2 = p.handler.GetMasterToken(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getMasterToken: "+err2.Error()) + oprot.WriteMessageBegin("getMasterToken", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getMasterToken", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && 
err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) IsSetPasswd() bool { - return p.Passwd != nil +type frontendServiceProcessorGetBinlogLag struct { + handler FrontendService } -func (p *TGetSnapshotRequest) IsSetDb() bool { - return p.Db != nil -} +func (p *frontendServiceProcessorGetBinlogLag) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetBinlogLagArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getBinlogLag", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotRequest) IsSetTable() bool { - return p.Table != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetBinlogLagResult{} + var retval *TGetBinlogLagResult_ + if retval, err2 = p.handler.GetBinlogLag(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBinlogLag: "+err2.Error()) + oprot.WriteMessageBegin("getBinlogLag", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("getBinlogLag", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) IsSetToken() bool { - return p.Token != nil +type frontendServiceProcessorUpdateStatsCache struct { + handler FrontendService } -func (p *TGetSnapshotRequest) IsSetLabelName() bool { - return p.LabelName != nil -} +func (p *frontendServiceProcessorUpdateStatsCache) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceUpdateStatsCacheArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("updateStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotRequest) IsSetSnapshotName() bool { - return p.SnapshotName != nil + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceUpdateStatsCacheResult{} + var retval *status.TStatus + if retval, err2 = p.handler.UpdateStatsCache(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing updateStatsCache: "+err2.Error()) + oprot.WriteMessageBegin("updateStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("updateStatsCache", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } 
-func (p *TGetSnapshotRequest) IsSetSnapshotType() bool { - return p.SnapshotType != nil +type frontendServiceProcessorGetAutoIncrementRange struct { + handler FrontendService } -func (p *TGetSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I32 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +func (p *frontendServiceProcessorGetAutoIncrementRange) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetAutoIncrementRangeArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getAutoIncrementRange", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetAutoIncrementRangeResult{} + var retval *TAutoIncrementRangeResult_ + if retval, err2 = p.handler.GetAutoIncrementRange(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getAutoIncrementRange: "+err2.Error()) + oprot.WriteMessageBegin("getAutoIncrementRange", thrift.EXCEPTION, seqId) 
+ x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err2 = oprot.WriteMessageBegin("getAutoIncrementRange", thrift.REPLY, seqId); err2 != nil { + err = err2 } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotRequest[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +type frontendServiceProcessorCreatePartition struct { + handler FrontendService } -func (p *TGetSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v +func (p *frontendServiceProcessorCreatePartition) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceCreatePartitionArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("createPartition", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TGetSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceCreatePartitionResult{} + var retval *TCreatePartitionResult_ + if retval, err2 = p.handler.CreatePartition(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing createPartition: "+err2.Error()) + oprot.WriteMessageBegin("createPartition", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - p.User = &v + result.Success = retval } - return nil + if err2 = oprot.WriteMessageBegin("createPartition", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = &v - } - return nil +type frontendServiceProcessorReplacePartition struct { + handler FrontendService } -func (p *TGetSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } 
else { - p.Db = &v +func (p *frontendServiceProcessorReplacePartition) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceReplacePartitionArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("replacePartition", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TGetSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceReplacePartitionResult{} + var retval *TReplacePartitionResult_ + if retval, err2 = p.handler.ReplacePartition(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing replacePartition: "+err2.Error()) + oprot.WriteMessageBegin("replacePartition", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - p.Table = &v + result.Success = retval } - return nil + if err2 = oprot.WriteMessageBegin("replacePartition", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v - } - return nil +type frontendServiceProcessorGetMeta struct { + handler FrontendService } -func (p *TGetSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.LabelName = &v +func (p *frontendServiceProcessorGetMeta) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetMetaArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -} -func (p *TGetSnapshotRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetMetaResult{} + var retval *TGetMetaResult_ + if retval, err2 = p.handler.GetMeta(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getMeta: "+err2.Error()) + oprot.WriteMessageBegin("getMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 } else { - p.SnapshotName = &v + result.Success = retval } - return nil + if err2 = oprot.WriteMessageBegin("getMeta", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + 
return + } + return true, err } -func (p *TGetSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := TSnapshotType(v) - p.SnapshotType = &tmp - } - return nil +type frontendServiceProcessorGetBackendMeta struct { + handler FrontendService } -func (p *TGetSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGetSnapshotRequest"); err != nil { - goto WriteStructBeginError +func (p *frontendServiceProcessorGetBackendMeta) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetBackendMetaArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getBackendMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetBackendMetaResult{} + var retval *TGetBackendMetaResult_ + if retval, err2 = p.handler.GetBackendMeta(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBackendMeta: "+err2.Error()) + oprot.WriteMessageBegin("getBackendMeta", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err2 = oprot.WriteMessageBegin("getBackendMeta", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := 
oprot.WriteString(*p.Cluster); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +type frontendServiceProcessorGetColumnInfo struct { + handler FrontendService } -func (p *TGetSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorGetColumnInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceGetColumnInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("getColumnInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} -func (p *TGetSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceGetColumnInfoResult{} + var retval *TGetColumnInfoResult_ + if retval, err2 = p.handler.GetColumnInfo(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getColumnInfo: "+err2.Error()) + oprot.WriteMessageBegin("getColumnInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + if err2 = oprot.WriteMessageBegin("getColumnInfo", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T 
write field 4 end error: ", p), err) +type frontendServiceProcessorInvalidateStatsCache struct { + handler FrontendService } -func (p *TGetSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTable() { - if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Table); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorInvalidateStatsCache) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceInvalidateStatsCacheArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("invalidateStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} -func (p *TGetSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceInvalidateStatsCacheResult{} + var retval *status.TStatus + if retval, err2 = p.handler.InvalidateStatsCache(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing invalidateStatsCache: "+err2.Error()) + oprot.WriteMessageBegin("invalidateStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + if err2 = oprot.WriteMessageBegin("invalidateStatsCache", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetLabelName() { - if err = oprot.WriteFieldBegin("label_name", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.LabelName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +type frontendServiceProcessorShowProcessList struct { + handler FrontendService } -func (p *TGetSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetSnapshotName() { - if err = 
oprot.WriteFieldBegin("snapshot_name", thrift.STRING, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.SnapshotName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *frontendServiceProcessorShowProcessList) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceShowProcessListArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("showProcessList", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} -func (p *TGetSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetSnapshotType() { - if err = oprot.WriteFieldBegin("snapshot_type", thrift.I32, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.SnapshotType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceShowProcessListResult{} + var retval *TShowProcessListResult_ + if retval, err2 = p.handler.ShowProcessList(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing showProcessList: "+err2.Error()) + oprot.WriteMessageBegin("showProcessList", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("showProcessList", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TGetSnapshotRequest(%+v)", *p) +type frontendServiceProcessorReportCommitTxnResult_ struct { + handler FrontendService } -func (p *TGetSnapshotRequest) DeepEqual(ano *TGetSnapshotRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false +func (p *frontendServiceProcessorReportCommitTxnResult_) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceReportCommitTxnResultArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("reportCommitTxnResult", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + 
oprot.Flush(ctx) + return false, err } - if !p.Field4DeepEqual(ano.Db) { - return false + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceReportCommitTxnResultResult{} + var retval *status.TStatus + if retval, err2 = p.handler.ReportCommitTxnResult_(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing reportCommitTxnResult: "+err2.Error()) + oprot.WriteMessageBegin("reportCommitTxnResult", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if !p.Field5DeepEqual(ano.Table) { - return false + if err2 = oprot.WriteMessageBegin("reportCommitTxnResult", thrift.REPLY, seqId); err2 != nil { + err = err2 } - if !p.Field6DeepEqual(ano.Token) { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if !p.Field7DeepEqual(ano.LabelName) { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - if !p.Field8DeepEqual(ano.SnapshotName) { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if !p.Field9DeepEqual(ano.SnapshotType) { - return false + if err != nil { + return } - return true + return true, err } -func (p *TGetSnapshotRequest) Field1DeepEqual(src *string) bool { +type frontendServiceProcessorShowUser struct { + handler FrontendService +} - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { - return false +func (p *frontendServiceProcessorShowUser) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceShowUserArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("showUser", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetSnapshotRequest) Field2DeepEqual(src *string) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceShowUserResult{} + var retval *TShowUserResult_ + if retval, err2 = p.handler.ShowUser(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing showUser: "+err2.Error()) + oprot.WriteMessageBegin("showUser", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if strings.Compare(*p.User, *src) != 0 { - return false + if err2 = oprot.WriteMessageBegin("showUser", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetSnapshotRequest) Field3DeepEqual(src *string) bool { - - if p.Passwd == src { - return true - } else if p.Passwd == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.Passwd, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TGetSnapshotRequest) Field4DeepEqual(src *string) bool { - - if p.Db == src { - return true - } else if p.Db == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != 
nil { + err = err2 } - if strings.Compare(*p.Db, *src) != 0 { - return false + if err != nil { + return } - return true + return true, err } -func (p *TGetSnapshotRequest) Field5DeepEqual(src *string) bool { - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false - } - if strings.Compare(*p.Table, *src) != 0 { - return false - } - return true +type frontendServiceProcessorSyncQueryColumns struct { + handler FrontendService } -func (p *TGetSnapshotRequest) Field6DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false +func (p *frontendServiceProcessorSyncQueryColumns) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceSyncQueryColumnsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("syncQueryColumns", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err } - return true -} -func (p *TGetSnapshotRequest) Field7DeepEqual(src *string) bool { - if p.LabelName == src { - return true - } else if p.LabelName == nil || src == nil { - return false + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceSyncQueryColumnsResult{} + var retval *status.TStatus + if retval, err2 = p.handler.SyncQueryColumns(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing syncQueryColumns: "+err2.Error()) + oprot.WriteMessageBegin("syncQueryColumns", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - if strings.Compare(*p.LabelName, *src) != 0 { - return false + if err2 = oprot.WriteMessageBegin("syncQueryColumns", thrift.REPLY, seqId); err2 != nil { + err = err2 } - return true -} -func (p *TGetSnapshotRequest) Field8DeepEqual(src *string) bool { - - if p.SnapshotName == src { - return true - } else if p.SnapshotName == nil || src == nil { - return false + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 } - if strings.Compare(*p.SnapshotName, *src) != 0 { - return false + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 } - return true -} -func (p *TGetSnapshotRequest) Field9DeepEqual(src *TSnapshotType) bool { - - if p.SnapshotType == src { - return true - } else if p.SnapshotType == nil || src == nil { - return false + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 } - if *p.SnapshotType != *src { - return false + if err != nil { + return } - return true + return true, err } -type TGetSnapshotResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Meta []byte `thrift:"meta,2,optional" frugal:"2,optional,binary" json:"meta,omitempty"` - JobInfo []byte `thrift:"job_info,3,optional" frugal:"3,optional,binary" json:"job_info,omitempty"` +type frontendServiceProcessorFetchSplitBatch struct { + handler FrontendService } -func NewTGetSnapshotResult_() *TGetSnapshotResult_ { - return &TGetSnapshotResult_{} +func (p *frontendServiceProcessorFetchSplitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args 
:= FrontendServiceFetchSplitBatchArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("fetchSplitBatch", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceFetchSplitBatchResult{} + var retval *TFetchSplitBatchResult_ + if retval, err2 = p.handler.FetchSplitBatch(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchSplitBatch: "+err2.Error()) + oprot.WriteMessageBegin("fetchSplitBatch", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("fetchSplitBatch", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotResult_) InitDefault() { - *p = TGetSnapshotResult_{} +type frontendServiceProcessorUpdatePartitionStatsCache struct { + handler FrontendService } -var TGetSnapshotResult__Status_DEFAULT *status.TStatus +func (p *frontendServiceProcessorUpdatePartitionStatsCache) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceUpdatePartitionStatsCacheArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("updatePartitionStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TGetSnapshotResult__Status_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceUpdatePartitionStatsCacheResult{} + var retval *status.TStatus + if retval, err2 = p.handler.UpdatePartitionStatsCache(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing updatePartitionStatsCache: "+err2.Error()) + oprot.WriteMessageBegin("updatePartitionStatsCache", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return p.Status + if err2 = oprot.WriteMessageBegin("updatePartitionStatsCache", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -var TGetSnapshotResult__Meta_DEFAULT []byte - -func (p *TGetSnapshotResult_) GetMeta() (v []byte) { - if !p.IsSetMeta() { - return TGetSnapshotResult__Meta_DEFAULT - } - return p.Meta +type frontendServiceProcessorFetchRunningQueries struct { + handler FrontendService } -var TGetSnapshotResult__JobInfo_DEFAULT []byte +func (p *frontendServiceProcessorFetchRunningQueries) Process(ctx context.Context, seqId int32, 
iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := FrontendServiceFetchRunningQueriesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("fetchRunningQueries", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } -func (p *TGetSnapshotResult_) GetJobInfo() (v []byte) { - if !p.IsSetJobInfo() { - return TGetSnapshotResult__JobInfo_DEFAULT + iprot.ReadMessageEnd() + var err2 error + result := FrontendServiceFetchRunningQueriesResult{} + var retval *TFetchRunningQueriesResult_ + if retval, err2 = p.handler.FetchRunningQueries(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchRunningQueries: "+err2.Error()) + oprot.WriteMessageBegin("fetchRunningQueries", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval } - return p.JobInfo -} -func (p *TGetSnapshotResult_) SetStatus(val *status.TStatus) { - p.Status = val + if err2 = oprot.WriteMessageBegin("fetchRunningQueries", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err } -func (p *TGetSnapshotResult_) SetMeta(val []byte) { - p.Meta = val + +type FrontendServiceGetDbNamesArgs struct { + Params *TGetDbsParams `thrift:"params,1" frugal:"1,default,TGetDbsParams" json:"params"` } -func (p *TGetSnapshotResult_) SetJobInfo(val []byte) { - p.JobInfo = val + +func NewFrontendServiceGetDbNamesArgs() *FrontendServiceGetDbNamesArgs { + return &FrontendServiceGetDbNamesArgs{} } -var fieldIDToName_TGetSnapshotResult_ = map[int16]string{ - 1: "status", - 2: "meta", - 3: "job_info", +func (p *FrontendServiceGetDbNamesArgs) InitDefault() { } -func (p *TGetSnapshotResult_) IsSetStatus() bool { - return p.Status != nil +var FrontendServiceGetDbNamesArgs_Params_DEFAULT *TGetDbsParams + +func (p *FrontendServiceGetDbNamesArgs) GetParams() (v *TGetDbsParams) { + if !p.IsSetParams() { + return FrontendServiceGetDbNamesArgs_Params_DEFAULT + } + return p.Params +} +func (p *FrontendServiceGetDbNamesArgs) SetParams(val *TGetDbsParams) { + p.Params = val } -func (p *TGetSnapshotResult_) IsSetMeta() bool { - return p.Meta != nil +var fieldIDToName_FrontendServiceGetDbNamesArgs = map[int16]string{ + 1: "params", } -func (p *TGetSnapshotResult_) IsSetJobInfo() bool { - return p.JobInfo != nil +func (p *FrontendServiceGetDbNamesArgs) IsSetParams() bool { + return p.Params != nil } -func (p *TGetSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetDbNamesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -49002,37 +80379,14 @@ func (p *TGetSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -49047,7 +80401,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -49057,35 +80411,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetSnapshotResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TGetSnapshotResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return err - } else { - p.Meta = []byte(v) - } - return nil -} - -func (p *TGetSnapshotResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { +func (p *FrontendServiceGetDbNamesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetDbsParams() + if err := _field.Read(iprot); err != nil { return err - } else { - p.JobInfo = []byte(v) } + p.Params = _field return nil } -func (p *TGetSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetDbNamesArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TGetSnapshotResult"); err != nil { + if err = oprot.WriteStructBegin("getDbNames_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -49093,15 +80430,6 @@ func (p *TGetSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -49120,161 +80448,83 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetSnapshotResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceGetDbNamesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func 
(p *TGetSnapshotResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetMeta() { - if err = oprot.WriteFieldBegin("meta", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBinary([]byte(p.Meta)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err := p.Params.Write(oprot); err != nil { + return err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TGetSnapshotResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetJobInfo() { - if err = oprot.WriteFieldBegin("job_info", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBinary([]byte(p.JobInfo)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TGetSnapshotResult_) String() string { +func (p *FrontendServiceGetDbNamesArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TGetSnapshotResult_(%+v)", *p) -} + return fmt.Sprintf("FrontendServiceGetDbNamesArgs(%+v)", *p) -func (p *TGetSnapshotResult_) DeepEqual(ano *TGetSnapshotResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Meta) { - return false - } - if !p.Field3DeepEqual(ano.JobInfo) { - return false - } - return true } -func (p *TGetSnapshotResult_) Field1DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { +func (p *FrontendServiceGetDbNamesArgs) DeepEqual(ano *FrontendServiceGetDbNamesArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { return false } - return true -} -func (p *TGetSnapshotResult_) Field2DeepEqual(src []byte) bool { - - if bytes.Compare(p.Meta, src) != 0 { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *TGetSnapshotResult_) Field3DeepEqual(src []byte) bool { - if bytes.Compare(p.JobInfo, src) != 0 { +func (p *FrontendServiceGetDbNamesArgs) Field1DeepEqual(src *TGetDbsParams) bool { + + if !p.Params.DeepEqual(src) { return false } return true } -type TTableRef struct { - Table *string `thrift:"table,1,optional" frugal:"1,optional,string" json:"table,omitempty"` - AliasName *string `thrift:"alias_name,3,optional" frugal:"3,optional,string" json:"alias_name,omitempty"` -} - -func NewTTableRef() *TTableRef { - return &TTableRef{} +type FrontendServiceGetDbNamesResult struct { + Success *TGetDbsResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetDbsResult_" json:"success,omitempty"` } -func (p *TTableRef) InitDefault() { - *p = TTableRef{} +func NewFrontendServiceGetDbNamesResult() *FrontendServiceGetDbNamesResult { + return &FrontendServiceGetDbNamesResult{} } -var TTableRef_Table_DEFAULT string - -func (p *TTableRef) GetTable() (v string) { - if !p.IsSetTable() { - 
return TTableRef_Table_DEFAULT - } - return *p.Table +func (p *FrontendServiceGetDbNamesResult) InitDefault() { } -var TTableRef_AliasName_DEFAULT string +var FrontendServiceGetDbNamesResult_Success_DEFAULT *TGetDbsResult_ -func (p *TTableRef) GetAliasName() (v string) { - if !p.IsSetAliasName() { - return TTableRef_AliasName_DEFAULT +func (p *FrontendServiceGetDbNamesResult) GetSuccess() (v *TGetDbsResult_) { + if !p.IsSetSuccess() { + return FrontendServiceGetDbNamesResult_Success_DEFAULT } - return *p.AliasName -} -func (p *TTableRef) SetTable(val *string) { - p.Table = val -} -func (p *TTableRef) SetAliasName(val *string) { - p.AliasName = val + return p.Success } - -var fieldIDToName_TTableRef = map[int16]string{ - 1: "table", - 3: "alias_name", +func (p *FrontendServiceGetDbNamesResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetDbsResult_) } -func (p *TTableRef) IsSetTable() bool { - return p.Table != nil +var fieldIDToName_FrontendServiceGetDbNamesResult = map[int16]string{ + 0: "success", } -func (p *TTableRef) IsSetAliasName() bool { - return p.AliasName != nil +func (p *FrontendServiceGetDbNamesResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TTableRef) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetDbNamesResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -49293,32 +80543,19 @@ func (p *TTableRef) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -49333,7 +80570,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableRef[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -49343,39 +80580,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableRef) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Table = &v - } - return nil -} - -func (p *TTableRef) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *FrontendServiceGetDbNamesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetDbsResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.AliasName = &v } + p.Success = _field return nil } -func (p *TTableRef) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetDbNamesResult) 
Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTableRef"); err != nil { + if err = oprot.WriteStructBegin("getDbNames_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -49394,31 +80617,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTableRef) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTable() { - if err = oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Table); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TTableRef) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAliasName() { - if err = oprot.WriteFieldBegin("alias_name", thrift.STRING, 3); err != nil { +func (p *FrontendServiceGetDbNamesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.AliasName); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -49427,289 +80631,71 @@ func (p *TTableRef) writeField3(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TTableRef) String() string { +func (p *FrontendServiceGetDbNamesResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TTableRef(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetDbNamesResult(%+v)", *p) + } -func (p *TTableRef) DeepEqual(ano *TTableRef) bool { +func (p *FrontendServiceGetDbNamesResult) DeepEqual(ano *FrontendServiceGetDbNamesResult) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Table) { - return false - } - if !p.Field3DeepEqual(ano.AliasName) { + if !p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TTableRef) Field1DeepEqual(src *string) bool { - - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false - } - if strings.Compare(*p.Table, *src) != 0 { - return false - } - return true -} -func (p *TTableRef) Field3DeepEqual(src *string) bool { +func (p *FrontendServiceGetDbNamesResult) Field0DeepEqual(src *TGetDbsResult_) bool { - if p.AliasName == src { - return true - } else if p.AliasName == nil || src == nil { - return false - } - if strings.Compare(*p.AliasName, *src) != 0 { + if !p.Success.DeepEqual(src) { return false } return true } -type TRestoreSnapshotRequest 
struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` - Token *string `thrift:"token,6,optional" frugal:"6,optional,string" json:"token,omitempty"` - LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` - RepoName *string `thrift:"repo_name,8,optional" frugal:"8,optional,string" json:"repo_name,omitempty"` - TableRefs []*TTableRef `thrift:"table_refs,9,optional" frugal:"9,optional,list" json:"table_refs,omitempty"` - Properties map[string]string `thrift:"properties,10,optional" frugal:"10,optional,map" json:"properties,omitempty"` - Meta []byte `thrift:"meta,11,optional" frugal:"11,optional,binary" json:"meta,omitempty"` - JobInfo []byte `thrift:"job_info,12,optional" frugal:"12,optional,binary" json:"job_info,omitempty"` -} - -func NewTRestoreSnapshotRequest() *TRestoreSnapshotRequest { - return &TRestoreSnapshotRequest{} -} - -func (p *TRestoreSnapshotRequest) InitDefault() { - *p = TRestoreSnapshotRequest{} -} - -var TRestoreSnapshotRequest_Cluster_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TRestoreSnapshotRequest_Cluster_DEFAULT - } - return *p.Cluster -} - -var TRestoreSnapshotRequest_User_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TRestoreSnapshotRequest_User_DEFAULT - } - return *p.User -} - -var TRestoreSnapshotRequest_Passwd_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetPasswd() (v string) { - if !p.IsSetPasswd() { - return TRestoreSnapshotRequest_Passwd_DEFAULT - } - return *p.Passwd -} - -var TRestoreSnapshotRequest_Db_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetDb() (v string) { - if !p.IsSetDb() { - return TRestoreSnapshotRequest_Db_DEFAULT - } - return *p.Db -} - -var TRestoreSnapshotRequest_Table_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetTable() (v string) { - if !p.IsSetTable() { - return TRestoreSnapshotRequest_Table_DEFAULT - } - return *p.Table -} - -var TRestoreSnapshotRequest_Token_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetToken() (v string) { - if !p.IsSetToken() { - return TRestoreSnapshotRequest_Token_DEFAULT - } - return *p.Token -} - -var TRestoreSnapshotRequest_LabelName_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetLabelName() (v string) { - if !p.IsSetLabelName() { - return TRestoreSnapshotRequest_LabelName_DEFAULT - } - return *p.LabelName -} - -var TRestoreSnapshotRequest_RepoName_DEFAULT string - -func (p *TRestoreSnapshotRequest) GetRepoName() (v string) { - if !p.IsSetRepoName() { - return TRestoreSnapshotRequest_RepoName_DEFAULT - } - return *p.RepoName -} - -var TRestoreSnapshotRequest_TableRefs_DEFAULT []*TTableRef - -func (p *TRestoreSnapshotRequest) GetTableRefs() (v []*TTableRef) { - if !p.IsSetTableRefs() { - return TRestoreSnapshotRequest_TableRefs_DEFAULT - } - return p.TableRefs +type FrontendServiceGetTableNamesArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -var TRestoreSnapshotRequest_Properties_DEFAULT map[string]string - -func 
(p *TRestoreSnapshotRequest) GetProperties() (v map[string]string) { - if !p.IsSetProperties() { - return TRestoreSnapshotRequest_Properties_DEFAULT - } - return p.Properties +func NewFrontendServiceGetTableNamesArgs() *FrontendServiceGetTableNamesArgs { + return &FrontendServiceGetTableNamesArgs{} } -var TRestoreSnapshotRequest_Meta_DEFAULT []byte - -func (p *TRestoreSnapshotRequest) GetMeta() (v []byte) { - if !p.IsSetMeta() { - return TRestoreSnapshotRequest_Meta_DEFAULT - } - return p.Meta +func (p *FrontendServiceGetTableNamesArgs) InitDefault() { } -var TRestoreSnapshotRequest_JobInfo_DEFAULT []byte +var FrontendServiceGetTableNamesArgs_Params_DEFAULT *TGetTablesParams -func (p *TRestoreSnapshotRequest) GetJobInfo() (v []byte) { - if !p.IsSetJobInfo() { - return TRestoreSnapshotRequest_JobInfo_DEFAULT +func (p *FrontendServiceGetTableNamesArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return FrontendServiceGetTableNamesArgs_Params_DEFAULT } - return p.JobInfo -} -func (p *TRestoreSnapshotRequest) SetCluster(val *string) { - p.Cluster = val -} -func (p *TRestoreSnapshotRequest) SetUser(val *string) { - p.User = val -} -func (p *TRestoreSnapshotRequest) SetPasswd(val *string) { - p.Passwd = val -} -func (p *TRestoreSnapshotRequest) SetDb(val *string) { - p.Db = val -} -func (p *TRestoreSnapshotRequest) SetTable(val *string) { - p.Table = val -} -func (p *TRestoreSnapshotRequest) SetToken(val *string) { - p.Token = val -} -func (p *TRestoreSnapshotRequest) SetLabelName(val *string) { - p.LabelName = val -} -func (p *TRestoreSnapshotRequest) SetRepoName(val *string) { - p.RepoName = val -} -func (p *TRestoreSnapshotRequest) SetTableRefs(val []*TTableRef) { - p.TableRefs = val -} -func (p *TRestoreSnapshotRequest) SetProperties(val map[string]string) { - p.Properties = val -} -func (p *TRestoreSnapshotRequest) SetMeta(val []byte) { - p.Meta = val -} -func (p *TRestoreSnapshotRequest) SetJobInfo(val []byte) { - p.JobInfo = val -} - -var fieldIDToName_TRestoreSnapshotRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "passwd", - 4: "db", - 5: "table", - 6: "token", - 7: "label_name", - 8: "repo_name", - 9: "table_refs", - 10: "properties", - 11: "meta", - 12: "job_info", -} - -func (p *TRestoreSnapshotRequest) IsSetCluster() bool { - return p.Cluster != nil -} - -func (p *TRestoreSnapshotRequest) IsSetUser() bool { - return p.User != nil -} - -func (p *TRestoreSnapshotRequest) IsSetPasswd() bool { - return p.Passwd != nil -} - -func (p *TRestoreSnapshotRequest) IsSetDb() bool { - return p.Db != nil -} - -func (p *TRestoreSnapshotRequest) IsSetTable() bool { - return p.Table != nil -} - -func (p *TRestoreSnapshotRequest) IsSetToken() bool { - return p.Token != nil -} - -func (p *TRestoreSnapshotRequest) IsSetLabelName() bool { - return p.LabelName != nil -} - -func (p *TRestoreSnapshotRequest) IsSetRepoName() bool { - return p.RepoName != nil -} - -func (p *TRestoreSnapshotRequest) IsSetTableRefs() bool { - return p.TableRefs != nil + return p.Params } - -func (p *TRestoreSnapshotRequest) IsSetProperties() bool { - return p.Properties != nil +func (p *FrontendServiceGetTableNamesArgs) SetParams(val *TGetTablesParams) { + p.Params = val } -func (p *TRestoreSnapshotRequest) IsSetMeta() bool { - return p.Meta != nil +var fieldIDToName_FrontendServiceGetTableNamesArgs = map[int16]string{ + 1: "params", } -func (p *TRestoreSnapshotRequest) IsSetJobInfo() bool { - return p.JobInfo != nil +func (p *FrontendServiceGetTableNamesArgs) IsSetParams() bool { + return 
p.Params != nil } -func (p *TRestoreSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTableNamesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -49729,131 +80715,18 @@ func (p *TRestoreSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.LIST { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.MAP { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -49868,7 +80741,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesArgs[fieldId]), err) 
SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -49878,200 +80751,194 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *FrontendServiceGetTableNamesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Cluster = &v + } + p.Params = _field + return nil +} + +func (p *FrontendServiceGetTableNamesArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("getTableNames_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *FrontendServiceGetTableNamesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { return err - } else { - p.User = &v + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRestoreSnapshotRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Passwd = &v +func (p *FrontendServiceGetTableNamesArgs) String() string { + if p == nil { + return "" } - return nil + return fmt.Sprintf("FrontendServiceGetTableNamesArgs(%+v)", *p) + } -func (p *TRestoreSnapshotRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Db = &v +func (p *FrontendServiceGetTableNamesArgs) DeepEqual(ano *FrontendServiceGetTableNamesArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil + if !p.Field1DeepEqual(ano.Params) { + return false + } + return true } -func (p *TRestoreSnapshotRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Table = &v +func (p *FrontendServiceGetTableNamesArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return nil + return true } -func (p *TRestoreSnapshotRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v - } - return nil +type 
FrontendServiceGetTableNamesResult struct { + Success *TGetTablesResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetTablesResult_" json:"success,omitempty"` } -func (p *TRestoreSnapshotRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.LabelName = &v - } - return nil +func NewFrontendServiceGetTableNamesResult() *FrontendServiceGetTableNamesResult { + return &FrontendServiceGetTableNamesResult{} } -func (p *TRestoreSnapshotRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.RepoName = &v - } - return nil +func (p *FrontendServiceGetTableNamesResult) InitDefault() { } -func (p *TRestoreSnapshotRequest) ReadField9(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.TableRefs = make([]*TTableRef, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTableRef() - if err := _elem.Read(iprot); err != nil { - return err - } +var FrontendServiceGetTableNamesResult_Success_DEFAULT *TGetTablesResult_ - p.TableRefs = append(p.TableRefs, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err +func (p *FrontendServiceGetTableNamesResult) GetSuccess() (v *TGetTablesResult_) { + if !p.IsSetSuccess() { + return FrontendServiceGetTableNamesResult_Success_DEFAULT } - return nil + return p.Success +} +func (p *FrontendServiceGetTableNamesResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetTablesResult_) } -func (p *TRestoreSnapshotRequest) ReadField10(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err +var fieldIDToName_FrontendServiceGetTableNamesResult = map[int16]string{ + 0: "success", +} + +func (p *FrontendServiceGetTableNamesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *FrontendServiceGetTableNamesResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - p.Properties = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - var _val string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _val = v + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break } - p.Properties[_key] = _val + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err := iprot.ReadMapEnd(); err != nil { - return err + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return nil -} -func (p *TRestoreSnapshotRequest) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return err - } else { - p.Meta = []byte(v) - } return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + 
return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { +func (p *FrontendServiceGetTableNamesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetTablesResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.JobInfo = []byte(v) } + p.Success = _field return nil } -func (p *TRestoreSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTableNamesResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRestoreSnapshotRequest"); err != nil { + if err = oprot.WriteStructBegin("getTableNames_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -50090,12 +80957,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { +func (p *FrontendServiceGetTableNamesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Cluster); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -50104,463 +80971,411 @@ func (p *TRestoreSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return 
thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TRestoreSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceGetTableNamesResult) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return fmt.Sprintf("FrontendServiceGetTableNamesResult(%+v)", *p) + } -func (p *TRestoreSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPasswd() { - if err = oprot.WriteFieldBegin("passwd", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Passwd); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceGetTableNamesResult) DeepEqual(ano *FrontendServiceGetTableNamesResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true } -func (p *TRestoreSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Db); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceGetTableNamesResult) Field0DeepEqual(src *TGetTablesResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return true } -func (p *TRestoreSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTable() { - if err = oprot.WriteFieldBegin("table", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Table); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +type FrontendServiceDescribeTableArgs struct { + Params *TDescribeTableParams `thrift:"params,1" frugal:"1,default,TDescribeTableParams" json:"params"` } -func (p *TRestoreSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - 
return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +func NewFrontendServiceDescribeTableArgs() *FrontendServiceDescribeTableArgs { + return &FrontendServiceDescribeTableArgs{} } -func (p *TRestoreSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetLabelName() { - if err = oprot.WriteFieldBegin("label_name", thrift.STRING, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.LabelName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +func (p *FrontendServiceDescribeTableArgs) InitDefault() { } -func (p *TRestoreSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetRepoName() { - if err = oprot.WriteFieldBegin("repo_name", thrift.STRING, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.RepoName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var FrontendServiceDescribeTableArgs_Params_DEFAULT *TDescribeTableParams + +func (p *FrontendServiceDescribeTableArgs) GetParams() (v *TDescribeTableParams) { + if !p.IsSetParams() { + return FrontendServiceDescribeTableArgs_Params_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return p.Params +} +func (p *FrontendServiceDescribeTableArgs) SetParams(val *TDescribeTableParams) { + p.Params = val } -func (p *TRestoreSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetTableRefs() { - if err = oprot.WriteFieldBegin("table_refs", thrift.LIST, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TableRefs)); err != nil { - return err - } - for _, v := range p.TableRefs { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +var fieldIDToName_FrontendServiceDescribeTableArgs = map[int16]string{ + 1: "params", } -func (p *TRestoreSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetProperties() { - if err = oprot.WriteFieldBegin("properties", thrift.MAP, 10); err != nil { - goto WriteFieldBeginError +func (p *FrontendServiceDescribeTableArgs) IsSetParams() bool { + return p.Params != nil +} + +func (p *FrontendServiceDescribeTableArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { - 
return err + if fieldTypeId == thrift.STOP { + break } - for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { - return err + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - - if err := oprot.WriteString(v); err != nil { - return err + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetMeta() { - if err = oprot.WriteFieldBegin("meta", thrift.STRING, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBinary([]byte(p.Meta)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *FrontendServiceDescribeTableArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTDescribeTableParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} + +func (p *FrontendServiceDescribeTableArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("describeTable_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetJobInfo() { 
- if err = oprot.WriteFieldBegin("job_info", thrift.STRING, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBinary([]byte(p.JobInfo)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceDescribeTableArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRestoreSnapshotRequest) String() string { +func (p *FrontendServiceDescribeTableArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TRestoreSnapshotRequest(%+v)", *p) + return fmt.Sprintf("FrontendServiceDescribeTableArgs(%+v)", *p) + } -func (p *TRestoreSnapshotRequest) DeepEqual(ano *TRestoreSnapshotRequest) bool { +func (p *FrontendServiceDescribeTableArgs) DeepEqual(ano *FrontendServiceDescribeTableArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Passwd) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false - } - if !p.Field5DeepEqual(ano.Table) { - return false - } - if !p.Field6DeepEqual(ano.Token) { - return false - } - if !p.Field7DeepEqual(ano.LabelName) { - return false - } - if !p.Field8DeepEqual(ano.RepoName) { - return false - } - if !p.Field9DeepEqual(ano.TableRefs) { - return false - } - if !p.Field10DeepEqual(ano.Properties) { - return false - } - if !p.Field11DeepEqual(ano.Meta) { - return false - } - if !p.Field12DeepEqual(ano.JobInfo) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *TRestoreSnapshotRequest) Field1DeepEqual(src *string) bool { +func (p *FrontendServiceDescribeTableArgs) Field1DeepEqual(src *TDescribeTableParams) bool { - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { + if !p.Params.DeepEqual(src) { return false } return true } -func (p *TRestoreSnapshotRequest) Field2DeepEqual(src *string) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { - return false - } - if strings.Compare(*p.User, *src) != 0 { - return false - } - return true +type FrontendServiceDescribeTableResult struct { + Success *TDescribeTableResult_ `thrift:"success,0,optional" frugal:"0,optional,TDescribeTableResult_" json:"success,omitempty"` } -func (p *TRestoreSnapshotRequest) Field3DeepEqual(src *string) bool { - if p.Passwd == src { - return true - } else if p.Passwd == nil || src == nil { - return false - } - if strings.Compare(*p.Passwd, *src) != 0 { - return false - } - return true +func NewFrontendServiceDescribeTableResult() *FrontendServiceDescribeTableResult { + return &FrontendServiceDescribeTableResult{} } -func (p *TRestoreSnapshotRequest) Field4DeepEqual(src *string) bool { - if p.Db == src { - return true - } 
else if p.Db == nil || src == nil { - return false - } - if strings.Compare(*p.Db, *src) != 0 { - return false - } - return true +func (p *FrontendServiceDescribeTableResult) InitDefault() { } -func (p *TRestoreSnapshotRequest) Field5DeepEqual(src *string) bool { - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false - } - if strings.Compare(*p.Table, *src) != 0 { - return false +var FrontendServiceDescribeTableResult_Success_DEFAULT *TDescribeTableResult_ + +func (p *FrontendServiceDescribeTableResult) GetSuccess() (v *TDescribeTableResult_) { + if !p.IsSetSuccess() { + return FrontendServiceDescribeTableResult_Success_DEFAULT } - return true + return p.Success +} +func (p *FrontendServiceDescribeTableResult) SetSuccess(x interface{}) { + p.Success = x.(*TDescribeTableResult_) } -func (p *TRestoreSnapshotRequest) Field6DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { - return false - } - return true +var fieldIDToName_FrontendServiceDescribeTableResult = map[int16]string{ + 0: "success", } -func (p *TRestoreSnapshotRequest) Field7DeepEqual(src *string) bool { - if p.LabelName == src { - return true - } else if p.LabelName == nil || src == nil { - return false +func (p *FrontendServiceDescribeTableResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *FrontendServiceDescribeTableResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if strings.Compare(*p.LabelName, *src) != 0 { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - return true + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) Field8DeepEqual(src *string) bool { - if p.RepoName == src { - return true - } else if p.RepoName == nil || src == nil { - return false - } - if strings.Compare(*p.RepoName, *src) != 0 { - return false +func (p *FrontendServiceDescribeTableResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTDescribeTableResult_() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.Success = 
_field + return nil } -func (p *TRestoreSnapshotRequest) Field9DeepEqual(src []*TTableRef) bool { - if len(p.TableRefs) != len(src) { - return false +func (p *FrontendServiceDescribeTableResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("describeTable_result"); err != nil { + goto WriteStructBeginError } - for i, v := range p.TableRefs { - _src := src[i] - if !v.DeepEqual(_src) { - return false + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError } } - return true + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) Field10DeepEqual(src map[string]string) bool { - if len(p.Properties) != len(src) { - return false - } - for k, v := range p.Properties { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false +func (p *FrontendServiceDescribeTableResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TRestoreSnapshotRequest) Field11DeepEqual(src []byte) bool { - if bytes.Compare(p.Meta, src) != 0 { +func (p *FrontendServiceDescribeTableResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FrontendServiceDescribeTableResult(%+v)", *p) + +} + +func (p *FrontendServiceDescribeTableResult) DeepEqual(ano *FrontendServiceDescribeTableResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TRestoreSnapshotRequest) Field12DeepEqual(src []byte) bool { - if bytes.Compare(p.JobInfo, src) != 0 { +func (p *FrontendServiceDescribeTableResult) Field0DeepEqual(src *TDescribeTableResult_) bool { + + if !p.Success.DeepEqual(src) { return false } return true } -type TRestoreSnapshotResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +type FrontendServiceDescribeTablesArgs struct { + Params *TDescribeTablesParams `thrift:"params,1" frugal:"1,default,TDescribeTablesParams" json:"params"` } -func NewTRestoreSnapshotResult_() *TRestoreSnapshotResult_ { - return &TRestoreSnapshotResult_{} +func NewFrontendServiceDescribeTablesArgs() *FrontendServiceDescribeTablesArgs { + return &FrontendServiceDescribeTablesArgs{} } -func (p *TRestoreSnapshotResult_) InitDefault() { - *p = TRestoreSnapshotResult_{} +func (p *FrontendServiceDescribeTablesArgs) InitDefault() { } -var 
TRestoreSnapshotResult__Status_DEFAULT *status.TStatus +var FrontendServiceDescribeTablesArgs_Params_DEFAULT *TDescribeTablesParams -func (p *TRestoreSnapshotResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TRestoreSnapshotResult__Status_DEFAULT +func (p *FrontendServiceDescribeTablesArgs) GetParams() (v *TDescribeTablesParams) { + if !p.IsSetParams() { + return FrontendServiceDescribeTablesArgs_Params_DEFAULT } - return p.Status + return p.Params } -func (p *TRestoreSnapshotResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *FrontendServiceDescribeTablesArgs) SetParams(val *TDescribeTablesParams) { + p.Params = val } -var fieldIDToName_TRestoreSnapshotResult_ = map[int16]string{ - 1: "status", +var fieldIDToName_FrontendServiceDescribeTablesArgs = map[int16]string{ + 1: "params", } -func (p *TRestoreSnapshotResult_) IsSetStatus() bool { - return p.Status != nil +func (p *FrontendServiceDescribeTablesArgs) IsSetParams() bool { + return p.Params != nil } -func (p *TRestoreSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDescribeTablesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -50584,17 +81399,14 @@ func (p *TRestoreSnapshotResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -50609,7 +81421,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -50619,17 +81431,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *FrontendServiceDescribeTablesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTDescribeTablesParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } -func (p *TRestoreSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDescribeTablesArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRestoreSnapshotResult"); err != nil { + if err = oprot.WriteStructBegin("describeTables_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -50637,7 +81450,6 @@ func (p *TRestoreSnapshotResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -50656,17 +81468,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRestoreSnapshotResult_) 
writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceDescribeTablesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -50675,102 +81485,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRestoreSnapshotResult_) String() string { +func (p *FrontendServiceDescribeTablesArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TRestoreSnapshotResult_(%+v)", *p) + return fmt.Sprintf("FrontendServiceDescribeTablesArgs(%+v)", *p) + } -func (p *TRestoreSnapshotResult_) DeepEqual(ano *TRestoreSnapshotResult_) bool { +func (p *FrontendServiceDescribeTablesArgs) DeepEqual(ano *FrontendServiceDescribeTablesArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *TRestoreSnapshotResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *FrontendServiceDescribeTablesArgs) Field1DeepEqual(src *TDescribeTablesParams) bool { - if !p.Status.DeepEqual(src) { + if !p.Params.DeepEqual(src) { return false } return true } -type TGetMasterTokenRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Password *string `thrift:"password,3,optional" frugal:"3,optional,string" json:"password,omitempty"` -} - -func NewTGetMasterTokenRequest() *TGetMasterTokenRequest { - return &TGetMasterTokenRequest{} -} - -func (p *TGetMasterTokenRequest) InitDefault() { - *p = TGetMasterTokenRequest{} +type FrontendServiceDescribeTablesResult struct { + Success *TDescribeTablesResult_ `thrift:"success,0,optional" frugal:"0,optional,TDescribeTablesResult_" json:"success,omitempty"` } -var TGetMasterTokenRequest_Cluster_DEFAULT string - -func (p *TGetMasterTokenRequest) GetCluster() (v string) { - if !p.IsSetCluster() { - return TGetMasterTokenRequest_Cluster_DEFAULT - } - return *p.Cluster +func NewFrontendServiceDescribeTablesResult() *FrontendServiceDescribeTablesResult { + return &FrontendServiceDescribeTablesResult{} } -var TGetMasterTokenRequest_User_DEFAULT string - -func (p *TGetMasterTokenRequest) GetUser() (v string) { - if !p.IsSetUser() { - return TGetMasterTokenRequest_User_DEFAULT - } - return *p.User +func (p *FrontendServiceDescribeTablesResult) InitDefault() { } -var TGetMasterTokenRequest_Password_DEFAULT string +var FrontendServiceDescribeTablesResult_Success_DEFAULT *TDescribeTablesResult_ -func (p *TGetMasterTokenRequest) GetPassword() (v string) { - if !p.IsSetPassword() { - return TGetMasterTokenRequest_Password_DEFAULT +func (p *FrontendServiceDescribeTablesResult) GetSuccess() (v *TDescribeTablesResult_) { + if !p.IsSetSuccess() { + return FrontendServiceDescribeTablesResult_Success_DEFAULT } - return *p.Password -} -func (p *TGetMasterTokenRequest) 
SetCluster(val *string) { - p.Cluster = val -} -func (p *TGetMasterTokenRequest) SetUser(val *string) { - p.User = val -} -func (p *TGetMasterTokenRequest) SetPassword(val *string) { - p.Password = val -} - -var fieldIDToName_TGetMasterTokenRequest = map[int16]string{ - 1: "cluster", - 2: "user", - 3: "password", + return p.Success } - -func (p *TGetMasterTokenRequest) IsSetCluster() bool { - return p.Cluster != nil +func (p *FrontendServiceDescribeTablesResult) SetSuccess(x interface{}) { + p.Success = x.(*TDescribeTablesResult_) } -func (p *TGetMasterTokenRequest) IsSetUser() bool { - return p.User != nil +var fieldIDToName_FrontendServiceDescribeTablesResult = map[int16]string{ + 0: "success", } -func (p *TGetMasterTokenRequest) IsSetPassword() bool { - return p.Password != nil +func (p *FrontendServiceDescribeTablesResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TGetMasterTokenRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDescribeTablesResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -50789,42 +81563,19 @@ func (p *TGetMasterTokenRequest) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -50839,62 +81590,35 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TGetMasterTokenRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Cluster = &v - } - return nil -} - -func (p *TGetMasterTokenRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.User = &v - } - return nil +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetMasterTokenRequest) 
ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *FrontendServiceDescribeTablesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTDescribeTablesResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Password = &v } + p.Success = _field return nil } -func (p *TGetMasterTokenRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDescribeTablesResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TGetMasterTokenRequest"); err != nil { + if err = oprot.WriteStructBegin("describeTables_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -50913,50 +81637,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetMasterTokenRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetCluster() { - if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Cluster); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TGetMasterTokenRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetUser() { - if err = oprot.WriteFieldBegin("user", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.User); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TGetMasterTokenRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetPassword() { - if err = oprot.WriteFieldBegin("password", thrift.STRING, 3); err != nil { +func (p *FrontendServiceDescribeTablesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Password); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -50965,124 +81651,71 @@ func (p *TGetMasterTokenRequest) writeField3(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TGetMasterTokenRequest) String() string { 
+func (p *FrontendServiceDescribeTablesResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TGetMasterTokenRequest(%+v)", *p) + return fmt.Sprintf("FrontendServiceDescribeTablesResult(%+v)", *p) + } -func (p *TGetMasterTokenRequest) DeepEqual(ano *TGetMasterTokenRequest) bool { +func (p *FrontendServiceDescribeTablesResult) DeepEqual(ano *FrontendServiceDescribeTablesResult) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Cluster) { - return false - } - if !p.Field2DeepEqual(ano.User) { - return false - } - if !p.Field3DeepEqual(ano.Password) { - return false - } - return true -} - -func (p *TGetMasterTokenRequest) Field1DeepEqual(src *string) bool { - - if p.Cluster == src { - return true - } else if p.Cluster == nil || src == nil { - return false - } - if strings.Compare(*p.Cluster, *src) != 0 { + if !p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TGetMasterTokenRequest) Field2DeepEqual(src *string) bool { - if p.User == src { - return true - } else if p.User == nil || src == nil { - return false - } - if strings.Compare(*p.User, *src) != 0 { - return false - } - return true -} -func (p *TGetMasterTokenRequest) Field3DeepEqual(src *string) bool { +func (p *FrontendServiceDescribeTablesResult) Field0DeepEqual(src *TDescribeTablesResult_) bool { - if p.Password == src { - return true - } else if p.Password == nil || src == nil { - return false - } - if strings.Compare(*p.Password, *src) != 0 { + if !p.Success.DeepEqual(src) { return false } return true } -type TGetMasterTokenResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` -} - -func NewTGetMasterTokenResult_() *TGetMasterTokenResult_ { - return &TGetMasterTokenResult_{} +type FrontendServiceShowVariablesArgs struct { + Params *TShowVariableRequest `thrift:"params,1" frugal:"1,default,TShowVariableRequest" json:"params"` } -func (p *TGetMasterTokenResult_) InitDefault() { - *p = TGetMasterTokenResult_{} +func NewFrontendServiceShowVariablesArgs() *FrontendServiceShowVariablesArgs { + return &FrontendServiceShowVariablesArgs{} } -var TGetMasterTokenResult__Status_DEFAULT *status.TStatus - -func (p *TGetMasterTokenResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TGetMasterTokenResult__Status_DEFAULT - } - return p.Status +func (p *FrontendServiceShowVariablesArgs) InitDefault() { } -var TGetMasterTokenResult__Token_DEFAULT string +var FrontendServiceShowVariablesArgs_Params_DEFAULT *TShowVariableRequest -func (p *TGetMasterTokenResult_) GetToken() (v string) { - if !p.IsSetToken() { - return TGetMasterTokenResult__Token_DEFAULT +func (p *FrontendServiceShowVariablesArgs) GetParams() (v *TShowVariableRequest) { + if !p.IsSetParams() { + return FrontendServiceShowVariablesArgs_Params_DEFAULT } - return *p.Token -} -func (p *TGetMasterTokenResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TGetMasterTokenResult_) SetToken(val *string) { - p.Token = val + return p.Params } - -var fieldIDToName_TGetMasterTokenResult_ = map[int16]string{ - 1: "status", - 2: "token", +func (p *FrontendServiceShowVariablesArgs) SetParams(val *TShowVariableRequest) { + p.Params = val } -func (p *TGetMasterTokenResult_) IsSetStatus() bool { - return p.Status != nil +var fieldIDToName_FrontendServiceShowVariablesArgs = map[int16]string{ + 1: 
"params", } -func (p *TGetMasterTokenResult_) IsSetToken() bool { - return p.Token != nil +func (p *FrontendServiceShowVariablesArgs) IsSetParams() bool { + return p.Params != nil } -func (p *TGetMasterTokenResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowVariablesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -51106,27 +81739,14 @@ func (p *TGetMasterTokenResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -51141,7 +81761,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -51151,26 +81771,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetMasterTokenResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TGetMasterTokenResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *FrontendServiceShowVariablesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTShowVariableRequest() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Token = &v } + p.Params = _field return nil } -func (p *TGetMasterTokenResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowVariablesArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TGetMasterTokenResult"); err != nil { + if err = oprot.WriteStructBegin("showVariables_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -51178,11 +81790,6 @@ func (p *TGetMasterTokenResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -51201,17 +81808,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetMasterTokenResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceShowVariablesArgs) 
writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -51220,118 +81825,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TGetMasterTokenResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Token); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TGetMasterTokenResult_) String() string { +func (p *FrontendServiceShowVariablesArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TGetMasterTokenResult_(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowVariablesArgs(%+v)", *p) + } -func (p *TGetMasterTokenResult_) DeepEqual(ano *TGetMasterTokenResult_) bool { +func (p *FrontendServiceShowVariablesArgs) DeepEqual(ano *FrontendServiceShowVariablesArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Token) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *TGetMasterTokenResult_) Field1DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { - return false - } - return true -} -func (p *TGetMasterTokenResult_) Field2DeepEqual(src *string) bool { +func (p *FrontendServiceShowVariablesArgs) Field1DeepEqual(src *TShowVariableRequest) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false - } - if strings.Compare(*p.Token, *src) != 0 { + if !p.Params.DeepEqual(src) { return false } return true } -type TGetBinlogLagResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Lag *int64 `thrift:"lag,2,optional" frugal:"2,optional,i64" json:"lag,omitempty"` -} - -func NewTGetBinlogLagResult_() *TGetBinlogLagResult_ { - return &TGetBinlogLagResult_{} +type FrontendServiceShowVariablesResult struct { + Success *TShowVariableResult_ `thrift:"success,0,optional" frugal:"0,optional,TShowVariableResult_" json:"success,omitempty"` } -func (p *TGetBinlogLagResult_) InitDefault() { - *p = TGetBinlogLagResult_{} +func NewFrontendServiceShowVariablesResult() *FrontendServiceShowVariablesResult { + return &FrontendServiceShowVariablesResult{} } -var TGetBinlogLagResult__Status_DEFAULT *status.TStatus - -func (p *TGetBinlogLagResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TGetBinlogLagResult__Status_DEFAULT - } - return p.Status +func (p *FrontendServiceShowVariablesResult) InitDefault() { } -var TGetBinlogLagResult__Lag_DEFAULT int64 +var FrontendServiceShowVariablesResult_Success_DEFAULT *TShowVariableResult_ -func (p *TGetBinlogLagResult_) GetLag() (v int64) { - if !p.IsSetLag() { - return TGetBinlogLagResult__Lag_DEFAULT +func (p *FrontendServiceShowVariablesResult) GetSuccess() (v 
*TShowVariableResult_) { + if !p.IsSetSuccess() { + return FrontendServiceShowVariablesResult_Success_DEFAULT } - return *p.Lag -} -func (p *TGetBinlogLagResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TGetBinlogLagResult_) SetLag(val *int64) { - p.Lag = val + return p.Success } - -var fieldIDToName_TGetBinlogLagResult_ = map[int16]string{ - 1: "status", - 2: "lag", +func (p *FrontendServiceShowVariablesResult) SetSuccess(x interface{}) { + p.Success = x.(*TShowVariableResult_) } -func (p *TGetBinlogLagResult_) IsSetStatus() bool { - return p.Status != nil +var fieldIDToName_FrontendServiceShowVariablesResult = map[int16]string{ + 0: "success", } -func (p *TGetBinlogLagResult_) IsSetLag() bool { - return p.Lag != nil +func (p *FrontendServiceShowVariablesResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TGetBinlogLagResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowVariablesResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -51350,32 +81903,19 @@ func (p *TGetBinlogLagResult_) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -51390,7 +81930,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogLagResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -51400,38 +81940,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogLagResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TGetBinlogLagResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *FrontendServiceShowVariablesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTShowVariableResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Lag = &v } + p.Success = _field return nil } -func (p *TGetBinlogLagResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowVariablesResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TGetBinlogLagResult"); err != nil { + if err = oprot.WriteStructBegin("showVariables_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 
1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -51450,31 +81977,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TGetBinlogLagResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TGetBinlogLagResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetLag() { - if err = oprot.WriteFieldBegin("lag", thrift.I64, 2); err != nil { +func (p *FrontendServiceShowVariablesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.Lag); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -51483,95 +81991,71 @@ func (p *TGetBinlogLagResult_) writeField2(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TGetBinlogLagResult_) String() string { +func (p *FrontendServiceShowVariablesResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TGetBinlogLagResult_(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowVariablesResult(%+v)", *p) + } -func (p *TGetBinlogLagResult_) DeepEqual(ano *TGetBinlogLagResult_) bool { +func (p *FrontendServiceShowVariablesResult) DeepEqual(ano *FrontendServiceShowVariablesResult) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Lag) { + if !p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TGetBinlogLagResult_) Field1DeepEqual(src *status.TStatus) bool { - - if !p.Status.DeepEqual(src) { - return false - } - return true -} -func (p *TGetBinlogLagResult_) Field2DeepEqual(src *int64) bool { +func (p *FrontendServiceShowVariablesResult) Field0DeepEqual(src *TShowVariableResult_) bool { - if p.Lag == src { - return true - } else if p.Lag == nil || src == nil { - return false - } - if *p.Lag != *src { + if !p.Success.DeepEqual(src) { return false } return true } -type TUpdateFollowerStatsCacheRequest struct { - Key *string `thrift:"key,1,optional" frugal:"1,optional,string" json:"key,omitempty"` - StatsRows []string `thrift:"statsRows,2" frugal:"2,default,list" json:"statsRows"` +type FrontendServiceReportExecStatusArgs struct { + Params *TReportExecStatusParams `thrift:"params,1" frugal:"1,default,TReportExecStatusParams" 
json:"params"` } -func NewTUpdateFollowerStatsCacheRequest() *TUpdateFollowerStatsCacheRequest { - return &TUpdateFollowerStatsCacheRequest{} +func NewFrontendServiceReportExecStatusArgs() *FrontendServiceReportExecStatusArgs { + return &FrontendServiceReportExecStatusArgs{} } -func (p *TUpdateFollowerStatsCacheRequest) InitDefault() { - *p = TUpdateFollowerStatsCacheRequest{} +func (p *FrontendServiceReportExecStatusArgs) InitDefault() { } -var TUpdateFollowerStatsCacheRequest_Key_DEFAULT string +var FrontendServiceReportExecStatusArgs_Params_DEFAULT *TReportExecStatusParams -func (p *TUpdateFollowerStatsCacheRequest) GetKey() (v string) { - if !p.IsSetKey() { - return TUpdateFollowerStatsCacheRequest_Key_DEFAULT +func (p *FrontendServiceReportExecStatusArgs) GetParams() (v *TReportExecStatusParams) { + if !p.IsSetParams() { + return FrontendServiceReportExecStatusArgs_Params_DEFAULT } - return *p.Key -} - -func (p *TUpdateFollowerStatsCacheRequest) GetStatsRows() (v []string) { - return p.StatsRows -} -func (p *TUpdateFollowerStatsCacheRequest) SetKey(val *string) { - p.Key = val + return p.Params } -func (p *TUpdateFollowerStatsCacheRequest) SetStatsRows(val []string) { - p.StatsRows = val +func (p *FrontendServiceReportExecStatusArgs) SetParams(val *TReportExecStatusParams) { + p.Params = val } -var fieldIDToName_TUpdateFollowerStatsCacheRequest = map[int16]string{ - 1: "key", - 2: "statsRows", +var fieldIDToName_FrontendServiceReportExecStatusArgs = map[int16]string{ + 1: "params", } -func (p *TUpdateFollowerStatsCacheRequest) IsSetKey() bool { - return p.Key != nil +func (p *FrontendServiceReportExecStatusArgs) IsSetParams() bool { + return p.Params != nil } -func (p *TUpdateFollowerStatsCacheRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportExecStatusArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -51591,31 +82075,18 @@ func (p *TUpdateFollowerStatsCacheRequest) Read(iprot thrift.TProtocol) (err err switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -51630,50 +82101,28 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateFollowerStatsCacheRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TUpdateFollowerStatsCacheRequest) 
ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Key = &v - } - return nil -} - -func (p *TUpdateFollowerStatsCacheRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.StatsRows = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} - p.StatsRows = append(p.StatsRows, _elem) - } - if err := iprot.ReadListEnd(); err != nil { +func (p *FrontendServiceReportExecStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTReportExecStatusParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } -func (p *TUpdateFollowerStatsCacheRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportExecStatusArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TUpdateFollowerStatsCacheRequest"); err != nil { + if err = oprot.WriteStructBegin("reportExecStatus_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -51681,11 +82130,6 @@ func (p *TUpdateFollowerStatsCacheRequest) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -51704,38 +82148,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TUpdateFollowerStatsCacheRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetKey() { - if err = oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Key); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TUpdateFollowerStatsCacheRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("statsRows", thrift.LIST, 2); err != nil { +func (p *FrontendServiceReportExecStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.StatsRows)); err != nil { - return err - } - for _, v := range p.StatsRows { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.Params.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -51743,164 +82160,71 @@ func (p *TUpdateFollowerStatsCacheRequest) writeField2(oprot thrift.TProtocol) ( } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p 
*TUpdateFollowerStatsCacheRequest) String() string { +func (p *FrontendServiceReportExecStatusArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TUpdateFollowerStatsCacheRequest(%+v)", *p) + return fmt.Sprintf("FrontendServiceReportExecStatusArgs(%+v)", *p) + } -func (p *TUpdateFollowerStatsCacheRequest) DeepEqual(ano *TUpdateFollowerStatsCacheRequest) bool { +func (p *FrontendServiceReportExecStatusArgs) DeepEqual(ano *FrontendServiceReportExecStatusArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Key) { - return false - } - if !p.Field2DeepEqual(ano.StatsRows) { + if !p.Field1DeepEqual(ano.Params) { return false } return true } -func (p *TUpdateFollowerStatsCacheRequest) Field1DeepEqual(src *string) bool { - - if p.Key == src { - return true - } else if p.Key == nil || src == nil { - return false - } - if strings.Compare(*p.Key, *src) != 0 { - return false - } - return true -} -func (p *TUpdateFollowerStatsCacheRequest) Field2DeepEqual(src []string) bool { +func (p *FrontendServiceReportExecStatusArgs) Field1DeepEqual(src *TReportExecStatusParams) bool { - if len(p.StatsRows) != len(src) { + if !p.Params.DeepEqual(src) { return false } - for i, v := range p.StatsRows { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } return true } -type TAutoIncrementRangeRequest struct { - DbId *int64 `thrift:"db_id,1,optional" frugal:"1,optional,i64" json:"db_id,omitempty"` - TableId *int64 `thrift:"table_id,2,optional" frugal:"2,optional,i64" json:"table_id,omitempty"` - ColumnId *int64 `thrift:"column_id,3,optional" frugal:"3,optional,i64" json:"column_id,omitempty"` - Length *int64 `thrift:"length,4,optional" frugal:"4,optional,i64" json:"length,omitempty"` - LowerBound *int64 `thrift:"lower_bound,5,optional" frugal:"5,optional,i64" json:"lower_bound,omitempty"` -} - -func NewTAutoIncrementRangeRequest() *TAutoIncrementRangeRequest { - return &TAutoIncrementRangeRequest{} -} - -func (p *TAutoIncrementRangeRequest) InitDefault() { - *p = TAutoIncrementRangeRequest{} -} - -var TAutoIncrementRangeRequest_DbId_DEFAULT int64 - -func (p *TAutoIncrementRangeRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TAutoIncrementRangeRequest_DbId_DEFAULT - } - return *p.DbId -} - -var TAutoIncrementRangeRequest_TableId_DEFAULT int64 - -func (p *TAutoIncrementRangeRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TAutoIncrementRangeRequest_TableId_DEFAULT - } - return *p.TableId +type FrontendServiceReportExecStatusResult struct { + Success *TReportExecStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TReportExecStatusResult_" json:"success,omitempty"` } -var TAutoIncrementRangeRequest_ColumnId_DEFAULT int64 - -func (p *TAutoIncrementRangeRequest) GetColumnId() (v int64) { - if !p.IsSetColumnId() { - return TAutoIncrementRangeRequest_ColumnId_DEFAULT - } - return *p.ColumnId +func NewFrontendServiceReportExecStatusResult() *FrontendServiceReportExecStatusResult { + return &FrontendServiceReportExecStatusResult{} } -var TAutoIncrementRangeRequest_Length_DEFAULT int64 - -func (p *TAutoIncrementRangeRequest) GetLength() (v int64) { - if !p.IsSetLength() { - return TAutoIncrementRangeRequest_Length_DEFAULT - } - return *p.Length +func (p *FrontendServiceReportExecStatusResult) InitDefault() { } -var TAutoIncrementRangeRequest_LowerBound_DEFAULT int64 +var FrontendServiceReportExecStatusResult_Success_DEFAULT *TReportExecStatusResult_ -func (p 
*TAutoIncrementRangeRequest) GetLowerBound() (v int64) { - if !p.IsSetLowerBound() { - return TAutoIncrementRangeRequest_LowerBound_DEFAULT +func (p *FrontendServiceReportExecStatusResult) GetSuccess() (v *TReportExecStatusResult_) { + if !p.IsSetSuccess() { + return FrontendServiceReportExecStatusResult_Success_DEFAULT } - return *p.LowerBound -} -func (p *TAutoIncrementRangeRequest) SetDbId(val *int64) { - p.DbId = val -} -func (p *TAutoIncrementRangeRequest) SetTableId(val *int64) { - p.TableId = val -} -func (p *TAutoIncrementRangeRequest) SetColumnId(val *int64) { - p.ColumnId = val -} -func (p *TAutoIncrementRangeRequest) SetLength(val *int64) { - p.Length = val -} -func (p *TAutoIncrementRangeRequest) SetLowerBound(val *int64) { - p.LowerBound = val -} - -var fieldIDToName_TAutoIncrementRangeRequest = map[int16]string{ - 1: "db_id", - 2: "table_id", - 3: "column_id", - 4: "length", - 5: "lower_bound", -} - -func (p *TAutoIncrementRangeRequest) IsSetDbId() bool { - return p.DbId != nil -} - -func (p *TAutoIncrementRangeRequest) IsSetTableId() bool { - return p.TableId != nil + return p.Success } - -func (p *TAutoIncrementRangeRequest) IsSetColumnId() bool { - return p.ColumnId != nil +func (p *FrontendServiceReportExecStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TReportExecStatusResult_) } -func (p *TAutoIncrementRangeRequest) IsSetLength() bool { - return p.Length != nil +var fieldIDToName_FrontendServiceReportExecStatusResult = map[int16]string{ + 0: "success", } -func (p *TAutoIncrementRangeRequest) IsSetLowerBound() bool { - return p.LowerBound != nil +func (p *FrontendServiceReportExecStatusResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TAutoIncrementRangeRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportExecStatusResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -51919,62 +82243,19 @@ func (p *TAutoIncrementRangeRequest) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -51989,7 +82270,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: 
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -51999,78 +82280,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAutoIncrementRangeRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DbId = &v - } - return nil -} - -func (p *TAutoIncrementRangeRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TableId = &v - } - return nil -} - -func (p *TAutoIncrementRangeRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ColumnId = &v - } - return nil -} - -func (p *TAutoIncrementRangeRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Length = &v - } - return nil -} - -func (p *TAutoIncrementRangeRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *FrontendServiceReportExecStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTReportExecStatusResult_() + if err := _field.Read(iprot); err != nil { return err - } else { - p.LowerBound = &v } + p.Success = _field return nil } -func (p *TAutoIncrementRangeRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportExecStatusResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TAutoIncrementRangeRequest"); err != nil { + if err = oprot.WriteStructBegin("reportExecStatus_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -52089,88 +82317,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TAutoIncrementRangeRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TAutoIncrementRangeRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 2); err != nil { - goto 
WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TableId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TAutoIncrementRangeRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnId() { - if err = oprot.WriteFieldBegin("column_id", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.ColumnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TAutoIncrementRangeRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetLength() { - if err = oprot.WriteFieldBegin("length", thrift.I64, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Length); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TAutoIncrementRangeRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetLowerBound() { - if err = oprot.WriteFieldBegin("lower_bound", thrift.I64, 5); err != nil { +func (p *FrontendServiceReportExecStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.LowerBound); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -52179,172 +82331,71 @@ func (p *TAutoIncrementRangeRequest) writeField5(oprot thrift.TProtocol) (err er } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TAutoIncrementRangeRequest) String() string { +func (p *FrontendServiceReportExecStatusResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TAutoIncrementRangeRequest(%+v)", *p) + return fmt.Sprintf("FrontendServiceReportExecStatusResult(%+v)", *p) + } -func (p *TAutoIncrementRangeRequest) DeepEqual(ano *TAutoIncrementRangeRequest) bool { +func (p *FrontendServiceReportExecStatusResult) DeepEqual(ano *FrontendServiceReportExecStatusResult) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.DbId) { - return false - } - if !p.Field2DeepEqual(ano.TableId) { - return false - } - if !p.Field3DeepEqual(ano.ColumnId) { - return false - } - if !p.Field4DeepEqual(ano.Length) { - return false - } - if !p.Field5DeepEqual(ano.LowerBound) { - return false - } - return true -} - -func (p 
*TAutoIncrementRangeRequest) Field1DeepEqual(src *int64) bool { - - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false - } - if *p.DbId != *src { - return false - } - return true -} -func (p *TAutoIncrementRangeRequest) Field2DeepEqual(src *int64) bool { - - if p.TableId == src { - return true - } else if p.TableId == nil || src == nil { - return false - } - if *p.TableId != *src { - return false - } - return true -} -func (p *TAutoIncrementRangeRequest) Field3DeepEqual(src *int64) bool { - - if p.ColumnId == src { - return true - } else if p.ColumnId == nil || src == nil { - return false - } - if *p.ColumnId != *src { + if !p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TAutoIncrementRangeRequest) Field4DeepEqual(src *int64) bool { - if p.Length == src { - return true - } else if p.Length == nil || src == nil { - return false - } - if *p.Length != *src { - return false - } - return true -} -func (p *TAutoIncrementRangeRequest) Field5DeepEqual(src *int64) bool { +func (p *FrontendServiceReportExecStatusResult) Field0DeepEqual(src *TReportExecStatusResult_) bool { - if p.LowerBound == src { - return true - } else if p.LowerBound == nil || src == nil { - return false - } - if *p.LowerBound != *src { + if !p.Success.DeepEqual(src) { return false } return true } -type TAutoIncrementRangeResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Start *int64 `thrift:"start,2,optional" frugal:"2,optional,i64" json:"start,omitempty"` - Length *int64 `thrift:"length,3,optional" frugal:"3,optional,i64" json:"length,omitempty"` -} - -func NewTAutoIncrementRangeResult_() *TAutoIncrementRangeResult_ { - return &TAutoIncrementRangeResult_{} -} - -func (p *TAutoIncrementRangeResult_) InitDefault() { - *p = TAutoIncrementRangeResult_{} -} - -var TAutoIncrementRangeResult__Status_DEFAULT *status.TStatus - -func (p *TAutoIncrementRangeResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TAutoIncrementRangeResult__Status_DEFAULT - } - return p.Status -} - -var TAutoIncrementRangeResult__Start_DEFAULT int64 - -func (p *TAutoIncrementRangeResult_) GetStart() (v int64) { - if !p.IsSetStart() { - return TAutoIncrementRangeResult__Start_DEFAULT - } - return *p.Start -} - -var TAutoIncrementRangeResult__Length_DEFAULT int64 - -func (p *TAutoIncrementRangeResult_) GetLength() (v int64) { - if !p.IsSetLength() { - return TAutoIncrementRangeResult__Length_DEFAULT - } - return *p.Length -} -func (p *TAutoIncrementRangeResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TAutoIncrementRangeResult_) SetStart(val *int64) { - p.Start = val -} -func (p *TAutoIncrementRangeResult_) SetLength(val *int64) { - p.Length = val +type FrontendServiceFinishTaskArgs struct { + Request *masterservice.TFinishTaskRequest `thrift:"request,1" frugal:"1,default,masterservice.TFinishTaskRequest" json:"request"` } -var fieldIDToName_TAutoIncrementRangeResult_ = map[int16]string{ - 1: "status", - 2: "start", - 3: "length", +func NewFrontendServiceFinishTaskArgs() *FrontendServiceFinishTaskArgs { + return &FrontendServiceFinishTaskArgs{} } -func (p *TAutoIncrementRangeResult_) IsSetStatus() bool { - return p.Status != nil +func (p *FrontendServiceFinishTaskArgs) InitDefault() { } -func (p *TAutoIncrementRangeResult_) IsSetStart() bool { - return p.Start != nil +var FrontendServiceFinishTaskArgs_Request_DEFAULT *masterservice.TFinishTaskRequest + +func (p 
*FrontendServiceFinishTaskArgs) GetRequest() (v *masterservice.TFinishTaskRequest) { + if !p.IsSetRequest() { + return FrontendServiceFinishTaskArgs_Request_DEFAULT + } + return p.Request +} +func (p *FrontendServiceFinishTaskArgs) SetRequest(val *masterservice.TFinishTaskRequest) { + p.Request = val } -func (p *TAutoIncrementRangeResult_) IsSetLength() bool { - return p.Length != nil +var fieldIDToName_FrontendServiceFinishTaskArgs = map[int16]string{ + 1: "request", } -func (p *TAutoIncrementRangeResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFinishTaskArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *FrontendServiceFinishTaskArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -52368,37 +82419,14 @@ func (p *TAutoIncrementRangeResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -52413,7 +82441,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -52423,35 +82451,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAutoIncrementRangeResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TAutoIncrementRangeResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Start = &v - } - return nil -} - -func (p *TAutoIncrementRangeResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *FrontendServiceFinishTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := masterservice.NewTFinishTaskRequest() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Length = &v } + p.Request = _field return nil } -func (p *TAutoIncrementRangeResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFinishTaskArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TAutoIncrementRangeResult"); err != nil { + if err = oprot.WriteStructBegin("finishTask_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -52459,15 +82470,6 @@ func (p 
*TAutoIncrementRangeResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -52486,207 +82488,83 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TAutoIncrementRangeResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceFinishTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TAutoIncrementRangeResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetStart() { - if err = oprot.WriteFieldBegin("start", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Start); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err := p.Request.Write(oprot); err != nil { + return err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TAutoIncrementRangeResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetLength() { - if err = oprot.WriteFieldBegin("length", thrift.I64, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.Length); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TAutoIncrementRangeResult_) String() string { +func (p *FrontendServiceFinishTaskArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TAutoIncrementRangeResult_(%+v)", *p) + return fmt.Sprintf("FrontendServiceFinishTaskArgs(%+v)", *p) + } -func (p *TAutoIncrementRangeResult_) DeepEqual(ano *TAutoIncrementRangeResult_) bool { +func (p *FrontendServiceFinishTaskArgs) DeepEqual(ano *FrontendServiceFinishTaskArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Start) { - return false - } - if !p.Field3DeepEqual(ano.Length) { - return false - } - return true -} - -func (p *TAutoIncrementRangeResult_) Field1DeepEqual(src *status.TStatus) bool { - - if 
!p.Status.DeepEqual(src) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *TAutoIncrementRangeResult_) Field2DeepEqual(src *int64) bool { - if p.Start == src { - return true - } else if p.Start == nil || src == nil { - return false - } - if *p.Start != *src { - return false - } - return true -} -func (p *TAutoIncrementRangeResult_) Field3DeepEqual(src *int64) bool { +func (p *FrontendServiceFinishTaskArgs) Field1DeepEqual(src *masterservice.TFinishTaskRequest) bool { - if p.Length == src { - return true - } else if p.Length == nil || src == nil { - return false - } - if *p.Length != *src { + if !p.Request.DeepEqual(src) { return false } return true } -type TCreatePartitionRequest struct { - TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"` - DbId *int64 `thrift:"db_id,2,optional" frugal:"2,optional,i64" json:"db_id,omitempty"` - TableId *int64 `thrift:"table_id,3,optional" frugal:"3,optional,i64" json:"table_id,omitempty"` - PartitionValues [][]*exprs.TStringLiteral `thrift:"partitionValues,4,optional" frugal:"4,optional,list>" json:"partitionValues,omitempty"` -} - -func NewTCreatePartitionRequest() *TCreatePartitionRequest { - return &TCreatePartitionRequest{} -} - -func (p *TCreatePartitionRequest) InitDefault() { - *p = TCreatePartitionRequest{} -} - -var TCreatePartitionRequest_TxnId_DEFAULT int64 - -func (p *TCreatePartitionRequest) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TCreatePartitionRequest_TxnId_DEFAULT - } - return *p.TxnId +type FrontendServiceFinishTaskResult struct { + Success *masterservice.TMasterResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TMasterResult_" json:"success,omitempty"` } -var TCreatePartitionRequest_DbId_DEFAULT int64 - -func (p *TCreatePartitionRequest) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TCreatePartitionRequest_DbId_DEFAULT - } - return *p.DbId +func NewFrontendServiceFinishTaskResult() *FrontendServiceFinishTaskResult { + return &FrontendServiceFinishTaskResult{} } -var TCreatePartitionRequest_TableId_DEFAULT int64 - -func (p *TCreatePartitionRequest) GetTableId() (v int64) { - if !p.IsSetTableId() { - return TCreatePartitionRequest_TableId_DEFAULT - } - return *p.TableId +func (p *FrontendServiceFinishTaskResult) InitDefault() { } -var TCreatePartitionRequest_PartitionValues_DEFAULT [][]*exprs.TStringLiteral +var FrontendServiceFinishTaskResult_Success_DEFAULT *masterservice.TMasterResult_ -func (p *TCreatePartitionRequest) GetPartitionValues() (v [][]*exprs.TStringLiteral) { - if !p.IsSetPartitionValues() { - return TCreatePartitionRequest_PartitionValues_DEFAULT +func (p *FrontendServiceFinishTaskResult) GetSuccess() (v *masterservice.TMasterResult_) { + if !p.IsSetSuccess() { + return FrontendServiceFinishTaskResult_Success_DEFAULT } - return p.PartitionValues -} -func (p *TCreatePartitionRequest) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TCreatePartitionRequest) SetDbId(val *int64) { - p.DbId = val -} -func (p *TCreatePartitionRequest) SetTableId(val *int64) { - p.TableId = val -} -func (p *TCreatePartitionRequest) SetPartitionValues(val [][]*exprs.TStringLiteral) { - p.PartitionValues = val -} - -var fieldIDToName_TCreatePartitionRequest = map[int16]string{ - 1: "txn_id", - 2: "db_id", - 3: "table_id", - 4: "partitionValues", -} - -func (p *TCreatePartitionRequest) IsSetTxnId() bool { - return p.TxnId != nil + return p.Success } - -func (p *TCreatePartitionRequest) IsSetDbId() bool { - return p.DbId != nil +func (p 
*FrontendServiceFinishTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*masterservice.TMasterResult_) } -func (p *TCreatePartitionRequest) IsSetTableId() bool { - return p.TableId != nil +var fieldIDToName_FrontendServiceFinishTaskResult = map[int16]string{ + 0: "success", } -func (p *TCreatePartitionRequest) IsSetPartitionValues() bool { - return p.PartitionValues != nil +func (p *FrontendServiceFinishTaskResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TCreatePartitionRequest) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFinishTaskResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -52705,52 +82583,19 @@ func (p *TCreatePartitionRequest) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err = p.ReadField4(iprot); err != nil { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -52765,7 +82610,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionRequest[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -52775,88 +82620,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v - } - return nil -} - -func (p *TCreatePartitionRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DbId = &v - } - return nil -} - -func (p *TCreatePartitionRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TableId = &v - } - return nil -} - -func (p *TCreatePartitionRequest) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.PartitionValues = make([][]*exprs.TStringLiteral, 0, size) - for i := 0; i < size; i++ { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _elem := 
make([]*exprs.TStringLiteral, 0, size) - for i := 0; i < size; i++ { - _elem1 := exprs.NewTStringLiteral() - if err := _elem1.Read(iprot); err != nil { - return err - } - - _elem = append(_elem, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - - p.PartitionValues = append(p.PartitionValues, _elem) - } - if err := iprot.ReadListEnd(); err != nil { +func (p *FrontendServiceFinishTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := masterservice.NewTMasterResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *TCreatePartitionRequest) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFinishTaskResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCreatePartitionRequest"); err != nil { + if err = oprot.WriteStructBegin("finishTask_result"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 + if err = p.writeField0(oprot); err != nil { + fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -52875,12 +82657,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCreatePartitionRequest) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil { +func (p *FrontendServiceFinishTaskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TxnId); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -52889,255 +82671,240 @@ func (p *TCreatePartitionRequest) writeField1(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TCreatePartitionRequest) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceFinishTaskResult) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return fmt.Sprintf("FrontendServiceFinishTaskResult(%+v)", *p) + } -func (p *TCreatePartitionRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTableId() { - if err = oprot.WriteFieldBegin("table_id", thrift.I64, 3); err != nil { 
- goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TableId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceFinishTaskResult) DeepEqual(ano *FrontendServiceFinishTaskResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true } -func (p *TCreatePartitionRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetPartitionValues() { - if err = oprot.WriteFieldBegin("partitionValues", thrift.LIST, 4); err != nil { - goto WriteFieldBeginError +func (p *FrontendServiceFinishTaskResult) Field0DeepEqual(src *masterservice.TMasterResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type FrontendServiceReportArgs struct { + Request *masterservice.TReportRequest `thrift:"request,1" frugal:"1,default,masterservice.TReportRequest" json:"request"` +} + +func NewFrontendServiceReportArgs() *FrontendServiceReportArgs { + return &FrontendServiceReportArgs{} +} + +func (p *FrontendServiceReportArgs) InitDefault() { +} + +var FrontendServiceReportArgs_Request_DEFAULT *masterservice.TReportRequest + +func (p *FrontendServiceReportArgs) GetRequest() (v *masterservice.TReportRequest) { + if !p.IsSetRequest() { + return FrontendServiceReportArgs_Request_DEFAULT + } + return p.Request +} +func (p *FrontendServiceReportArgs) SetRequest(val *masterservice.TReportRequest) { + p.Request = val +} + +var fieldIDToName_FrontendServiceReportArgs = map[int16]string{ + 1: "request", +} + +func (p *FrontendServiceReportArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *FrontendServiceReportArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteListBegin(thrift.LIST, len(p.PartitionValues)); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - for _, v := range p.PartitionValues { - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - if err := oprot.WriteListEnd(); err != nil { - return err + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +ReadStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionRequest) String() string { - if p == nil { - return "" +func (p *FrontendServiceReportArgs) ReadField1(iprot thrift.TProtocol) error { + _field := masterservice.NewTReportRequest() + if err := _field.Read(iprot); err != nil { + return err } - return fmt.Sprintf("TCreatePartitionRequest(%+v)", *p) + p.Request = _field + return nil } -func (p *TCreatePartitionRequest) DeepEqual(ano *TCreatePartitionRequest) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.TxnId) { - return false +func (p *FrontendServiceReportArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("report_args"); err != nil { + goto WriteStructBeginError } - if !p.Field2DeepEqual(ano.DbId) { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if !p.Field3DeepEqual(ano.TableId) { - return false + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if !p.Field4DeepEqual(ano.PartitionValues) { - return false + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCreatePartitionRequest) Field1DeepEqual(src *int64) bool { - - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false +func (p *FrontendServiceReportArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - if *p.TxnId != *src { - return false + if err := p.Request.Write(oprot); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCreatePartitionRequest) Field2DeepEqual(src *int64) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false - } - if *p.DbId != *src { - return false +func (p *FrontendServiceReportArgs) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("FrontendServiceReportArgs(%+v)", *p) + } -func (p *TCreatePartitionRequest) Field3DeepEqual(src *int64) bool { - if p.TableId == 
src { +func (p *FrontendServiceReportArgs) DeepEqual(ano *FrontendServiceReportArgs) bool { + if p == ano { return true - } else if p.TableId == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.TableId != *src { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *TCreatePartitionRequest) Field4DeepEqual(src [][]*exprs.TStringLiteral) bool { - if len(p.PartitionValues) != len(src) { +func (p *FrontendServiceReportArgs) Field1DeepEqual(src *masterservice.TReportRequest) bool { + + if !p.Request.DeepEqual(src) { return false } - for i, v := range p.PartitionValues { - _src := src[i] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } - } return true } -type TCreatePartitionResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - Partitions []*descriptors.TOlapTablePartition `thrift:"partitions,2,optional" frugal:"2,optional,list" json:"partitions,omitempty"` - Tablets []*descriptors.TTabletLocation `thrift:"tablets,3,optional" frugal:"3,optional,list" json:"tablets,omitempty"` - Nodes []*descriptors.TNodeInfo `thrift:"nodes,4,optional" frugal:"4,optional,list" json:"nodes,omitempty"` -} - -func NewTCreatePartitionResult_() *TCreatePartitionResult_ { - return &TCreatePartitionResult_{} -} - -func (p *TCreatePartitionResult_) InitDefault() { - *p = TCreatePartitionResult_{} -} - -var TCreatePartitionResult__Status_DEFAULT *status.TStatus - -func (p *TCreatePartitionResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TCreatePartitionResult__Status_DEFAULT - } - return p.Status +type FrontendServiceReportResult struct { + Success *masterservice.TMasterResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TMasterResult_" json:"success,omitempty"` } -var TCreatePartitionResult__Partitions_DEFAULT []*descriptors.TOlapTablePartition - -func (p *TCreatePartitionResult_) GetPartitions() (v []*descriptors.TOlapTablePartition) { - if !p.IsSetPartitions() { - return TCreatePartitionResult__Partitions_DEFAULT - } - return p.Partitions +func NewFrontendServiceReportResult() *FrontendServiceReportResult { + return &FrontendServiceReportResult{} } -var TCreatePartitionResult__Tablets_DEFAULT []*descriptors.TTabletLocation - -func (p *TCreatePartitionResult_) GetTablets() (v []*descriptors.TTabletLocation) { - if !p.IsSetTablets() { - return TCreatePartitionResult__Tablets_DEFAULT - } - return p.Tablets +func (p *FrontendServiceReportResult) InitDefault() { } -var TCreatePartitionResult__Nodes_DEFAULT []*descriptors.TNodeInfo +var FrontendServiceReportResult_Success_DEFAULT *masterservice.TMasterResult_ -func (p *TCreatePartitionResult_) GetNodes() (v []*descriptors.TNodeInfo) { - if !p.IsSetNodes() { - return TCreatePartitionResult__Nodes_DEFAULT +func (p *FrontendServiceReportResult) GetSuccess() (v *masterservice.TMasterResult_) { + if !p.IsSetSuccess() { + return FrontendServiceReportResult_Success_DEFAULT } - return p.Nodes -} -func (p *TCreatePartitionResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TCreatePartitionResult_) SetPartitions(val []*descriptors.TOlapTablePartition) { - p.Partitions = val -} -func (p *TCreatePartitionResult_) SetTablets(val []*descriptors.TTabletLocation) { - p.Tablets = val -} -func (p *TCreatePartitionResult_) SetNodes(val []*descriptors.TNodeInfo) { - p.Nodes = val -} - -var 
fieldIDToName_TCreatePartitionResult_ = map[int16]string{ - 1: "status", - 2: "partitions", - 3: "tablets", - 4: "nodes", -} - -func (p *TCreatePartitionResult_) IsSetStatus() bool { - return p.Status != nil + return p.Success } - -func (p *TCreatePartitionResult_) IsSetPartitions() bool { - return p.Partitions != nil +func (p *FrontendServiceReportResult) SetSuccess(x interface{}) { + p.Success = x.(*masterservice.TMasterResult_) } -func (p *TCreatePartitionResult_) IsSetTablets() bool { - return p.Tablets != nil +var fieldIDToName_FrontendServiceReportResult = map[int16]string{ + 0: "success", } -func (p *TCreatePartitionResult_) IsSetNodes() bool { - return p.Nodes != nil +func (p *FrontendServiceReportResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *TCreatePartitionResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -53156,52 +82923,19 @@ func (p *TCreatePartitionResult_) Read(iprot thrift.TProtocol) (err error) { } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err = p.ReadField4(iprot); err != nil { + if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -53216,7 +82950,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -53226,97 +82960,149 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *FrontendServiceReportResult) ReadField0(iprot thrift.TProtocol) error { + _field := masterservice.NewTMasterResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *TCreatePartitionResult_) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *FrontendServiceReportResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = 
oprot.WriteStructBegin("report_result"); err != nil { + goto WriteStructBeginError } - p.Partitions = make([]*descriptors.TOlapTablePartition, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTablePartition() - if err := _elem.Read(iprot); err != nil { - return err + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError } - - p.Partitions = append(p.Partitions, _elem) } - if err := iprot.ReadListEnd(); err != nil { - return err + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCreatePartitionResult_) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.Tablets = make([]*descriptors.TTabletLocation, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTTabletLocation() - if err := _elem.Read(iprot); err != nil { +func (p *FrontendServiceReportResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { return err } - - p.Tablets = append(p.Tablets, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TCreatePartitionResult_) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *FrontendServiceReportResult) String() string { + if p == nil { + return "" } - p.Nodes = make([]*descriptors.TNodeInfo, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTNodeInfo() - if err := _elem.Read(iprot); err != nil { - return err - } + return fmt.Sprintf("FrontendServiceReportResult(%+v)", *p) + +} - p.Nodes = append(p.Nodes, _elem) +func (p *FrontendServiceReportResult) DeepEqual(ano *FrontendServiceReportResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err := iprot.ReadListEnd(); err != nil { - return err + if !p.Field0DeepEqual(ano.Success) { + return false } - return nil + return true } -func (p *TCreatePartitionResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportResult) Field0DeepEqual(src *masterservice.TMasterResult_) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type FrontendServiceFetchResourceArgs struct { +} + +func NewFrontendServiceFetchResourceArgs() *FrontendServiceFetchResourceArgs { + return &FrontendServiceFetchResourceArgs{} +} + +func (p *FrontendServiceFetchResourceArgs) InitDefault() { +} + +var fieldIDToName_FrontendServiceFetchResourceArgs = map[int16]string{} + +func (p 
*FrontendServiceFetchResourceArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType var fieldId int16 - if err = oprot.WriteStructBegin("TCreatePartitionResult"); err != nil { - goto WriteStructBeginError + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError + if fieldTypeId == thrift.STOP { + break } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} +func (p *FrontendServiceFetchResourceArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("fetchResource_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -53327,101 +83113,159 @@ func (p *TCreatePartitionResult_) Write(oprot thrift.TProtocol) (err error) { return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCreatePartitionResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError +func (p *FrontendServiceFetchResourceArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FrontendServiceFetchResourceArgs(%+v)", *p) + +} + +func (p *FrontendServiceFetchResourceArgs) DeepEqual(ano *FrontendServiceFetchResourceArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true +} + +type FrontendServiceFetchResourceResult struct { + Success *masterservice.TFetchResourceResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TFetchResourceResult_" json:"success,omitempty"` +} + +func NewFrontendServiceFetchResourceResult() *FrontendServiceFetchResourceResult { + return &FrontendServiceFetchResourceResult{} +} + +func (p *FrontendServiceFetchResourceResult) InitDefault() { +} + +var FrontendServiceFetchResourceResult_Success_DEFAULT *masterservice.TFetchResourceResult_ + +func (p *FrontendServiceFetchResourceResult) 
GetSuccess() (v *masterservice.TFetchResourceResult_) { + if !p.IsSetSuccess() { + return FrontendServiceFetchResourceResult_Success_DEFAULT + } + return p.Success +} +func (p *FrontendServiceFetchResourceResult) SetSuccess(x interface{}) { + p.Success = x.(*masterservice.TFetchResourceResult_) +} + +var fieldIDToName_FrontendServiceFetchResourceResult = map[int16]string{ + 0: "success", +} + +func (p *FrontendServiceFetchResourceResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *FrontendServiceFetchResourceResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := p.Status.Write(oprot); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchResourceResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetPartitions() { - if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { - return err - } - for _, v := range p.Partitions { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *FrontendServiceFetchResourceResult) ReadField0(iprot thrift.TProtocol) error { + _field := masterservice.NewTFetchResourceResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TCreatePartitionResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTablets() { - if err = 
oprot.WriteFieldBegin("tablets", thrift.LIST, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { - return err - } - for _, v := range p.Tablets { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *FrontendServiceFetchResourceResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("fetchResource_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCreatePartitionResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetNodes() { - if err = oprot.WriteFieldBegin("nodes", thrift.LIST, 4); err != nil { +func (p *FrontendServiceFetchResourceResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Nodes)); err != nil { - return err - } - for _, v := range p.Nodes { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.Success.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -53430,2886 +83274,2451 @@ func (p *TCreatePartitionResult_) writeField4(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TCreatePartitionResult_) String() string { +func (p *FrontendServiceFetchResourceResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TCreatePartitionResult_(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchResourceResult(%+v)", *p) + } -func (p *TCreatePartitionResult_) DeepEqual(ano *TCreatePartitionResult_) bool { +func (p *FrontendServiceFetchResourceResult) DeepEqual(ano *FrontendServiceFetchResourceResult) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.Partitions) { - return false - } - if !p.Field3DeepEqual(ano.Tablets) { - return false - } - if !p.Field4DeepEqual(ano.Nodes) { + if 
!p.Field0DeepEqual(ano.Success) { return false } return true } -func (p *TCreatePartitionResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *FrontendServiceFetchResourceResult) Field0DeepEqual(src *masterservice.TFetchResourceResult_) bool { - if !p.Status.DeepEqual(src) { + if !p.Success.DeepEqual(src) { return false } return true } -func (p *TCreatePartitionResult_) Field2DeepEqual(src []*descriptors.TOlapTablePartition) bool { - if len(p.Partitions) != len(src) { - return false - } - for i, v := range p.Partitions { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +type FrontendServiceForwardArgs struct { + Params *TMasterOpRequest `thrift:"params,1" frugal:"1,default,TMasterOpRequest" json:"params"` } -func (p *TCreatePartitionResult_) Field3DeepEqual(src []*descriptors.TTabletLocation) bool { - if len(p.Tablets) != len(src) { - return false - } - for i, v := range p.Tablets { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +func NewFrontendServiceForwardArgs() *FrontendServiceForwardArgs { + return &FrontendServiceForwardArgs{} } -func (p *TCreatePartitionResult_) Field4DeepEqual(src []*descriptors.TNodeInfo) bool { - if len(p.Nodes) != len(src) { - return false - } - for i, v := range p.Nodes { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +func (p *FrontendServiceForwardArgs) InitDefault() { } -type FrontendService interface { - GetDbNames(ctx context.Context, params *TGetDbsParams) (r *TGetDbsResult_, err error) - - GetTableNames(ctx context.Context, params *TGetTablesParams) (r *TGetTablesResult_, err error) - - DescribeTable(ctx context.Context, params *TDescribeTableParams) (r *TDescribeTableResult_, err error) - - DescribeTables(ctx context.Context, params *TDescribeTablesParams) (r *TDescribeTablesResult_, err error) - - ShowVariables(ctx context.Context, params *TShowVariableRequest) (r *TShowVariableResult_, err error) - - ReportExecStatus(ctx context.Context, params *TReportExecStatusParams) (r *TReportExecStatusResult_, err error) - - FinishTask(ctx context.Context, request *masterservice.TFinishTaskRequest) (r *masterservice.TMasterResult_, err error) - - Report(ctx context.Context, request *masterservice.TReportRequest) (r *masterservice.TMasterResult_, err error) - - FetchResource(ctx context.Context) (r *masterservice.TFetchResourceResult_, err error) - - Forward(ctx context.Context, params *TMasterOpRequest) (r *TMasterOpResult_, err error) - - ListTableStatus(ctx context.Context, params *TGetTablesParams) (r *TListTableStatusResult_, err error) - - ListTableMetadataNameIds(ctx context.Context, params *TGetTablesParams) (r *TListTableMetadataNameIdsResult_, err error) - - ListTablePrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) - - ListSchemaPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) - - ListUserPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) - - UpdateExportTaskStatus(ctx context.Context, request *TUpdateExportTaskStatusRequest) (r *TFeResult_, err error) - - LoadTxnBegin(ctx context.Context, request *TLoadTxnBeginRequest) (r *TLoadTxnBeginResult_, err error) - - LoadTxnPreCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) - - LoadTxn2PC(ctx context.Context, request *TLoadTxn2PCRequest) (r *TLoadTxn2PCResult_, err error) - - LoadTxnCommit(ctx 
context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) - - LoadTxnRollback(ctx context.Context, request *TLoadTxnRollbackRequest) (r *TLoadTxnRollbackResult_, err error) - - BeginTxn(ctx context.Context, request *TBeginTxnRequest) (r *TBeginTxnResult_, err error) - - CommitTxn(ctx context.Context, request *TCommitTxnRequest) (r *TCommitTxnResult_, err error) - - RollbackTxn(ctx context.Context, request *TRollbackTxnRequest) (r *TRollbackTxnResult_, err error) - - GetBinlog(ctx context.Context, request *TGetBinlogRequest) (r *TGetBinlogResult_, err error) - - GetSnapshot(ctx context.Context, request *TGetSnapshotRequest) (r *TGetSnapshotResult_, err error) - - RestoreSnapshot(ctx context.Context, request *TRestoreSnapshotRequest) (r *TRestoreSnapshotResult_, err error) - - WaitingTxnStatus(ctx context.Context, request *TWaitingTxnStatusRequest) (r *TWaitingTxnStatusResult_, err error) - - StreamLoadPut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadPutResult_, err error) - - StreamLoadMultiTablePut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadMultiTablePutResult_, err error) - - SnapshotLoaderReport(ctx context.Context, request *TSnapshotLoaderReportRequest) (r *status.TStatus, err error) - - Ping(ctx context.Context, request *TFrontendPingFrontendRequest) (r *TFrontendPingFrontendResult_, err error) - - AddColumns(ctx context.Context, request *TAddColumnsRequest) (r *TAddColumnsResult_, err error) - - InitExternalCtlMeta(ctx context.Context, request *TInitExternalCtlMetaRequest) (r *TInitExternalCtlMetaResult_, err error) - - FetchSchemaTableData(ctx context.Context, request *TFetchSchemaTableDataRequest) (r *TFetchSchemaTableDataResult_, err error) - - AcquireToken(ctx context.Context) (r *TMySqlLoadAcquireTokenResult_, err error) - - ConfirmUnusedRemoteFiles(ctx context.Context, request *TConfirmUnusedRemoteFilesRequest) (r *TConfirmUnusedRemoteFilesResult_, err error) - - CheckAuth(ctx context.Context, request *TCheckAuthRequest) (r *TCheckAuthResult_, err error) - - GetQueryStats(ctx context.Context, request *TGetQueryStatsRequest) (r *TQueryStatsResult_, err error) - - GetTabletReplicaInfos(ctx context.Context, request *TGetTabletReplicaInfosRequest) (r *TGetTabletReplicaInfosResult_, err error) - - GetMasterToken(ctx context.Context, request *TGetMasterTokenRequest) (r *TGetMasterTokenResult_, err error) - - GetBinlogLag(ctx context.Context, request *TGetBinlogLagRequest) (r *TGetBinlogLagResult_, err error) - - UpdateStatsCache(ctx context.Context, request *TUpdateFollowerStatsCacheRequest) (r *status.TStatus, err error) - - GetAutoIncrementRange(ctx context.Context, request *TAutoIncrementRangeRequest) (r *TAutoIncrementRangeResult_, err error) +var FrontendServiceForwardArgs_Params_DEFAULT *TMasterOpRequest - CreatePartition(ctx context.Context, request *TCreatePartitionRequest) (r *TCreatePartitionResult_, err error) +func (p *FrontendServiceForwardArgs) GetParams() (v *TMasterOpRequest) { + if !p.IsSetParams() { + return FrontendServiceForwardArgs_Params_DEFAULT + } + return p.Params } - -type FrontendServiceClient struct { - c thrift.TClient +func (p *FrontendServiceForwardArgs) SetParams(val *TMasterOpRequest) { + p.Params = val } -func NewFrontendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *FrontendServiceClient { - return &FrontendServiceClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } +var fieldIDToName_FrontendServiceForwardArgs = 
map[int16]string{ + 1: "params", } -func NewFrontendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *FrontendServiceClient { - return &FrontendServiceClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } +func (p *FrontendServiceForwardArgs) IsSetParams() bool { + return p.Params != nil } -func NewFrontendServiceClient(c thrift.TClient) *FrontendServiceClient { - return &FrontendServiceClient{ - c: c, - } -} +func (p *FrontendServiceForwardArgs) Read(iprot thrift.TProtocol) (err error) { -func (p *FrontendServiceClient) Client_() thrift.TClient { - return p.c -} + var fieldTypeId thrift.TType + var fieldId int16 -func (p *FrontendServiceClient) GetDbNames(ctx context.Context, params *TGetDbsParams) (r *TGetDbsResult_, err error) { - var _args FrontendServiceGetDbNamesArgs - _args.Params = params - var _result FrontendServiceGetDbNamesResult - if err = p.Client_().Call(ctx, "getDbNames", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) GetTableNames(ctx context.Context, params *TGetTablesParams) (r *TGetTablesResult_, err error) { - var _args FrontendServiceGetTableNamesArgs - _args.Params = params - var _result FrontendServiceGetTableNamesResult - if err = p.Client_().Call(ctx, "getTableNames", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) DescribeTable(ctx context.Context, params *TDescribeTableParams) (r *TDescribeTableResult_, err error) { - var _args FrontendServiceDescribeTableArgs - _args.Params = params - var _result FrontendServiceDescribeTableResult - if err = p.Client_().Call(ctx, "describeTable", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) DescribeTables(ctx context.Context, params *TDescribeTablesParams) (r *TDescribeTablesResult_, err error) { - var _args FrontendServiceDescribeTablesArgs - _args.Params = params - var _result FrontendServiceDescribeTablesResult - if err = p.Client_().Call(ctx, "describeTables", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ShowVariables(ctx context.Context, params *TShowVariableRequest) (r *TShowVariableResult_, err error) { - var _args FrontendServiceShowVariablesArgs - _args.Params = params - var _result FrontendServiceShowVariablesResult - if err = p.Client_().Call(ctx, "showVariables", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ReportExecStatus(ctx context.Context, params *TReportExecStatusParams) (r *TReportExecStatusResult_, err error) { - var _args FrontendServiceReportExecStatusArgs - _args.Params = params - var _result FrontendServiceReportExecStatusResult - if err = p.Client_().Call(ctx, "reportExecStatus", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) FinishTask(ctx context.Context, request *masterservice.TFinishTaskRequest) (r *masterservice.TMasterResult_, err error) { - var _args FrontendServiceFinishTaskArgs - _args.Request = request - var _result FrontendServiceFinishTaskResult - if err = p.Client_().Call(ctx, "finishTask", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) Report(ctx context.Context, request *masterservice.TReportRequest) (r *masterservice.TMasterResult_, err error) { - var _args 
FrontendServiceReportArgs - _args.Request = request - var _result FrontendServiceReportResult - if err = p.Client_().Call(ctx, "report", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) FetchResource(ctx context.Context) (r *masterservice.TFetchResourceResult_, err error) { - var _args FrontendServiceFetchResourceArgs - var _result FrontendServiceFetchResourceResult - if err = p.Client_().Call(ctx, "fetchResource", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) Forward(ctx context.Context, params *TMasterOpRequest) (r *TMasterOpResult_, err error) { - var _args FrontendServiceForwardArgs - _args.Params = params - var _result FrontendServiceForwardResult - if err = p.Client_().Call(ctx, "forward", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ListTableStatus(ctx context.Context, params *TGetTablesParams) (r *TListTableStatusResult_, err error) { - var _args FrontendServiceListTableStatusArgs - _args.Params = params - var _result FrontendServiceListTableStatusResult - if err = p.Client_().Call(ctx, "listTableStatus", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ListTableMetadataNameIds(ctx context.Context, params *TGetTablesParams) (r *TListTableMetadataNameIdsResult_, err error) { - var _args FrontendServiceListTableMetadataNameIdsArgs - _args.Params = params - var _result FrontendServiceListTableMetadataNameIdsResult - if err = p.Client_().Call(ctx, "listTableMetadataNameIds", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ListTablePrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { - var _args FrontendServiceListTablePrivilegeStatusArgs - _args.Params = params - var _result FrontendServiceListTablePrivilegeStatusResult - if err = p.Client_().Call(ctx, "listTablePrivilegeStatus", &_args, &_result); err != nil { - return + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ListSchemaPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { - var _args FrontendServiceListSchemaPrivilegeStatusArgs - _args.Params = params - var _result FrontendServiceListSchemaPrivilegeStatusResult - if err = p.Client_().Call(ctx, "listSchemaPrivilegeStatus", &_args, &_result); err != nil { - return + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ListUserPrivilegeStatus(ctx context.Context, params *TGetTablesParams) (r *TListPrivilegesResult_, err error) { - var _args FrontendServiceListUserPrivilegeStatusArgs - _args.Params = params - var _result FrontendServiceListUserPrivilegeStatusResult - if err = 
p.Client_().Call(ctx, "listUserPrivilegeStatus", &_args, &_result); err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return _result.GetSuccess(), nil + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceClient) UpdateExportTaskStatus(ctx context.Context, request *TUpdateExportTaskStatusRequest) (r *TFeResult_, err error) { - var _args FrontendServiceUpdateExportTaskStatusArgs - _args.Request = request - var _result FrontendServiceUpdateExportTaskStatusResult - if err = p.Client_().Call(ctx, "updateExportTaskStatus", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTMasterOpRequest() + if err := _field.Read(iprot); err != nil { + return err } - return _result.GetSuccess(), nil + p.Params = _field + return nil } -func (p *FrontendServiceClient) LoadTxnBegin(ctx context.Context, request *TLoadTxnBeginRequest) (r *TLoadTxnBeginResult_, err error) { - var _args FrontendServiceLoadTxnBeginArgs - _args.Request = request - var _result FrontendServiceLoadTxnBeginResult - if err = p.Client_().Call(ctx, "loadTxnBegin", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("forward_args"); err != nil { + goto WriteStructBeginError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) LoadTxnPreCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) { - var _args FrontendServiceLoadTxnPreCommitArgs - _args.Request = request - var _result FrontendServiceLoadTxnPreCommitResult - if err = p.Client_().Call(ctx, "loadTxnPreCommit", &_args, &_result); err != nil { - return + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) LoadTxn2PC(ctx context.Context, request *TLoadTxn2PCRequest) (r *TLoadTxn2PCResult_, err error) { - var _args FrontendServiceLoadTxn2PCArgs - _args.Request = request - var _result FrontendServiceLoadTxn2PCResult - if err = p.Client_().Call(ctx, "loadTxn2PC", &_args, &_result); err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) LoadTxnCommit(ctx context.Context, request *TLoadTxnCommitRequest) (r *TLoadTxnCommitResult_, err error) { - var _args FrontendServiceLoadTxnCommitArgs - _args.Request = request - var _result FrontendServiceLoadTxnCommitResult - if err = p.Client_().Call(ctx, "loadTxnCommit", &_args, &_result); err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return _result.GetSuccess(), nil + 
return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceClient) LoadTxnRollback(ctx context.Context, request *TLoadTxnRollbackRequest) (r *TLoadTxnRollbackResult_, err error) { - var _args FrontendServiceLoadTxnRollbackArgs - _args.Request = request - var _result FrontendServiceLoadTxnRollbackResult - if err = p.Client_().Call(ctx, "loadTxnRollback", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) BeginTxn(ctx context.Context, request *TBeginTxnRequest) (r *TBeginTxnResult_, err error) { - var _args FrontendServiceBeginTxnArgs - _args.Request = request - var _result FrontendServiceBeginTxnResult - if err = p.Client_().Call(ctx, "beginTxn", &_args, &_result); err != nil { - return + if err := p.Params.Write(oprot); err != nil { + return err } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) CommitTxn(ctx context.Context, request *TCommitTxnRequest) (r *TCommitTxnResult_, err error) { - var _args FrontendServiceCommitTxnArgs - _args.Request = request - var _result FrontendServiceCommitTxnResult - if err = p.Client_().Call(ctx, "commitTxn", &_args, &_result); err != nil { - return + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - return _result.GetSuccess(), nil + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceClient) RollbackTxn(ctx context.Context, request *TRollbackTxnRequest) (r *TRollbackTxnResult_, err error) { - var _args FrontendServiceRollbackTxnArgs - _args.Request = request - var _result FrontendServiceRollbackTxnResult - if err = p.Client_().Call(ctx, "rollbackTxn", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) String() string { + if p == nil { + return "" } - return _result.GetSuccess(), nil + return fmt.Sprintf("FrontendServiceForwardArgs(%+v)", *p) + } -func (p *FrontendServiceClient) GetBinlog(ctx context.Context, request *TGetBinlogRequest) (r *TGetBinlogResult_, err error) { - var _args FrontendServiceGetBinlogArgs - _args.Request = request - var _result FrontendServiceGetBinlogResult - if err = p.Client_().Call(ctx, "getBinlog", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) DeepEqual(ano *FrontendServiceForwardArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) GetSnapshot(ctx context.Context, request *TGetSnapshotRequest) (r *TGetSnapshotResult_, err error) { - var _args FrontendServiceGetSnapshotArgs - _args.Request = request - var _result FrontendServiceGetSnapshotResult - if err = p.Client_().Call(ctx, "getSnapshot", &_args, &_result); err != nil { - return + if !p.Field1DeepEqual(ano.Params) { 
+ return false } - return _result.GetSuccess(), nil + return true } -func (p *FrontendServiceClient) RestoreSnapshot(ctx context.Context, request *TRestoreSnapshotRequest) (r *TRestoreSnapshotResult_, err error) { - var _args FrontendServiceRestoreSnapshotArgs - _args.Request = request - var _result FrontendServiceRestoreSnapshotResult - if err = p.Client_().Call(ctx, "restoreSnapshot", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardArgs) Field1DeepEqual(src *TMasterOpRequest) bool { + + if !p.Params.DeepEqual(src) { + return false } - return _result.GetSuccess(), nil + return true } -func (p *FrontendServiceClient) WaitingTxnStatus(ctx context.Context, request *TWaitingTxnStatusRequest) (r *TWaitingTxnStatusResult_, err error) { - var _args FrontendServiceWaitingTxnStatusArgs - _args.Request = request - var _result FrontendServiceWaitingTxnStatusResult - if err = p.Client_().Call(ctx, "waitingTxnStatus", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil + +type FrontendServiceForwardResult struct { + Success *TMasterOpResult_ `thrift:"success,0,optional" frugal:"0,optional,TMasterOpResult_" json:"success,omitempty"` } -func (p *FrontendServiceClient) StreamLoadPut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadPutResult_, err error) { - var _args FrontendServiceStreamLoadPutArgs - _args.Request = request - var _result FrontendServiceStreamLoadPutResult - if err = p.Client_().Call(ctx, "streamLoadPut", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil + +func NewFrontendServiceForwardResult() *FrontendServiceForwardResult { + return &FrontendServiceForwardResult{} } -func (p *FrontendServiceClient) StreamLoadMultiTablePut(ctx context.Context, request *TStreamLoadPutRequest) (r *TStreamLoadMultiTablePutResult_, err error) { - var _args FrontendServiceStreamLoadMultiTablePutArgs - _args.Request = request - var _result FrontendServiceStreamLoadMultiTablePutResult - if err = p.Client_().Call(ctx, "streamLoadMultiTablePut", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil + +func (p *FrontendServiceForwardResult) InitDefault() { } -func (p *FrontendServiceClient) SnapshotLoaderReport(ctx context.Context, request *TSnapshotLoaderReportRequest) (r *status.TStatus, err error) { - var _args FrontendServiceSnapshotLoaderReportArgs - _args.Request = request - var _result FrontendServiceSnapshotLoaderReportResult - if err = p.Client_().Call(ctx, "snapshotLoaderReport", &_args, &_result); err != nil { - return + +var FrontendServiceForwardResult_Success_DEFAULT *TMasterOpResult_ + +func (p *FrontendServiceForwardResult) GetSuccess() (v *TMasterOpResult_) { + if !p.IsSetSuccess() { + return FrontendServiceForwardResult_Success_DEFAULT } - return _result.GetSuccess(), nil + return p.Success } -func (p *FrontendServiceClient) Ping(ctx context.Context, request *TFrontendPingFrontendRequest) (r *TFrontendPingFrontendResult_, err error) { - var _args FrontendServicePingArgs - _args.Request = request - var _result FrontendServicePingResult - if err = p.Client_().Call(ctx, "ping", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *FrontendServiceForwardResult) SetSuccess(x interface{}) { + p.Success = x.(*TMasterOpResult_) } -func (p *FrontendServiceClient) AddColumns(ctx context.Context, request *TAddColumnsRequest) (r *TAddColumnsResult_, err error) { - var _args FrontendServiceAddColumnsArgs - _args.Request = request - var _result 
FrontendServiceAddColumnsResult - if err = p.Client_().Call(ctx, "addColumns", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil + +var fieldIDToName_FrontendServiceForwardResult = map[int16]string{ + 0: "success", } -func (p *FrontendServiceClient) InitExternalCtlMeta(ctx context.Context, request *TInitExternalCtlMetaRequest) (r *TInitExternalCtlMetaResult_, err error) { - var _args FrontendServiceInitExternalCtlMetaArgs - _args.Request = request - var _result FrontendServiceInitExternalCtlMetaResult - if err = p.Client_().Call(ctx, "initExternalCtlMeta", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil + +func (p *FrontendServiceForwardResult) IsSetSuccess() bool { + return p.Success != nil } -func (p *FrontendServiceClient) FetchSchemaTableData(ctx context.Context, request *TFetchSchemaTableDataRequest) (r *TFetchSchemaTableDataResult_, err error) { - var _args FrontendServiceFetchSchemaTableDataArgs - _args.Request = request - var _result FrontendServiceFetchSchemaTableDataResult - if err = p.Client_().Call(ctx, "fetchSchemaTableData", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) AcquireToken(ctx context.Context) (r *TMySqlLoadAcquireTokenResult_, err error) { - var _args FrontendServiceAcquireTokenArgs - var _result FrontendServiceAcquireTokenResult - if err = p.Client_().Call(ctx, "acquireToken", &_args, &_result); err != nil { - return + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) ConfirmUnusedRemoteFiles(ctx context.Context, request *TConfirmUnusedRemoteFilesRequest) (r *TConfirmUnusedRemoteFilesResult_, err error) { - var _args FrontendServiceConfirmUnusedRemoteFilesArgs - _args.Request = request - var _result FrontendServiceConfirmUnusedRemoteFilesResult - if err = p.Client_().Call(ctx, "confirmUnusedRemoteFiles", &_args, &_result); err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return _result.GetSuccess(), nil + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } 
-func (p *FrontendServiceClient) CheckAuth(ctx context.Context, request *TCheckAuthRequest) (r *TCheckAuthResult_, err error) { - var _args FrontendServiceCheckAuthArgs - _args.Request = request - var _result FrontendServiceCheckAuthResult - if err = p.Client_().Call(ctx, "checkAuth", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTMasterOpResult_() + if err := _field.Read(iprot); err != nil { + return err } - return _result.GetSuccess(), nil + p.Success = _field + return nil } -func (p *FrontendServiceClient) GetQueryStats(ctx context.Context, request *TGetQueryStatsRequest) (r *TQueryStatsResult_, err error) { - var _args FrontendServiceGetQueryStatsArgs - _args.Request = request - var _result FrontendServiceGetQueryStatsResult - if err = p.Client_().Call(ctx, "getQueryStats", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("forward_result"); err != nil { + goto WriteStructBeginError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) GetTabletReplicaInfos(ctx context.Context, request *TGetTabletReplicaInfosRequest) (r *TGetTabletReplicaInfosResult_, err error) { - var _args FrontendServiceGetTabletReplicaInfosArgs - _args.Request = request - var _result FrontendServiceGetTabletReplicaInfosResult - if err = p.Client_().Call(ctx, "getTabletReplicaInfos", &_args, &_result); err != nil { - return + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) GetMasterToken(ctx context.Context, request *TGetMasterTokenRequest) (r *TGetMasterTokenResult_, err error) { - var _args FrontendServiceGetMasterTokenArgs - _args.Request = request - var _result FrontendServiceGetMasterTokenResult - if err = p.Client_().Call(ctx, "getMasterToken", &_args, &_result); err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return _result.GetSuccess(), nil -} -func (p *FrontendServiceClient) GetBinlogLag(ctx context.Context, request *TGetBinlogLagRequest) (r *TGetBinlogLagResult_, err error) { - var _args FrontendServiceGetBinlogLagArgs - _args.Request = request - var _result FrontendServiceGetBinlogLagResult - if err = p.Client_().Call(ctx, "getBinlogLag", &_args, &_result); err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return _result.GetSuccess(), nil + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceClient) UpdateStatsCache(ctx context.Context, request *TUpdateFollowerStatsCacheRequest) (r *status.TStatus, err error) { - var _args FrontendServiceUpdateStatsCacheArgs - _args.Request = request - var _result FrontendServiceUpdateStatsCacheResult - if err = p.Client_().Call(ctx, "updateStatsCache", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { 
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return _result.GetSuccess(), nil + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceClient) GetAutoIncrementRange(ctx context.Context, request *TAutoIncrementRangeRequest) (r *TAutoIncrementRangeResult_, err error) { - var _args FrontendServiceGetAutoIncrementRangeArgs - _args.Request = request - var _result FrontendServiceGetAutoIncrementRangeResult - if err = p.Client_().Call(ctx, "getAutoIncrementRange", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) String() string { + if p == nil { + return "" } - return _result.GetSuccess(), nil + return fmt.Sprintf("FrontendServiceForwardResult(%+v)", *p) + } -func (p *FrontendServiceClient) CreatePartition(ctx context.Context, request *TCreatePartitionRequest) (r *TCreatePartitionResult_, err error) { - var _args FrontendServiceCreatePartitionArgs - _args.Request = request - var _result FrontendServiceCreatePartitionResult - if err = p.Client_().Call(ctx, "createPartition", &_args, &_result); err != nil { - return + +func (p *FrontendServiceForwardResult) DeepEqual(ano *FrontendServiceForwardResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return _result.GetSuccess(), nil + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true } -type FrontendServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler FrontendService -} +func (p *FrontendServiceForwardResult) Field0DeepEqual(src *TMasterOpResult_) bool { -func (p *FrontendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor + if !p.Success.DeepEqual(src) { + return false + } + return true } -func (p *FrontendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok +type FrontendServiceListTableStatusArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -func (p *FrontendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap +func NewFrontendServiceListTableStatusArgs() *FrontendServiceListTableStatusArgs { + return &FrontendServiceListTableStatusArgs{} } -func NewFrontendServiceProcessor(handler FrontendService) *FrontendServiceProcessor { - self := &FrontendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self.AddToProcessorMap("getDbNames", &frontendServiceProcessorGetDbNames{handler: handler}) - self.AddToProcessorMap("getTableNames", &frontendServiceProcessorGetTableNames{handler: handler}) - self.AddToProcessorMap("describeTable", &frontendServiceProcessorDescribeTable{handler: handler}) - self.AddToProcessorMap("describeTables", &frontendServiceProcessorDescribeTables{handler: handler}) - self.AddToProcessorMap("showVariables", &frontendServiceProcessorShowVariables{handler: handler}) - self.AddToProcessorMap("reportExecStatus", &frontendServiceProcessorReportExecStatus{handler: handler}) - 
self.AddToProcessorMap("finishTask", &frontendServiceProcessorFinishTask{handler: handler}) - self.AddToProcessorMap("report", &frontendServiceProcessorReport{handler: handler}) - self.AddToProcessorMap("fetchResource", &frontendServiceProcessorFetchResource{handler: handler}) - self.AddToProcessorMap("forward", &frontendServiceProcessorForward{handler: handler}) - self.AddToProcessorMap("listTableStatus", &frontendServiceProcessorListTableStatus{handler: handler}) - self.AddToProcessorMap("listTableMetadataNameIds", &frontendServiceProcessorListTableMetadataNameIds{handler: handler}) - self.AddToProcessorMap("listTablePrivilegeStatus", &frontendServiceProcessorListTablePrivilegeStatus{handler: handler}) - self.AddToProcessorMap("listSchemaPrivilegeStatus", &frontendServiceProcessorListSchemaPrivilegeStatus{handler: handler}) - self.AddToProcessorMap("listUserPrivilegeStatus", &frontendServiceProcessorListUserPrivilegeStatus{handler: handler}) - self.AddToProcessorMap("updateExportTaskStatus", &frontendServiceProcessorUpdateExportTaskStatus{handler: handler}) - self.AddToProcessorMap("loadTxnBegin", &frontendServiceProcessorLoadTxnBegin{handler: handler}) - self.AddToProcessorMap("loadTxnPreCommit", &frontendServiceProcessorLoadTxnPreCommit{handler: handler}) - self.AddToProcessorMap("loadTxn2PC", &frontendServiceProcessorLoadTxn2PC{handler: handler}) - self.AddToProcessorMap("loadTxnCommit", &frontendServiceProcessorLoadTxnCommit{handler: handler}) - self.AddToProcessorMap("loadTxnRollback", &frontendServiceProcessorLoadTxnRollback{handler: handler}) - self.AddToProcessorMap("beginTxn", &frontendServiceProcessorBeginTxn{handler: handler}) - self.AddToProcessorMap("commitTxn", &frontendServiceProcessorCommitTxn{handler: handler}) - self.AddToProcessorMap("rollbackTxn", &frontendServiceProcessorRollbackTxn{handler: handler}) - self.AddToProcessorMap("getBinlog", &frontendServiceProcessorGetBinlog{handler: handler}) - self.AddToProcessorMap("getSnapshot", &frontendServiceProcessorGetSnapshot{handler: handler}) - self.AddToProcessorMap("restoreSnapshot", &frontendServiceProcessorRestoreSnapshot{handler: handler}) - self.AddToProcessorMap("waitingTxnStatus", &frontendServiceProcessorWaitingTxnStatus{handler: handler}) - self.AddToProcessorMap("streamLoadPut", &frontendServiceProcessorStreamLoadPut{handler: handler}) - self.AddToProcessorMap("streamLoadMultiTablePut", &frontendServiceProcessorStreamLoadMultiTablePut{handler: handler}) - self.AddToProcessorMap("snapshotLoaderReport", &frontendServiceProcessorSnapshotLoaderReport{handler: handler}) - self.AddToProcessorMap("ping", &frontendServiceProcessorPing{handler: handler}) - self.AddToProcessorMap("addColumns", &frontendServiceProcessorAddColumns{handler: handler}) - self.AddToProcessorMap("initExternalCtlMeta", &frontendServiceProcessorInitExternalCtlMeta{handler: handler}) - self.AddToProcessorMap("fetchSchemaTableData", &frontendServiceProcessorFetchSchemaTableData{handler: handler}) - self.AddToProcessorMap("acquireToken", &frontendServiceProcessorAcquireToken{handler: handler}) - self.AddToProcessorMap("confirmUnusedRemoteFiles", &frontendServiceProcessorConfirmUnusedRemoteFiles{handler: handler}) - self.AddToProcessorMap("checkAuth", &frontendServiceProcessorCheckAuth{handler: handler}) - self.AddToProcessorMap("getQueryStats", &frontendServiceProcessorGetQueryStats{handler: handler}) - self.AddToProcessorMap("getTabletReplicaInfos", &frontendServiceProcessorGetTabletReplicaInfos{handler: handler}) - 
self.AddToProcessorMap("getMasterToken", &frontendServiceProcessorGetMasterToken{handler: handler}) - self.AddToProcessorMap("getBinlogLag", &frontendServiceProcessorGetBinlogLag{handler: handler}) - self.AddToProcessorMap("updateStatsCache", &frontendServiceProcessorUpdateStatsCache{handler: handler}) - self.AddToProcessorMap("getAutoIncrementRange", &frontendServiceProcessorGetAutoIncrementRange{handler: handler}) - self.AddToProcessorMap("createPartition", &frontendServiceProcessorCreatePartition{handler: handler}) - return self +func (p *FrontendServiceListTableStatusArgs) InitDefault() { } -func (p *FrontendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) + +var FrontendServiceListTableStatusArgs_Params_DEFAULT *TGetTablesParams + +func (p *FrontendServiceListTableStatusArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return FrontendServiceListTableStatusArgs_Params_DEFAULT } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x + return p.Params +} +func (p *FrontendServiceListTableStatusArgs) SetParams(val *TGetTablesParams) { + p.Params = val } -type frontendServiceProcessorGetDbNames struct { - handler FrontendService +var fieldIDToName_FrontendServiceListTableStatusArgs = map[int16]string{ + 1: "params", } -func (p *frontendServiceProcessorGetDbNames) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetDbNamesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getDbNames", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListTableStatusArgs) IsSetParams() bool { + return p.Params != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetDbNamesResult{} - var retval *TGetDbsResult_ - if retval, err2 = p.handler.GetDbNames(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getDbNames: "+err2.Error()) - oprot.WriteMessageBegin("getDbNames", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getDbNames", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableStatusArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + 
+ switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorGetTableNames struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorGetTableNames) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetTableNamesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getTableNames", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { + return err } + p.Params = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetTableNamesResult{} - var retval *TGetTablesResult_ - if retval, err2 = p.handler.GetTableNames(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getTableNames: "+err2.Error()) - oprot.WriteMessageBegin("getTableNames", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getTableNames", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableStatusArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTableStatus_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorDescribeTable struct { - handler FrontendService + return nil +WriteStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorDescribeTable) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceDescribeTableArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("describeTable", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceDescribeTableResult{} - var retval *TDescribeTableResult_ - if retval, err2 = p.handler.DescribeTable(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing describeTable: "+err2.Error()) - oprot.WriteMessageBegin("describeTable", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Params.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("describeTable", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceListTableStatusArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListTableStatusArgs(%+v)", *p) + +} + +func (p *FrontendServiceListTableStatusArgs) DeepEqual(ano *FrontendServiceListTableStatusArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Params) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTableStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorDescribeTables struct { - handler FrontendService +type FrontendServiceListTableStatusResult struct { + Success *TListTableStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TListTableStatusResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorDescribeTables) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceDescribeTablesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - 
oprot.WriteMessageBegin("describeTables", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListTableStatusResult() *FrontendServiceListTableStatusResult { + return &FrontendServiceListTableStatusResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceDescribeTablesResult{} - var retval *TDescribeTablesResult_ - if retval, err2 = p.handler.DescribeTables(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing describeTables: "+err2.Error()) - oprot.WriteMessageBegin("describeTables", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("describeTables", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListTableStatusResult) InitDefault() { +} + +var FrontendServiceListTableStatusResult_Success_DEFAULT *TListTableStatusResult_ + +func (p *FrontendServiceListTableStatusResult) GetSuccess() (v *TListTableStatusResult_) { + if !p.IsSetSuccess() { + return FrontendServiceListTableStatusResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceListTableStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TListTableStatusResult_) } -type frontendServiceProcessorShowVariables struct { - handler FrontendService +var fieldIDToName_FrontendServiceListTableStatusResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorShowVariables) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceShowVariablesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("showVariables", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *FrontendServiceListTableStatusResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceShowVariablesResult{} - var retval *TShowVariableResult_ - if retval, err2 = p.handler.ShowVariables(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing showVariables: "+err2.Error()) - oprot.WriteMessageBegin("showVariables", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err2 = oprot.WriteMessageBegin("showVariables", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *FrontendServiceListTableStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTListTableStatusResult_() + if err := _field.Read(iprot); err != nil { + return err } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + p.Success = _field + return nil +} + +func (p *FrontendServiceListTableStatusResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTableStatus_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return true, err + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type frontendServiceProcessorReportExecStatus struct { - handler FrontendService +func (p *FrontendServiceListTableStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *frontendServiceProcessorReportExecStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceReportExecStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - 
oprot.WriteMessageBegin("reportExecStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableStatusResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("FrontendServiceListTableStatusResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceReportExecStatusResult{} - var retval *TReportExecStatusResult_ - if retval, err2 = p.handler.ReportExecStatus(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing reportExecStatus: "+err2.Error()) - oprot.WriteMessageBegin("reportExecStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("reportExecStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *FrontendServiceListTableStatusResult) DeepEqual(ano *FrontendServiceListTableStatusResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTableStatusResult) Field0DeepEqual(src *TListTableStatusResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorFinishTask struct { - handler FrontendService +type FrontendServiceListTableMetadataNameIdsArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -func (p *frontendServiceProcessorFinishTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceFinishTaskArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("finishTask", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListTableMetadataNameIdsArgs() *FrontendServiceListTableMetadataNameIdsArgs { + return &FrontendServiceListTableMetadataNameIdsArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceFinishTaskResult{} - var retval *masterservice.TMasterResult_ - if retval, err2 = p.handler.FinishTask(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing finishTask: "+err2.Error()) - oprot.WriteMessageBegin("finishTask", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("finishTask", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableMetadataNameIdsArgs) InitDefault() { +} + +var FrontendServiceListTableMetadataNameIdsArgs_Params_DEFAULT *TGetTablesParams + +func (p *FrontendServiceListTableMetadataNameIdsArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return 
FrontendServiceListTableMetadataNameIdsArgs_Params_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Params +} +func (p *FrontendServiceListTableMetadataNameIdsArgs) SetParams(val *TGetTablesParams) { + p.Params = val +} + +var fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs = map[int16]string{ + 1: "params", +} + +func (p *FrontendServiceListTableMetadataNameIdsArgs) IsSetParams() bool { + return p.Params != nil +} + +func (p *FrontendServiceListTableMetadataNameIdsArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorReport struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorReport) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceReportArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("report", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableMetadataNameIdsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { + return err } + p.Params = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceReportResult{} - var retval *masterservice.TMasterResult_ - if retval, err2 = p.handler.Report(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing report: "+err2.Error()) - oprot.WriteMessageBegin("report", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("report", thrift.REPLY, seqId); err2 != nil { - 
err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableMetadataNameIdsArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTableMetadataNameIds_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorFetchResource struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorFetchResource) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceFetchResourceArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("fetchResource", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableMetadataNameIdsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceFetchResourceResult{} - var retval *masterservice.TFetchResourceResult_ - if retval, err2 = p.handler.FetchResource(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchResource: "+err2.Error()) - oprot.WriteMessageBegin("fetchResource", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Params.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("fetchResource", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceListTableMetadataNameIdsArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListTableMetadataNameIdsArgs(%+v)", *p) + +} + +func (p *FrontendServiceListTableMetadataNameIdsArgs) DeepEqual(ano *FrontendServiceListTableMetadataNameIdsArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = 
oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Params) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTableMetadataNameIdsArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorForward struct { - handler FrontendService +type FrontendServiceListTableMetadataNameIdsResult struct { + Success *TListTableMetadataNameIdsResult_ `thrift:"success,0,optional" frugal:"0,optional,TListTableMetadataNameIdsResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorForward) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceForwardArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("forward", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListTableMetadataNameIdsResult() *FrontendServiceListTableMetadataNameIdsResult { + return &FrontendServiceListTableMetadataNameIdsResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceForwardResult{} - var retval *TMasterOpResult_ - if retval, err2 = p.handler.Forward(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing forward: "+err2.Error()) - oprot.WriteMessageBegin("forward", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("forward", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListTableMetadataNameIdsResult) InitDefault() { +} + +var FrontendServiceListTableMetadataNameIdsResult_Success_DEFAULT *TListTableMetadataNameIdsResult_ + +func (p *FrontendServiceListTableMetadataNameIdsResult) GetSuccess() (v *TListTableMetadataNameIdsResult_) { + if !p.IsSetSuccess() { + return FrontendServiceListTableMetadataNameIdsResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceListTableMetadataNameIdsResult) SetSuccess(x interface{}) { + p.Success = x.(*TListTableMetadataNameIdsResult_) } -type frontendServiceProcessorListTableStatus struct { - handler FrontendService +var fieldIDToName_FrontendServiceListTableMetadataNameIdsResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorListTableStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceListTableStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("listTableStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListTableMetadataNameIdsResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - 
result := FrontendServiceListTableStatusResult{} - var retval *TListTableStatusResult_ - if retval, err2 = p.handler.ListTableStatus(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTableStatus: "+err2.Error()) - oprot.WriteMessageBegin("listTableStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("listTableStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableMetadataNameIdsResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorListTableMetadataNameIds struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorListTableMetadataNameIds) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceListTableMetadataNameIdsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableMetadataNameIdsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTListTableMetadataNameIdsResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceListTableMetadataNameIdsResult{} - var retval *TListTableMetadataNameIdsResult_ - if retval, err2 = p.handler.ListTableMetadataNameIds(ctx, 
args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTableMetadataNameIds: "+err2.Error()) - oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("listTableMetadataNameIds", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTableMetadataNameIdsResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTableMetadataNameIds_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type frontendServiceProcessorListTablePrivilegeStatus struct { - handler FrontendService +func (p *FrontendServiceListTableMetadataNameIdsResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *frontendServiceProcessorListTablePrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceListTablePrivilegeStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTableMetadataNameIdsResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("FrontendServiceListTableMetadataNameIdsResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceListTablePrivilegeStatusResult{} - var retval *TListPrivilegesResult_ - if retval, err2 = p.handler.ListTablePrivilegeStatus(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listTablePrivilegeStatus: "+err2.Error()) - oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - 
oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("listTablePrivilegeStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *FrontendServiceListTableMetadataNameIdsResult) DeepEqual(ano *FrontendServiceListTableMetadataNameIdsResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTableMetadataNameIdsResult) Field0DeepEqual(src *TListTableMetadataNameIdsResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorListSchemaPrivilegeStatus struct { - handler FrontendService +type FrontendServiceListTablePrivilegeStatusArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -func (p *frontendServiceProcessorListSchemaPrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceListSchemaPrivilegeStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListTablePrivilegeStatusArgs() *FrontendServiceListTablePrivilegeStatusArgs { + return &FrontendServiceListTablePrivilegeStatusArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceListSchemaPrivilegeStatusResult{} - var retval *TListPrivilegesResult_ - if retval, err2 = p.handler.ListSchemaPrivilegeStatus(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listSchemaPrivilegeStatus: "+err2.Error()) - oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("listSchemaPrivilegeStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListTablePrivilegeStatusArgs) InitDefault() { +} + +var FrontendServiceListTablePrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams + +func (p *FrontendServiceListTablePrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return FrontendServiceListTablePrivilegeStatusArgs_Params_DEFAULT } - return true, err + return p.Params +} +func (p *FrontendServiceListTablePrivilegeStatusArgs) SetParams(val *TGetTablesParams) { + p.Params = val } -type frontendServiceProcessorListUserPrivilegeStatus struct { - handler FrontendService +var fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs = map[int16]string{ + 1: "params", } -func 
(p *frontendServiceProcessorListUserPrivilegeStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceListUserPrivilegeStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListTablePrivilegeStatusArgs) IsSetParams() bool { + return p.Params != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceListUserPrivilegeStatusResult{} - var retval *TListPrivilegesResult_ - if retval, err2 = p.handler.ListUserPrivilegeStatus(ctx, args.Params); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing listUserPrivilegeStatus: "+err2.Error()) - oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("listUserPrivilegeStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTablePrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorUpdateExportTaskStatus struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorUpdateExportTaskStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceUpdateExportTaskStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() 
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("updateExportTaskStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTablePrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { + return err } + p.Params = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceUpdateExportTaskStatusResult{} - var retval *TFeResult_ - if retval, err2 = p.handler.UpdateExportTaskStatus(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing updateExportTaskStatus: "+err2.Error()) - oprot.WriteMessageBegin("updateExportTaskStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("updateExportTaskStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTablePrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTablePrivilegeStatus_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorLoadTxnBegin struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorLoadTxnBegin) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceLoadTxnBeginArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("loadTxnBegin", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTablePrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceLoadTxnBeginResult{} - var retval *TLoadTxnBeginResult_ - if retval, err2 = p.handler.LoadTxnBegin(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnBegin: "+err2.Error()) - oprot.WriteMessageBegin("loadTxnBegin", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - 
return true, err2 - } else { - result.Success = retval + if err := p.Params.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("loadTxnBegin", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceListTablePrivilegeStatusArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListTablePrivilegeStatusArgs(%+v)", *p) + +} + +func (p *FrontendServiceListTablePrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListTablePrivilegeStatusArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Params) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTablePrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorLoadTxnPreCommit struct { - handler FrontendService +type FrontendServiceListTablePrivilegeStatusResult struct { + Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorLoadTxnPreCommit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceLoadTxnPreCommitArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("loadTxnPreCommit", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListTablePrivilegeStatusResult() *FrontendServiceListTablePrivilegeStatusResult { + return &FrontendServiceListTablePrivilegeStatusResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceLoadTxnPreCommitResult{} - var retval *TLoadTxnCommitResult_ - if retval, err2 = p.handler.LoadTxnPreCommit(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnPreCommit: "+err2.Error()) - oprot.WriteMessageBegin("loadTxnPreCommit", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("loadTxnPreCommit", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListTablePrivilegeStatusResult) InitDefault() { +} + +var FrontendServiceListTablePrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ + +func (p *FrontendServiceListTablePrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { + 
if !p.IsSetSuccess() { + return FrontendServiceListTablePrivilegeStatusResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceListTablePrivilegeStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TListPrivilegesResult_) } -type frontendServiceProcessorLoadTxn2PC struct { - handler FrontendService +var fieldIDToName_FrontendServiceListTablePrivilegeStatusResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorLoadTxn2PC) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceLoadTxn2PCArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("loadTxn2PC", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListTablePrivilegeStatusResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceLoadTxn2PCResult{} - var retval *TLoadTxn2PCResult_ - if retval, err2 = p.handler.LoadTxn2PC(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxn2PC: "+err2.Error()) - oprot.WriteMessageBegin("loadTxn2PC", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("loadTxn2PC", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTablePrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorLoadTxnCommit struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorLoadTxnCommit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceLoadTxnCommitArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("loadTxnCommit", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTablePrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTListPrivilegesResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceLoadTxnCommitResult{} - var retval *TLoadTxnCommitResult_ - if retval, err2 = p.handler.LoadTxnCommit(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnCommit: "+err2.Error()) - oprot.WriteMessageBegin("loadTxnCommit", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("loadTxnCommit", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTablePrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listTablePrivilegeStatus_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorLoadTxnRollback struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorLoadTxnRollback) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceLoadTxnRollbackArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("loadTxnRollback", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListTablePrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceLoadTxnRollbackResult{} - var retval *TLoadTxnRollbackResult_ - if retval, err2 = p.handler.LoadTxnRollback(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing loadTxnRollback: "+err2.Error()) - oprot.WriteMessageBegin("loadTxnRollback", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("loadTxnRollback", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListTablePrivilegeStatusResult) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListTablePrivilegeStatusResult(%+v)", *p) + +} + +func (p *FrontendServiceListTablePrivilegeStatusResult) DeepEqual(ano *FrontendServiceListTablePrivilegeStatusResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListTablePrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorBeginTxn struct { - handler FrontendService +type FrontendServiceListSchemaPrivilegeStatusArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -func (p *frontendServiceProcessorBeginTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceBeginTxnArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("beginTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListSchemaPrivilegeStatusArgs() *FrontendServiceListSchemaPrivilegeStatusArgs { + return &FrontendServiceListSchemaPrivilegeStatusArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceBeginTxnResult{} - var retval *TBeginTxnResult_ - if retval, err2 = p.handler.BeginTxn(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing beginTxn: "+err2.Error()) - oprot.WriteMessageBegin("beginTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("beginTxn", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = 
err2 - } - if err != nil { - return +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) InitDefault() { +} + +var FrontendServiceListSchemaPrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams + +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return FrontendServiceListSchemaPrivilegeStatusArgs_Params_DEFAULT } - return true, err + return p.Params +} +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) SetParams(val *TGetTablesParams) { + p.Params = val } -type frontendServiceProcessorCommitTxn struct { - handler FrontendService +var fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs = map[int16]string{ + 1: "params", } -func (p *frontendServiceProcessorCommitTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceCommitTxnArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("commitTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) IsSetParams() bool { + return p.Params != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceCommitTxnResult{} - var retval *TCommitTxnResult_ - if retval, err2 = p.handler.CommitTxn(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing commitTxn: "+err2.Error()) - oprot.WriteMessageBegin("commitTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("commitTxn", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorRollbackTxn struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs[fieldId]), err) +SkipFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorRollbackTxn) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceRollbackTxnArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("rollbackTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { + return err } + p.Params = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceRollbackTxnResult{} - var retval *TRollbackTxnResult_ - if retval, err2 = p.handler.RollbackTxn(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing rollbackTxn: "+err2.Error()) - oprot.WriteMessageBegin("rollbackTxn", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("rollbackTxn", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listSchemaPrivilegeStatus_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorGetBinlog struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorGetBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetBinlogArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getBinlog", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", 
thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetBinlogResult{} - var retval *TGetBinlogResult_ - if retval, err2 = p.handler.GetBinlog(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBinlog: "+err2.Error()) - oprot.WriteMessageBegin("getBinlog", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Params.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("getBinlog", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListSchemaPrivilegeStatusArgs(%+v)", *p) + +} + +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListSchemaPrivilegeStatusArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Params) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorGetSnapshot struct { - handler FrontendService +type FrontendServiceListSchemaPrivilegeStatusResult struct { + Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorGetSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetSnapshotArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getSnapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListSchemaPrivilegeStatusResult() *FrontendServiceListSchemaPrivilegeStatusResult { + return &FrontendServiceListSchemaPrivilegeStatusResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetSnapshotResult{} - var retval *TGetSnapshotResult_ - if retval, err2 = p.handler.GetSnapshot(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSnapshot: "+err2.Error()) - oprot.WriteMessageBegin("getSnapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getSnapshot", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - 
} - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListSchemaPrivilegeStatusResult) InitDefault() { +} + +var FrontendServiceListSchemaPrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ + +func (p *FrontendServiceListSchemaPrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { + if !p.IsSetSuccess() { + return FrontendServiceListSchemaPrivilegeStatusResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceListSchemaPrivilegeStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TListPrivilegesResult_) } -type frontendServiceProcessorRestoreSnapshot struct { - handler FrontendService +var fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorRestoreSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceRestoreSnapshotArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("restoreSnapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListSchemaPrivilegeStatusResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceRestoreSnapshotResult{} - var retval *TRestoreSnapshotResult_ - if retval, err2 = p.handler.RestoreSnapshot(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing restoreSnapshot: "+err2.Error()) - oprot.WriteMessageBegin("restoreSnapshot", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("restoreSnapshot", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListSchemaPrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorWaitingTxnStatus struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorWaitingTxnStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceWaitingTxnStatusArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("waitingTxnStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListSchemaPrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTListPrivilegesResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceWaitingTxnStatusResult{} - var retval *TWaitingTxnStatusResult_ - if retval, err2 = p.handler.WaitingTxnStatus(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing waitingTxnStatus: "+err2.Error()) - oprot.WriteMessageBegin("waitingTxnStatus", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("waitingTxnStatus", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListSchemaPrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listSchemaPrivilegeStatus_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type frontendServiceProcessorStreamLoadPut struct { - handler FrontendService +func (p *FrontendServiceListSchemaPrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return 
err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *frontendServiceProcessorStreamLoadPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceStreamLoadPutArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("streamLoadPut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListSchemaPrivilegeStatusResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("FrontendServiceListSchemaPrivilegeStatusResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceStreamLoadPutResult{} - var retval *TStreamLoadPutResult_ - if retval, err2 = p.handler.StreamLoadPut(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing streamLoadPut: "+err2.Error()) - oprot.WriteMessageBegin("streamLoadPut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("streamLoadPut", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *FrontendServiceListSchemaPrivilegeStatusResult) DeepEqual(ano *FrontendServiceListSchemaPrivilegeStatusResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListSchemaPrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorStreamLoadMultiTablePut struct { - handler FrontendService +type FrontendServiceListUserPrivilegeStatusArgs struct { + Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` } -func (p *frontendServiceProcessorStreamLoadMultiTablePut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceStreamLoadMultiTablePutArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListUserPrivilegeStatusArgs() *FrontendServiceListUserPrivilegeStatusArgs { + return &FrontendServiceListUserPrivilegeStatusArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceStreamLoadMultiTablePutResult{} - var retval *TStreamLoadMultiTablePutResult_ - if retval, err2 = p.handler.StreamLoadMultiTablePut(ctx, args.Request); err2 != nil { - x := 
thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing streamLoadMultiTablePut: "+err2.Error()) - oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("streamLoadMultiTablePut", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListUserPrivilegeStatusArgs) InitDefault() { +} + +var FrontendServiceListUserPrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams + +func (p *FrontendServiceListUserPrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { + if !p.IsSetParams() { + return FrontendServiceListUserPrivilegeStatusArgs_Params_DEFAULT } - return true, err + return p.Params +} +func (p *FrontendServiceListUserPrivilegeStatusArgs) SetParams(val *TGetTablesParams) { + p.Params = val } -type frontendServiceProcessorSnapshotLoaderReport struct { - handler FrontendService +var fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs = map[int16]string{ + 1: "params", } -func (p *frontendServiceProcessorSnapshotLoaderReport) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceSnapshotLoaderReportArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("snapshotLoaderReport", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListUserPrivilegeStatusArgs) IsSetParams() bool { + return p.Params != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceSnapshotLoaderReportResult{} - var retval *status.TStatus - if retval, err2 = p.handler.SnapshotLoaderReport(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing snapshotLoaderReport: "+err2.Error()) - oprot.WriteMessageBegin("snapshotLoaderReport", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("snapshotLoaderReport", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListUserPrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { 
+ goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorPing struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorPing) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServicePingArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("ping", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListUserPrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTablesParams() + if err := _field.Read(iprot); err != nil { + return err } + p.Params = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServicePingResult{} - var retval *TFrontendPingFrontendResult_ - if retval, err2 = p.handler.Ping(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ping: "+err2.Error()) - oprot.WriteMessageBegin("ping", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("ping", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListUserPrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listUserPrivilegeStatus_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorAddColumns struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T 
write struct end error: ", p), err) } -func (p *frontendServiceProcessorAddColumns) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceAddColumnsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("addColumns", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListUserPrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceAddColumnsResult{} - var retval *TAddColumnsResult_ - if retval, err2 = p.handler.AddColumns(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing addColumns: "+err2.Error()) - oprot.WriteMessageBegin("addColumns", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Params.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("addColumns", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceListUserPrivilegeStatusArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceListUserPrivilegeStatusArgs(%+v)", *p) + +} + +func (p *FrontendServiceListUserPrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListUserPrivilegeStatusArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Params) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListUserPrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { + + if !p.Params.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorInitExternalCtlMeta struct { - handler FrontendService +type FrontendServiceListUserPrivilegeStatusResult struct { + Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorInitExternalCtlMeta) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceInitExternalCtlMetaArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("initExternalCtlMeta", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceListUserPrivilegeStatusResult() *FrontendServiceListUserPrivilegeStatusResult { + return 
&FrontendServiceListUserPrivilegeStatusResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceInitExternalCtlMetaResult{} - var retval *TInitExternalCtlMetaResult_ - if retval, err2 = p.handler.InitExternalCtlMeta(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing initExternalCtlMeta: "+err2.Error()) - oprot.WriteMessageBegin("initExternalCtlMeta", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("initExternalCtlMeta", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceListUserPrivilegeStatusResult) InitDefault() { +} + +var FrontendServiceListUserPrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ + +func (p *FrontendServiceListUserPrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { + if !p.IsSetSuccess() { + return FrontendServiceListUserPrivilegeStatusResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceListUserPrivilegeStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TListPrivilegesResult_) } -type frontendServiceProcessorFetchSchemaTableData struct { - handler FrontendService +var fieldIDToName_FrontendServiceListUserPrivilegeStatusResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorFetchSchemaTableData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceFetchSchemaTableDataArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("fetchSchemaTableData", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceListUserPrivilegeStatusResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceFetchSchemaTableDataResult{} - var retval *TFetchSchemaTableDataResult_ - if retval, err2 = p.handler.FetchSchemaTableData(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchSchemaTableData: "+err2.Error()) - oprot.WriteMessageBegin("fetchSchemaTableData", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("fetchSchemaTableData", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListUserPrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorAcquireToken struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorAcquireToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceAcquireTokenArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("acquireToken", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListUserPrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTListPrivilegesResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceAcquireTokenResult{} - var retval *TMySqlLoadAcquireTokenResult_ - if retval, err2 = p.handler.AcquireToken(ctx); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing acquireToken: "+err2.Error()) - oprot.WriteMessageBegin("acquireToken", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("acquireToken", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceListUserPrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("listUserPrivilegeStatus_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type frontendServiceProcessorConfirmUnusedRemoteFiles struct { - handler FrontendService +func (p *FrontendServiceListUserPrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *frontendServiceProcessorConfirmUnusedRemoteFiles) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceConfirmUnusedRemoteFilesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceListUserPrivilegeStatusResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("FrontendServiceListUserPrivilegeStatusResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceConfirmUnusedRemoteFilesResult{} - var retval *TConfirmUnusedRemoteFilesResult_ - if retval, err2 = p.handler.ConfirmUnusedRemoteFiles(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing confirmUnusedRemoteFiles: "+err2.Error()) - oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("confirmUnusedRemoteFiles", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *FrontendServiceListUserPrivilegeStatusResult) DeepEqual(ano *FrontendServiceListUserPrivilegeStatusResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceListUserPrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type frontendServiceProcessorCheckAuth struct { - handler FrontendService +type FrontendServiceUpdateExportTaskStatusArgs struct { + Request *TUpdateExportTaskStatusRequest `thrift:"request,1" frugal:"1,default,TUpdateExportTaskStatusRequest" json:"request"` } -func (p *frontendServiceProcessorCheckAuth) Process(ctx 
context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceCheckAuthArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("checkAuth", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceUpdateExportTaskStatusArgs() *FrontendServiceUpdateExportTaskStatusArgs { + return &FrontendServiceUpdateExportTaskStatusArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceCheckAuthResult{} - var retval *TCheckAuthResult_ - if retval, err2 = p.handler.CheckAuth(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing checkAuth: "+err2.Error()) - oprot.WriteMessageBegin("checkAuth", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("checkAuth", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceUpdateExportTaskStatusArgs) InitDefault() { +} + +var FrontendServiceUpdateExportTaskStatusArgs_Request_DEFAULT *TUpdateExportTaskStatusRequest + +func (p *FrontendServiceUpdateExportTaskStatusArgs) GetRequest() (v *TUpdateExportTaskStatusRequest) { + if !p.IsSetRequest() { + return FrontendServiceUpdateExportTaskStatusArgs_Request_DEFAULT } - return true, err + return p.Request +} +func (p *FrontendServiceUpdateExportTaskStatusArgs) SetRequest(val *TUpdateExportTaskStatusRequest) { + p.Request = val } -type frontendServiceProcessorGetQueryStats struct { - handler FrontendService +var fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs = map[int16]string{ + 1: "request", } -func (p *frontendServiceProcessorGetQueryStats) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetQueryStatsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getQueryStats", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceUpdateExportTaskStatusArgs) IsSetRequest() bool { + return p.Request != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetQueryStatsResult{} - var retval *TQueryStatsResult_ - if retval, err2 = p.handler.GetQueryStats(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getQueryStats: "+err2.Error()) - oprot.WriteMessageBegin("getQueryStats", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getQueryStats", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p 
*FrontendServiceUpdateExportTaskStatusArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorGetTabletReplicaInfos struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorGetTabletReplicaInfos) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetTabletReplicaInfosArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceUpdateExportTaskStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTUpdateExportTaskStatusRequest() + if err := _field.Read(iprot); err != nil { + return err } + p.Request = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetTabletReplicaInfosResult{} - var retval *TGetTabletReplicaInfosResult_ - if retval, err2 = p.handler.GetTabletReplicaInfos(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getTabletReplicaInfos: "+err2.Error()) - oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getTabletReplicaInfos", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceUpdateExportTaskStatusArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("updateExportTaskStatus_args"); err != nil { + goto WriteStructBeginError } - if err2 = 
oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err -} - -type frontendServiceProcessorGetMasterToken struct { - handler FrontendService + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *frontendServiceProcessorGetMasterToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetMasterTokenArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getMasterToken", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceUpdateExportTaskStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetMasterTokenResult{} - var retval *TGetMasterTokenResult_ - if retval, err2 = p.handler.GetMasterToken(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getMasterToken: "+err2.Error()) - oprot.WriteMessageBegin("getMasterToken", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Request.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("getMasterToken", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceUpdateExportTaskStatusArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("FrontendServiceUpdateExportTaskStatusArgs(%+v)", *p) + +} + +func (p *FrontendServiceUpdateExportTaskStatusArgs) DeepEqual(ano *FrontendServiceUpdateExportTaskStatusArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Request) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceUpdateExportTaskStatusArgs) Field1DeepEqual(src *TUpdateExportTaskStatusRequest) bool { + + if !p.Request.DeepEqual(src) { + return false } - return true, err 
+ return true } -type frontendServiceProcessorGetBinlogLag struct { - handler FrontendService +type FrontendServiceUpdateExportTaskStatusResult struct { + Success *TFeResult_ `thrift:"success,0,optional" frugal:"0,optional,TFeResult_" json:"success,omitempty"` } -func (p *frontendServiceProcessorGetBinlogLag) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetBinlogLagArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getBinlogLag", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewFrontendServiceUpdateExportTaskStatusResult() *FrontendServiceUpdateExportTaskStatusResult { + return &FrontendServiceUpdateExportTaskStatusResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetBinlogLagResult{} - var retval *TGetBinlogLagResult_ - if retval, err2 = p.handler.GetBinlogLag(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBinlogLag: "+err2.Error()) - oprot.WriteMessageBegin("getBinlogLag", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getBinlogLag", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return +func (p *FrontendServiceUpdateExportTaskStatusResult) InitDefault() { +} + +var FrontendServiceUpdateExportTaskStatusResult_Success_DEFAULT *TFeResult_ + +func (p *FrontendServiceUpdateExportTaskStatusResult) GetSuccess() (v *TFeResult_) { + if !p.IsSetSuccess() { + return FrontendServiceUpdateExportTaskStatusResult_Success_DEFAULT } - return true, err + return p.Success +} +func (p *FrontendServiceUpdateExportTaskStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TFeResult_) } -type frontendServiceProcessorUpdateStatsCache struct { - handler FrontendService +var fieldIDToName_FrontendServiceUpdateExportTaskStatusResult = map[int16]string{ + 0: "success", } -func (p *frontendServiceProcessorUpdateStatsCache) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceUpdateStatsCacheArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("updateStatsCache", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *FrontendServiceUpdateExportTaskStatusResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceUpdateStatsCacheResult{} - var retval *status.TStatus - if retval, err2 = p.handler.UpdateStatsCache(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing updateStatsCache: "+err2.Error()) - oprot.WriteMessageBegin("updateStatsCache", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - 
oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("updateStatsCache", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceUpdateExportTaskStatusResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type frontendServiceProcessorGetAutoIncrementRange struct { - handler FrontendService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *frontendServiceProcessorGetAutoIncrementRange) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceGetAutoIncrementRangeArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getAutoIncrementRange", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceUpdateExportTaskStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTFeResult_() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceGetAutoIncrementRangeResult{} - var retval *TAutoIncrementRangeResult_ - if retval, err2 = p.handler.GetAutoIncrementRange(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getAutoIncrementRange: "+err2.Error()) - oprot.WriteMessageBegin("getAutoIncrementRange", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getAutoIncrementRange", thrift.REPLY, seqId); err2 != nil { - err = err2 
- } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *FrontendServiceUpdateExportTaskStatusResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("updateExportTaskStatus_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err != nil { - return + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true, err + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -type frontendServiceProcessorCreatePartition struct { - handler FrontendService +func (p *FrontendServiceUpdateExportTaskStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *frontendServiceProcessorCreatePartition) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := FrontendServiceCreatePartitionArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("createPartition", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *FrontendServiceUpdateExportTaskStatusResult) String() string { + if p == nil { + return "" } + return fmt.Sprintf("FrontendServiceUpdateExportTaskStatusResult(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := FrontendServiceCreatePartitionResult{} - var retval *TCreatePartitionResult_ - if retval, err2 = p.handler.CreatePartition(ctx, args.Request); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing createPartition: "+err2.Error()) - oprot.WriteMessageBegin("createPartition", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("createPartition", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *FrontendServiceUpdateExportTaskStatusResult) DeepEqual(ano *FrontendServiceUpdateExportTaskStatusResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil 
{ + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *FrontendServiceUpdateExportTaskStatusResult) Field0DeepEqual(src *TFeResult_) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type FrontendServiceGetDbNamesArgs struct { - Params *TGetDbsParams `thrift:"params,1" frugal:"1,default,TGetDbsParams" json:"params"` +type FrontendServiceLoadTxnBeginArgs struct { + Request *TLoadTxnBeginRequest `thrift:"request,1" frugal:"1,default,TLoadTxnBeginRequest" json:"request"` } -func NewFrontendServiceGetDbNamesArgs() *FrontendServiceGetDbNamesArgs { - return &FrontendServiceGetDbNamesArgs{} +func NewFrontendServiceLoadTxnBeginArgs() *FrontendServiceLoadTxnBeginArgs { + return &FrontendServiceLoadTxnBeginArgs{} } -func (p *FrontendServiceGetDbNamesArgs) InitDefault() { - *p = FrontendServiceGetDbNamesArgs{} +func (p *FrontendServiceLoadTxnBeginArgs) InitDefault() { } -var FrontendServiceGetDbNamesArgs_Params_DEFAULT *TGetDbsParams +var FrontendServiceLoadTxnBeginArgs_Request_DEFAULT *TLoadTxnBeginRequest -func (p *FrontendServiceGetDbNamesArgs) GetParams() (v *TGetDbsParams) { - if !p.IsSetParams() { - return FrontendServiceGetDbNamesArgs_Params_DEFAULT +func (p *FrontendServiceLoadTxnBeginArgs) GetRequest() (v *TLoadTxnBeginRequest) { + if !p.IsSetRequest() { + return FrontendServiceLoadTxnBeginArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceGetDbNamesArgs) SetParams(val *TGetDbsParams) { - p.Params = val +func (p *FrontendServiceLoadTxnBeginArgs) SetRequest(val *TLoadTxnBeginRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceGetDbNamesArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceLoadTxnBeginArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceGetDbNamesArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceLoadTxnBeginArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceGetDbNamesArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnBeginArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -56333,17 +85742,14 @@ func (p *FrontendServiceGetDbNamesArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -56358,7 +85764,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -56368,17 +85774,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p 
*FrontendServiceGetDbNamesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetDbsParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnBeginArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTLoadTxnBeginRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetDbNamesArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnBeginArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getDbNames_args"); err != nil { + if err = oprot.WriteStructBegin("loadTxnBegin_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -56386,7 +85793,6 @@ func (p *FrontendServiceGetDbNamesArgs) Write(oprot thrift.TProtocol) (err error fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -56405,11 +85811,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetDbNamesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceLoadTxnBeginArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -56422,66 +85828,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetDbNamesArgs) String() string { +func (p *FrontendServiceLoadTxnBeginArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetDbNamesArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnBeginArgs(%+v)", *p) + } -func (p *FrontendServiceGetDbNamesArgs) DeepEqual(ano *FrontendServiceGetDbNamesArgs) bool { +func (p *FrontendServiceLoadTxnBeginArgs) DeepEqual(ano *FrontendServiceLoadTxnBeginArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceGetDbNamesArgs) Field1DeepEqual(src *TGetDbsParams) bool { +func (p *FrontendServiceLoadTxnBeginArgs) Field1DeepEqual(src *TLoadTxnBeginRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceGetDbNamesResult struct { - Success *TGetDbsResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetDbsResult_" json:"success,omitempty"` +type FrontendServiceLoadTxnBeginResult struct { + Success *TLoadTxnBeginResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnBeginResult_" json:"success,omitempty"` } -func NewFrontendServiceGetDbNamesResult() *FrontendServiceGetDbNamesResult { - return &FrontendServiceGetDbNamesResult{} +func NewFrontendServiceLoadTxnBeginResult() *FrontendServiceLoadTxnBeginResult { + return &FrontendServiceLoadTxnBeginResult{} } -func (p *FrontendServiceGetDbNamesResult) InitDefault() { - *p = FrontendServiceGetDbNamesResult{} +func (p *FrontendServiceLoadTxnBeginResult) InitDefault() { } -var FrontendServiceGetDbNamesResult_Success_DEFAULT *TGetDbsResult_ +var FrontendServiceLoadTxnBeginResult_Success_DEFAULT *TLoadTxnBeginResult_ 
-func (p *FrontendServiceGetDbNamesResult) GetSuccess() (v *TGetDbsResult_) { +func (p *FrontendServiceLoadTxnBeginResult) GetSuccess() (v *TLoadTxnBeginResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetDbNamesResult_Success_DEFAULT + return FrontendServiceLoadTxnBeginResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetDbNamesResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetDbsResult_) +func (p *FrontendServiceLoadTxnBeginResult) SetSuccess(x interface{}) { + p.Success = x.(*TLoadTxnBeginResult_) } -var fieldIDToName_FrontendServiceGetDbNamesResult = map[int16]string{ +var fieldIDToName_FrontendServiceLoadTxnBeginResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetDbNamesResult) IsSetSuccess() bool { +func (p *FrontendServiceLoadTxnBeginResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetDbNamesResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnBeginResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -56505,17 +85911,14 @@ func (p *FrontendServiceGetDbNamesResult) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -56530,7 +85933,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -56540,17 +85943,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetDbNamesResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetDbsResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnBeginResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTLoadTxnBeginResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetDbNamesResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnBeginResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getDbNames_result"); err != nil { + if err = oprot.WriteStructBegin("loadTxnBegin_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -56558,7 +85962,6 @@ func (p *FrontendServiceGetDbNamesResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -56577,7 +85980,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetDbNamesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnBeginResult) writeField0(oprot 
thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -56596,14 +85999,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetDbNamesResult) String() string { +func (p *FrontendServiceLoadTxnBeginResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetDbNamesResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnBeginResult(%+v)", *p) + } -func (p *FrontendServiceGetDbNamesResult) DeepEqual(ano *FrontendServiceGetDbNamesResult) bool { +func (p *FrontendServiceLoadTxnBeginResult) DeepEqual(ano *FrontendServiceLoadTxnBeginResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -56615,7 +86019,7 @@ func (p *FrontendServiceGetDbNamesResult) DeepEqual(ano *FrontendServiceGetDbNam return true } -func (p *FrontendServiceGetDbNamesResult) Field0DeepEqual(src *TGetDbsResult_) bool { +func (p *FrontendServiceLoadTxnBeginResult) Field0DeepEqual(src *TLoadTxnBeginResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -56623,39 +86027,38 @@ func (p *FrontendServiceGetDbNamesResult) Field0DeepEqual(src *TGetDbsResult_) b return true } -type FrontendServiceGetTableNamesArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceLoadTxnPreCommitArgs struct { + Request *TLoadTxnCommitRequest `thrift:"request,1" frugal:"1,default,TLoadTxnCommitRequest" json:"request"` } -func NewFrontendServiceGetTableNamesArgs() *FrontendServiceGetTableNamesArgs { - return &FrontendServiceGetTableNamesArgs{} +func NewFrontendServiceLoadTxnPreCommitArgs() *FrontendServiceLoadTxnPreCommitArgs { + return &FrontendServiceLoadTxnPreCommitArgs{} } -func (p *FrontendServiceGetTableNamesArgs) InitDefault() { - *p = FrontendServiceGetTableNamesArgs{} +func (p *FrontendServiceLoadTxnPreCommitArgs) InitDefault() { } -var FrontendServiceGetTableNamesArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceLoadTxnPreCommitArgs_Request_DEFAULT *TLoadTxnCommitRequest -func (p *FrontendServiceGetTableNamesArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return FrontendServiceGetTableNamesArgs_Params_DEFAULT +func (p *FrontendServiceLoadTxnPreCommitArgs) GetRequest() (v *TLoadTxnCommitRequest) { + if !p.IsSetRequest() { + return FrontendServiceLoadTxnPreCommitArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceGetTableNamesArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceLoadTxnPreCommitArgs) SetRequest(val *TLoadTxnCommitRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceGetTableNamesArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceLoadTxnPreCommitArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceGetTableNamesArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceLoadTxnPreCommitArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceGetTableNamesArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnPreCommitArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -56679,17 +86082,14 @@ func (p *FrontendServiceGetTableNamesArgs) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -56704,7 +86104,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -56714,17 +86114,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnPreCommitArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTLoadTxnCommitRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetTableNamesArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnPreCommitArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getTableNames_args"); err != nil { + if err = oprot.WriteStructBegin("loadTxnPreCommit_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -56732,7 +86133,6 @@ func (p *FrontendServiceGetTableNamesArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -56751,11 +86151,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceLoadTxnPreCommitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -56768,66 +86168,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetTableNamesArgs) String() string { +func (p *FrontendServiceLoadTxnPreCommitArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetTableNamesArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnPreCommitArgs(%+v)", *p) + } -func (p *FrontendServiceGetTableNamesArgs) DeepEqual(ano *FrontendServiceGetTableNamesArgs) bool { +func (p *FrontendServiceLoadTxnPreCommitArgs) DeepEqual(ano *FrontendServiceLoadTxnPreCommitArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceGetTableNamesArgs) Field1DeepEqual(src *TGetTablesParams) bool 
{ +func (p *FrontendServiceLoadTxnPreCommitArgs) Field1DeepEqual(src *TLoadTxnCommitRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceGetTableNamesResult struct { - Success *TGetTablesResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetTablesResult_" json:"success,omitempty"` +type FrontendServiceLoadTxnPreCommitResult struct { + Success *TLoadTxnCommitResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnCommitResult_" json:"success,omitempty"` } -func NewFrontendServiceGetTableNamesResult() *FrontendServiceGetTableNamesResult { - return &FrontendServiceGetTableNamesResult{} +func NewFrontendServiceLoadTxnPreCommitResult() *FrontendServiceLoadTxnPreCommitResult { + return &FrontendServiceLoadTxnPreCommitResult{} } -func (p *FrontendServiceGetTableNamesResult) InitDefault() { - *p = FrontendServiceGetTableNamesResult{} +func (p *FrontendServiceLoadTxnPreCommitResult) InitDefault() { } -var FrontendServiceGetTableNamesResult_Success_DEFAULT *TGetTablesResult_ +var FrontendServiceLoadTxnPreCommitResult_Success_DEFAULT *TLoadTxnCommitResult_ -func (p *FrontendServiceGetTableNamesResult) GetSuccess() (v *TGetTablesResult_) { +func (p *FrontendServiceLoadTxnPreCommitResult) GetSuccess() (v *TLoadTxnCommitResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetTableNamesResult_Success_DEFAULT + return FrontendServiceLoadTxnPreCommitResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetTableNamesResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetTablesResult_) +func (p *FrontendServiceLoadTxnPreCommitResult) SetSuccess(x interface{}) { + p.Success = x.(*TLoadTxnCommitResult_) } -var fieldIDToName_FrontendServiceGetTableNamesResult = map[int16]string{ +var fieldIDToName_FrontendServiceLoadTxnPreCommitResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetTableNamesResult) IsSetSuccess() bool { +func (p *FrontendServiceLoadTxnPreCommitResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetTableNamesResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnPreCommitResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -56851,17 +86251,14 @@ func (p *FrontendServiceGetTableNamesResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -56876,7 +86273,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -56886,17 +86283,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesResult) 
ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetTablesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnPreCommitResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTLoadTxnCommitResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetTableNamesResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnPreCommitResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getTableNames_result"); err != nil { + if err = oprot.WriteStructBegin("loadTxnPreCommit_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -56904,7 +86302,6 @@ func (p *FrontendServiceGetTableNamesResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -56923,7 +86320,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnPreCommitResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -56942,14 +86339,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetTableNamesResult) String() string { +func (p *FrontendServiceLoadTxnPreCommitResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetTableNamesResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnPreCommitResult(%+v)", *p) + } -func (p *FrontendServiceGetTableNamesResult) DeepEqual(ano *FrontendServiceGetTableNamesResult) bool { +func (p *FrontendServiceLoadTxnPreCommitResult) DeepEqual(ano *FrontendServiceLoadTxnPreCommitResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -56961,7 +86359,7 @@ func (p *FrontendServiceGetTableNamesResult) DeepEqual(ano *FrontendServiceGetTa return true } -func (p *FrontendServiceGetTableNamesResult) Field0DeepEqual(src *TGetTablesResult_) bool { +func (p *FrontendServiceLoadTxnPreCommitResult) Field0DeepEqual(src *TLoadTxnCommitResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -56969,39 +86367,38 @@ func (p *FrontendServiceGetTableNamesResult) Field0DeepEqual(src *TGetTablesResu return true } -type FrontendServiceDescribeTableArgs struct { - Params *TDescribeTableParams `thrift:"params,1" frugal:"1,default,TDescribeTableParams" json:"params"` +type FrontendServiceLoadTxn2PCArgs struct { + Request *TLoadTxn2PCRequest `thrift:"request,1" frugal:"1,default,TLoadTxn2PCRequest" json:"request"` } -func NewFrontendServiceDescribeTableArgs() *FrontendServiceDescribeTableArgs { - return &FrontendServiceDescribeTableArgs{} +func NewFrontendServiceLoadTxn2PCArgs() *FrontendServiceLoadTxn2PCArgs { + return &FrontendServiceLoadTxn2PCArgs{} } -func (p *FrontendServiceDescribeTableArgs) InitDefault() { - *p = FrontendServiceDescribeTableArgs{} +func (p *FrontendServiceLoadTxn2PCArgs) InitDefault() { } -var FrontendServiceDescribeTableArgs_Params_DEFAULT *TDescribeTableParams +var FrontendServiceLoadTxn2PCArgs_Request_DEFAULT *TLoadTxn2PCRequest -func (p *FrontendServiceDescribeTableArgs) GetParams() (v *TDescribeTableParams) { - if !p.IsSetParams() { - return 
FrontendServiceDescribeTableArgs_Params_DEFAULT +func (p *FrontendServiceLoadTxn2PCArgs) GetRequest() (v *TLoadTxn2PCRequest) { + if !p.IsSetRequest() { + return FrontendServiceLoadTxn2PCArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceDescribeTableArgs) SetParams(val *TDescribeTableParams) { - p.Params = val +func (p *FrontendServiceLoadTxn2PCArgs) SetRequest(val *TLoadTxn2PCRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceDescribeTableArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceLoadTxn2PCArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceDescribeTableArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceLoadTxn2PCArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceDescribeTableArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxn2PCArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57025,17 +86422,14 @@ func (p *FrontendServiceDescribeTableArgs) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57050,7 +86444,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57060,17 +86454,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTDescribeTableParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxn2PCArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTLoadTxn2PCRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceDescribeTableArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxn2PCArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("describeTable_args"); err != nil { + if err = oprot.WriteStructBegin("loadTxn2PC_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57078,7 +86473,6 @@ func (p *FrontendServiceDescribeTableArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57097,11 +86491,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceLoadTxn2PCArgs) writeField1(oprot 
thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -57114,66 +86508,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceDescribeTableArgs) String() string { +func (p *FrontendServiceLoadTxn2PCArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceDescribeTableArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxn2PCArgs(%+v)", *p) + } -func (p *FrontendServiceDescribeTableArgs) DeepEqual(ano *FrontendServiceDescribeTableArgs) bool { +func (p *FrontendServiceLoadTxn2PCArgs) DeepEqual(ano *FrontendServiceLoadTxn2PCArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceDescribeTableArgs) Field1DeepEqual(src *TDescribeTableParams) bool { +func (p *FrontendServiceLoadTxn2PCArgs) Field1DeepEqual(src *TLoadTxn2PCRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceDescribeTableResult struct { - Success *TDescribeTableResult_ `thrift:"success,0,optional" frugal:"0,optional,TDescribeTableResult_" json:"success,omitempty"` +type FrontendServiceLoadTxn2PCResult struct { + Success *TLoadTxn2PCResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxn2PCResult_" json:"success,omitempty"` } -func NewFrontendServiceDescribeTableResult() *FrontendServiceDescribeTableResult { - return &FrontendServiceDescribeTableResult{} +func NewFrontendServiceLoadTxn2PCResult() *FrontendServiceLoadTxn2PCResult { + return &FrontendServiceLoadTxn2PCResult{} } -func (p *FrontendServiceDescribeTableResult) InitDefault() { - *p = FrontendServiceDescribeTableResult{} +func (p *FrontendServiceLoadTxn2PCResult) InitDefault() { } -var FrontendServiceDescribeTableResult_Success_DEFAULT *TDescribeTableResult_ +var FrontendServiceLoadTxn2PCResult_Success_DEFAULT *TLoadTxn2PCResult_ -func (p *FrontendServiceDescribeTableResult) GetSuccess() (v *TDescribeTableResult_) { +func (p *FrontendServiceLoadTxn2PCResult) GetSuccess() (v *TLoadTxn2PCResult_) { if !p.IsSetSuccess() { - return FrontendServiceDescribeTableResult_Success_DEFAULT + return FrontendServiceLoadTxn2PCResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceDescribeTableResult) SetSuccess(x interface{}) { - p.Success = x.(*TDescribeTableResult_) +func (p *FrontendServiceLoadTxn2PCResult) SetSuccess(x interface{}) { + p.Success = x.(*TLoadTxn2PCResult_) } -var fieldIDToName_FrontendServiceDescribeTableResult = map[int16]string{ +var fieldIDToName_FrontendServiceLoadTxn2PCResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceDescribeTableResult) IsSetSuccess() bool { +func (p *FrontendServiceLoadTxn2PCResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceDescribeTableResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxn2PCResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57197,17 +86591,14 @@ func (p *FrontendServiceDescribeTableResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } 
else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57222,7 +86613,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57232,17 +86623,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTDescribeTableResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxn2PCResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTLoadTxn2PCResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceDescribeTableResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxn2PCResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("describeTable_result"); err != nil { + if err = oprot.WriteStructBegin("loadTxn2PC_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57250,7 +86642,6 @@ func (p *FrontendServiceDescribeTableResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57269,7 +86660,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxn2PCResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -57288,14 +86679,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceDescribeTableResult) String() string { +func (p *FrontendServiceLoadTxn2PCResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceDescribeTableResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxn2PCResult(%+v)", *p) + } -func (p *FrontendServiceDescribeTableResult) DeepEqual(ano *FrontendServiceDescribeTableResult) bool { +func (p *FrontendServiceLoadTxn2PCResult) DeepEqual(ano *FrontendServiceLoadTxn2PCResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -57307,7 +86699,7 @@ func (p *FrontendServiceDescribeTableResult) DeepEqual(ano *FrontendServiceDescr return true } -func (p *FrontendServiceDescribeTableResult) Field0DeepEqual(src *TDescribeTableResult_) bool { +func (p *FrontendServiceLoadTxn2PCResult) Field0DeepEqual(src *TLoadTxn2PCResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -57315,39 +86707,38 @@ func (p 
*FrontendServiceDescribeTableResult) Field0DeepEqual(src *TDescribeTable return true } -type FrontendServiceDescribeTablesArgs struct { - Params *TDescribeTablesParams `thrift:"params,1" frugal:"1,default,TDescribeTablesParams" json:"params"` +type FrontendServiceLoadTxnCommitArgs struct { + Request *TLoadTxnCommitRequest `thrift:"request,1" frugal:"1,default,TLoadTxnCommitRequest" json:"request"` } -func NewFrontendServiceDescribeTablesArgs() *FrontendServiceDescribeTablesArgs { - return &FrontendServiceDescribeTablesArgs{} +func NewFrontendServiceLoadTxnCommitArgs() *FrontendServiceLoadTxnCommitArgs { + return &FrontendServiceLoadTxnCommitArgs{} } -func (p *FrontendServiceDescribeTablesArgs) InitDefault() { - *p = FrontendServiceDescribeTablesArgs{} +func (p *FrontendServiceLoadTxnCommitArgs) InitDefault() { } -var FrontendServiceDescribeTablesArgs_Params_DEFAULT *TDescribeTablesParams +var FrontendServiceLoadTxnCommitArgs_Request_DEFAULT *TLoadTxnCommitRequest -func (p *FrontendServiceDescribeTablesArgs) GetParams() (v *TDescribeTablesParams) { - if !p.IsSetParams() { - return FrontendServiceDescribeTablesArgs_Params_DEFAULT +func (p *FrontendServiceLoadTxnCommitArgs) GetRequest() (v *TLoadTxnCommitRequest) { + if !p.IsSetRequest() { + return FrontendServiceLoadTxnCommitArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceDescribeTablesArgs) SetParams(val *TDescribeTablesParams) { - p.Params = val +func (p *FrontendServiceLoadTxnCommitArgs) SetRequest(val *TLoadTxnCommitRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceDescribeTablesArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceLoadTxnCommitArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceDescribeTablesArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceLoadTxnCommitArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceDescribeTablesArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnCommitArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57371,17 +86762,14 @@ func (p *FrontendServiceDescribeTablesArgs) Read(iprot thrift.TProtocol) (err er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57396,7 +86784,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57406,17 +86794,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTDescribeTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p 
*FrontendServiceLoadTxnCommitArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTLoadTxnCommitRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceDescribeTablesArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnCommitArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("describeTables_args"); err != nil { + if err = oprot.WriteStructBegin("loadTxnCommit_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57424,7 +86813,6 @@ func (p *FrontendServiceDescribeTablesArgs) Write(oprot thrift.TProtocol) (err e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57443,11 +86831,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceLoadTxnCommitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -57460,66 +86848,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceDescribeTablesArgs) String() string { +func (p *FrontendServiceLoadTxnCommitArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceDescribeTablesArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnCommitArgs(%+v)", *p) + } -func (p *FrontendServiceDescribeTablesArgs) DeepEqual(ano *FrontendServiceDescribeTablesArgs) bool { +func (p *FrontendServiceLoadTxnCommitArgs) DeepEqual(ano *FrontendServiceLoadTxnCommitArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceDescribeTablesArgs) Field1DeepEqual(src *TDescribeTablesParams) bool { +func (p *FrontendServiceLoadTxnCommitArgs) Field1DeepEqual(src *TLoadTxnCommitRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceDescribeTablesResult struct { - Success *TDescribeTablesResult_ `thrift:"success,0,optional" frugal:"0,optional,TDescribeTablesResult_" json:"success,omitempty"` +type FrontendServiceLoadTxnCommitResult struct { + Success *TLoadTxnCommitResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnCommitResult_" json:"success,omitempty"` } -func NewFrontendServiceDescribeTablesResult() *FrontendServiceDescribeTablesResult { - return &FrontendServiceDescribeTablesResult{} +func NewFrontendServiceLoadTxnCommitResult() *FrontendServiceLoadTxnCommitResult { + return &FrontendServiceLoadTxnCommitResult{} } -func (p *FrontendServiceDescribeTablesResult) InitDefault() { - *p = FrontendServiceDescribeTablesResult{} +func (p *FrontendServiceLoadTxnCommitResult) InitDefault() { } -var FrontendServiceDescribeTablesResult_Success_DEFAULT *TDescribeTablesResult_ +var FrontendServiceLoadTxnCommitResult_Success_DEFAULT *TLoadTxnCommitResult_ -func (p *FrontendServiceDescribeTablesResult) 
GetSuccess() (v *TDescribeTablesResult_) { +func (p *FrontendServiceLoadTxnCommitResult) GetSuccess() (v *TLoadTxnCommitResult_) { if !p.IsSetSuccess() { - return FrontendServiceDescribeTablesResult_Success_DEFAULT + return FrontendServiceLoadTxnCommitResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceDescribeTablesResult) SetSuccess(x interface{}) { - p.Success = x.(*TDescribeTablesResult_) +func (p *FrontendServiceLoadTxnCommitResult) SetSuccess(x interface{}) { + p.Success = x.(*TLoadTxnCommitResult_) } -var fieldIDToName_FrontendServiceDescribeTablesResult = map[int16]string{ +var fieldIDToName_FrontendServiceLoadTxnCommitResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceDescribeTablesResult) IsSetSuccess() bool { +func (p *FrontendServiceLoadTxnCommitResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceDescribeTablesResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnCommitResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57543,17 +86931,14 @@ func (p *FrontendServiceDescribeTablesResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57568,7 +86953,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57578,17 +86963,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTDescribeTablesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnCommitResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTLoadTxnCommitResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceDescribeTablesResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnCommitResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("describeTables_result"); err != nil { + if err = oprot.WriteStructBegin("loadTxnCommit_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57596,7 +86982,6 @@ func (p *FrontendServiceDescribeTablesResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57615,7 +87000,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p 
*FrontendServiceLoadTxnCommitResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -57634,14 +87019,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceDescribeTablesResult) String() string { +func (p *FrontendServiceLoadTxnCommitResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceDescribeTablesResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnCommitResult(%+v)", *p) + } -func (p *FrontendServiceDescribeTablesResult) DeepEqual(ano *FrontendServiceDescribeTablesResult) bool { +func (p *FrontendServiceLoadTxnCommitResult) DeepEqual(ano *FrontendServiceLoadTxnCommitResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -57653,7 +87039,7 @@ func (p *FrontendServiceDescribeTablesResult) DeepEqual(ano *FrontendServiceDesc return true } -func (p *FrontendServiceDescribeTablesResult) Field0DeepEqual(src *TDescribeTablesResult_) bool { +func (p *FrontendServiceLoadTxnCommitResult) Field0DeepEqual(src *TLoadTxnCommitResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -57661,39 +87047,38 @@ func (p *FrontendServiceDescribeTablesResult) Field0DeepEqual(src *TDescribeTabl return true } -type FrontendServiceShowVariablesArgs struct { - Params *TShowVariableRequest `thrift:"params,1" frugal:"1,default,TShowVariableRequest" json:"params"` +type FrontendServiceLoadTxnRollbackArgs struct { + Request *TLoadTxnRollbackRequest `thrift:"request,1" frugal:"1,default,TLoadTxnRollbackRequest" json:"request"` } -func NewFrontendServiceShowVariablesArgs() *FrontendServiceShowVariablesArgs { - return &FrontendServiceShowVariablesArgs{} +func NewFrontendServiceLoadTxnRollbackArgs() *FrontendServiceLoadTxnRollbackArgs { + return &FrontendServiceLoadTxnRollbackArgs{} } -func (p *FrontendServiceShowVariablesArgs) InitDefault() { - *p = FrontendServiceShowVariablesArgs{} +func (p *FrontendServiceLoadTxnRollbackArgs) InitDefault() { } -var FrontendServiceShowVariablesArgs_Params_DEFAULT *TShowVariableRequest +var FrontendServiceLoadTxnRollbackArgs_Request_DEFAULT *TLoadTxnRollbackRequest -func (p *FrontendServiceShowVariablesArgs) GetParams() (v *TShowVariableRequest) { - if !p.IsSetParams() { - return FrontendServiceShowVariablesArgs_Params_DEFAULT +func (p *FrontendServiceLoadTxnRollbackArgs) GetRequest() (v *TLoadTxnRollbackRequest) { + if !p.IsSetRequest() { + return FrontendServiceLoadTxnRollbackArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceShowVariablesArgs) SetParams(val *TShowVariableRequest) { - p.Params = val +func (p *FrontendServiceLoadTxnRollbackArgs) SetRequest(val *TLoadTxnRollbackRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceShowVariablesArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceLoadTxnRollbackArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceShowVariablesArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceLoadTxnRollbackArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceShowVariablesArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnRollbackArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57717,17 +87102,14 @@ func (p *FrontendServiceShowVariablesArgs) Read(iprot 
thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57742,7 +87124,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57752,17 +87134,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTShowVariableRequest() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnRollbackArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTLoadTxnRollbackRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceShowVariablesArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnRollbackArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("showVariables_args"); err != nil { + if err = oprot.WriteStructBegin("loadTxnRollback_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57770,7 +87153,6 @@ func (p *FrontendServiceShowVariablesArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57789,11 +87171,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceLoadTxnRollbackArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -57806,66 +87188,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceShowVariablesArgs) String() string { +func (p *FrontendServiceLoadTxnRollbackArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceShowVariablesArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnRollbackArgs(%+v)", *p) + } -func (p *FrontendServiceShowVariablesArgs) DeepEqual(ano *FrontendServiceShowVariablesArgs) bool { +func (p *FrontendServiceLoadTxnRollbackArgs) DeepEqual(ano *FrontendServiceLoadTxnRollbackArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { 
return false } return true } -func (p *FrontendServiceShowVariablesArgs) Field1DeepEqual(src *TShowVariableRequest) bool { +func (p *FrontendServiceLoadTxnRollbackArgs) Field1DeepEqual(src *TLoadTxnRollbackRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceShowVariablesResult struct { - Success *TShowVariableResult_ `thrift:"success,0,optional" frugal:"0,optional,TShowVariableResult_" json:"success,omitempty"` +type FrontendServiceLoadTxnRollbackResult struct { + Success *TLoadTxnRollbackResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnRollbackResult_" json:"success,omitempty"` } -func NewFrontendServiceShowVariablesResult() *FrontendServiceShowVariablesResult { - return &FrontendServiceShowVariablesResult{} +func NewFrontendServiceLoadTxnRollbackResult() *FrontendServiceLoadTxnRollbackResult { + return &FrontendServiceLoadTxnRollbackResult{} } -func (p *FrontendServiceShowVariablesResult) InitDefault() { - *p = FrontendServiceShowVariablesResult{} +func (p *FrontendServiceLoadTxnRollbackResult) InitDefault() { } -var FrontendServiceShowVariablesResult_Success_DEFAULT *TShowVariableResult_ +var FrontendServiceLoadTxnRollbackResult_Success_DEFAULT *TLoadTxnRollbackResult_ -func (p *FrontendServiceShowVariablesResult) GetSuccess() (v *TShowVariableResult_) { +func (p *FrontendServiceLoadTxnRollbackResult) GetSuccess() (v *TLoadTxnRollbackResult_) { if !p.IsSetSuccess() { - return FrontendServiceShowVariablesResult_Success_DEFAULT + return FrontendServiceLoadTxnRollbackResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceShowVariablesResult) SetSuccess(x interface{}) { - p.Success = x.(*TShowVariableResult_) +func (p *FrontendServiceLoadTxnRollbackResult) SetSuccess(x interface{}) { + p.Success = x.(*TLoadTxnRollbackResult_) } -var fieldIDToName_FrontendServiceShowVariablesResult = map[int16]string{ +var fieldIDToName_FrontendServiceLoadTxnRollbackResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceShowVariablesResult) IsSetSuccess() bool { +func (p *FrontendServiceLoadTxnRollbackResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceShowVariablesResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnRollbackResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -57889,17 +87271,14 @@ func (p *FrontendServiceShowVariablesResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -57914,7 +87293,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -57924,17 +87303,18 @@ 
ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTShowVariableResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceLoadTxnRollbackResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTLoadTxnRollbackResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceShowVariablesResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnRollbackResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("showVariables_result"); err != nil { + if err = oprot.WriteStructBegin("loadTxnRollback_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -57942,7 +87322,6 @@ func (p *FrontendServiceShowVariablesResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -57961,7 +87340,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceLoadTxnRollbackResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -57980,14 +87359,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceShowVariablesResult) String() string { +func (p *FrontendServiceLoadTxnRollbackResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceShowVariablesResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceLoadTxnRollbackResult(%+v)", *p) + } -func (p *FrontendServiceShowVariablesResult) DeepEqual(ano *FrontendServiceShowVariablesResult) bool { +func (p *FrontendServiceLoadTxnRollbackResult) DeepEqual(ano *FrontendServiceLoadTxnRollbackResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -57999,7 +87379,7 @@ func (p *FrontendServiceShowVariablesResult) DeepEqual(ano *FrontendServiceShowV return true } -func (p *FrontendServiceShowVariablesResult) Field0DeepEqual(src *TShowVariableResult_) bool { +func (p *FrontendServiceLoadTxnRollbackResult) Field0DeepEqual(src *TLoadTxnRollbackResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -58007,39 +87387,38 @@ func (p *FrontendServiceShowVariablesResult) Field0DeepEqual(src *TShowVariableR return true } -type FrontendServiceReportExecStatusArgs struct { - Params *TReportExecStatusParams `thrift:"params,1" frugal:"1,default,TReportExecStatusParams" json:"params"` +type FrontendServiceBeginTxnArgs struct { + Request *TBeginTxnRequest `thrift:"request,1" frugal:"1,default,TBeginTxnRequest" json:"request"` } -func NewFrontendServiceReportExecStatusArgs() *FrontendServiceReportExecStatusArgs { - return &FrontendServiceReportExecStatusArgs{} +func NewFrontendServiceBeginTxnArgs() *FrontendServiceBeginTxnArgs { + return &FrontendServiceBeginTxnArgs{} } -func (p *FrontendServiceReportExecStatusArgs) InitDefault() { - *p = FrontendServiceReportExecStatusArgs{} +func (p *FrontendServiceBeginTxnArgs) InitDefault() { } -var FrontendServiceReportExecStatusArgs_Params_DEFAULT *TReportExecStatusParams +var 
FrontendServiceBeginTxnArgs_Request_DEFAULT *TBeginTxnRequest -func (p *FrontendServiceReportExecStatusArgs) GetParams() (v *TReportExecStatusParams) { - if !p.IsSetParams() { - return FrontendServiceReportExecStatusArgs_Params_DEFAULT +func (p *FrontendServiceBeginTxnArgs) GetRequest() (v *TBeginTxnRequest) { + if !p.IsSetRequest() { + return FrontendServiceBeginTxnArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceReportExecStatusArgs) SetParams(val *TReportExecStatusParams) { - p.Params = val +func (p *FrontendServiceBeginTxnArgs) SetRequest(val *TBeginTxnRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceReportExecStatusArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceBeginTxnArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceReportExecStatusArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceBeginTxnArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceReportExecStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceBeginTxnArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58063,17 +87442,14 @@ func (p *FrontendServiceReportExecStatusArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58088,7 +87464,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58098,17 +87474,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportExecStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTReportExecStatusParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceBeginTxnArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTBeginTxnRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceReportExecStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceBeginTxnArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("reportExecStatus_args"); err != nil { + if err = oprot.WriteStructBegin("beginTxn_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58116,7 +87493,6 @@ func (p *FrontendServiceReportExecStatusArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58135,11 +87511,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p 
*FrontendServiceReportExecStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceBeginTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -58152,66 +87528,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceReportExecStatusArgs) String() string { +func (p *FrontendServiceBeginTxnArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceReportExecStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceBeginTxnArgs(%+v)", *p) + } -func (p *FrontendServiceReportExecStatusArgs) DeepEqual(ano *FrontendServiceReportExecStatusArgs) bool { +func (p *FrontendServiceBeginTxnArgs) DeepEqual(ano *FrontendServiceBeginTxnArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceReportExecStatusArgs) Field1DeepEqual(src *TReportExecStatusParams) bool { +func (p *FrontendServiceBeginTxnArgs) Field1DeepEqual(src *TBeginTxnRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceReportExecStatusResult struct { - Success *TReportExecStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TReportExecStatusResult_" json:"success,omitempty"` +type FrontendServiceBeginTxnResult struct { + Success *TBeginTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TBeginTxnResult_" json:"success,omitempty"` } -func NewFrontendServiceReportExecStatusResult() *FrontendServiceReportExecStatusResult { - return &FrontendServiceReportExecStatusResult{} +func NewFrontendServiceBeginTxnResult() *FrontendServiceBeginTxnResult { + return &FrontendServiceBeginTxnResult{} } -func (p *FrontendServiceReportExecStatusResult) InitDefault() { - *p = FrontendServiceReportExecStatusResult{} +func (p *FrontendServiceBeginTxnResult) InitDefault() { } -var FrontendServiceReportExecStatusResult_Success_DEFAULT *TReportExecStatusResult_ +var FrontendServiceBeginTxnResult_Success_DEFAULT *TBeginTxnResult_ -func (p *FrontendServiceReportExecStatusResult) GetSuccess() (v *TReportExecStatusResult_) { +func (p *FrontendServiceBeginTxnResult) GetSuccess() (v *TBeginTxnResult_) { if !p.IsSetSuccess() { - return FrontendServiceReportExecStatusResult_Success_DEFAULT + return FrontendServiceBeginTxnResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceReportExecStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TReportExecStatusResult_) +func (p *FrontendServiceBeginTxnResult) SetSuccess(x interface{}) { + p.Success = x.(*TBeginTxnResult_) } -var fieldIDToName_FrontendServiceReportExecStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceBeginTxnResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceReportExecStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceBeginTxnResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceReportExecStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceBeginTxnResult) Read(iprot 
thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58235,17 +87611,14 @@ func (p *FrontendServiceReportExecStatusResult) Read(iprot thrift.TProtocol) (er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58260,7 +87633,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58270,17 +87643,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportExecStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTReportExecStatusResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceBeginTxnResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTBeginTxnResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceReportExecStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceBeginTxnResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("reportExecStatus_result"); err != nil { + if err = oprot.WriteStructBegin("beginTxn_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58288,7 +87662,6 @@ func (p *FrontendServiceReportExecStatusResult) Write(oprot thrift.TProtocol) (e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58307,7 +87680,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceReportExecStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceBeginTxnResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -58326,14 +87699,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceReportExecStatusResult) String() string { +func (p *FrontendServiceBeginTxnResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceReportExecStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceBeginTxnResult(%+v)", *p) + } -func (p *FrontendServiceReportExecStatusResult) DeepEqual(ano *FrontendServiceReportExecStatusResult) bool { +func (p *FrontendServiceBeginTxnResult) DeepEqual(ano *FrontendServiceBeginTxnResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -58345,7 +87719,7 @@ func (p *FrontendServiceReportExecStatusResult) DeepEqual(ano *FrontendServiceRe return true } -func (p 
*FrontendServiceReportExecStatusResult) Field0DeepEqual(src *TReportExecStatusResult_) bool { +func (p *FrontendServiceBeginTxnResult) Field0DeepEqual(src *TBeginTxnResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -58353,39 +87727,38 @@ func (p *FrontendServiceReportExecStatusResult) Field0DeepEqual(src *TReportExec return true } -type FrontendServiceFinishTaskArgs struct { - Request *masterservice.TFinishTaskRequest `thrift:"request,1" frugal:"1,default,masterservice.TFinishTaskRequest" json:"request"` +type FrontendServiceCommitTxnArgs struct { + Request *TCommitTxnRequest `thrift:"request,1" frugal:"1,default,TCommitTxnRequest" json:"request"` } -func NewFrontendServiceFinishTaskArgs() *FrontendServiceFinishTaskArgs { - return &FrontendServiceFinishTaskArgs{} +func NewFrontendServiceCommitTxnArgs() *FrontendServiceCommitTxnArgs { + return &FrontendServiceCommitTxnArgs{} } -func (p *FrontendServiceFinishTaskArgs) InitDefault() { - *p = FrontendServiceFinishTaskArgs{} +func (p *FrontendServiceCommitTxnArgs) InitDefault() { } -var FrontendServiceFinishTaskArgs_Request_DEFAULT *masterservice.TFinishTaskRequest +var FrontendServiceCommitTxnArgs_Request_DEFAULT *TCommitTxnRequest -func (p *FrontendServiceFinishTaskArgs) GetRequest() (v *masterservice.TFinishTaskRequest) { +func (p *FrontendServiceCommitTxnArgs) GetRequest() (v *TCommitTxnRequest) { if !p.IsSetRequest() { - return FrontendServiceFinishTaskArgs_Request_DEFAULT + return FrontendServiceCommitTxnArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceFinishTaskArgs) SetRequest(val *masterservice.TFinishTaskRequest) { +func (p *FrontendServiceCommitTxnArgs) SetRequest(val *TCommitTxnRequest) { p.Request = val } -var fieldIDToName_FrontendServiceFinishTaskArgs = map[int16]string{ +var fieldIDToName_FrontendServiceCommitTxnArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceFinishTaskArgs) IsSetRequest() bool { +func (p *FrontendServiceCommitTxnArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceFinishTaskArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58409,17 +87782,14 @@ func (p *FrontendServiceFinishTaskArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58434,7 +87804,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58444,17 +87814,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = 
masterservice.NewTFinishTaskRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceCommitTxnArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTCommitTxnRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceFinishTaskArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("finishTask_args"); err != nil { + if err = oprot.WriteStructBegin("commitTxn_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58462,7 +87833,6 @@ func (p *FrontendServiceFinishTaskArgs) Write(oprot thrift.TProtocol) (err error fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58481,7 +87851,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -58498,14 +87868,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceFinishTaskArgs) String() string { +func (p *FrontendServiceCommitTxnArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFinishTaskArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceCommitTxnArgs(%+v)", *p) + } -func (p *FrontendServiceFinishTaskArgs) DeepEqual(ano *FrontendServiceFinishTaskArgs) bool { +func (p *FrontendServiceCommitTxnArgs) DeepEqual(ano *FrontendServiceCommitTxnArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -58517,7 +87888,7 @@ func (p *FrontendServiceFinishTaskArgs) DeepEqual(ano *FrontendServiceFinishTask return true } -func (p *FrontendServiceFinishTaskArgs) Field1DeepEqual(src *masterservice.TFinishTaskRequest) bool { +func (p *FrontendServiceCommitTxnArgs) Field1DeepEqual(src *TCommitTxnRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -58525,39 +87896,38 @@ func (p *FrontendServiceFinishTaskArgs) Field1DeepEqual(src *masterservice.TFini return true } -type FrontendServiceFinishTaskResult struct { - Success *masterservice.TMasterResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TMasterResult_" json:"success,omitempty"` +type FrontendServiceCommitTxnResult struct { + Success *TCommitTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TCommitTxnResult_" json:"success,omitempty"` } -func NewFrontendServiceFinishTaskResult() *FrontendServiceFinishTaskResult { - return &FrontendServiceFinishTaskResult{} +func NewFrontendServiceCommitTxnResult() *FrontendServiceCommitTxnResult { + return &FrontendServiceCommitTxnResult{} } -func (p *FrontendServiceFinishTaskResult) InitDefault() { - *p = FrontendServiceFinishTaskResult{} +func (p *FrontendServiceCommitTxnResult) InitDefault() { } -var FrontendServiceFinishTaskResult_Success_DEFAULT *masterservice.TMasterResult_ +var FrontendServiceCommitTxnResult_Success_DEFAULT *TCommitTxnResult_ -func (p *FrontendServiceFinishTaskResult) GetSuccess() (v *masterservice.TMasterResult_) { +func (p *FrontendServiceCommitTxnResult) GetSuccess() (v *TCommitTxnResult_) { if !p.IsSetSuccess() { - return 
FrontendServiceFinishTaskResult_Success_DEFAULT + return FrontendServiceCommitTxnResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceFinishTaskResult) SetSuccess(x interface{}) { - p.Success = x.(*masterservice.TMasterResult_) +func (p *FrontendServiceCommitTxnResult) SetSuccess(x interface{}) { + p.Success = x.(*TCommitTxnResult_) } -var fieldIDToName_FrontendServiceFinishTaskResult = map[int16]string{ +var fieldIDToName_FrontendServiceCommitTxnResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceFinishTaskResult) IsSetSuccess() bool { +func (p *FrontendServiceCommitTxnResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceFinishTaskResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58581,17 +87951,14 @@ func (p *FrontendServiceFinishTaskResult) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58606,7 +87973,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58616,17 +87983,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = masterservice.NewTMasterResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceCommitTxnResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTCommitTxnResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceFinishTaskResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("finishTask_result"); err != nil { + if err = oprot.WriteStructBegin("commitTxn_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58634,7 +88002,6 @@ func (p *FrontendServiceFinishTaskResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58653,7 +88020,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCommitTxnResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -58672,14 +88039,15 @@ WriteFieldEndError: return 
thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceFinishTaskResult) String() string { +func (p *FrontendServiceCommitTxnResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFinishTaskResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceCommitTxnResult(%+v)", *p) + } -func (p *FrontendServiceFinishTaskResult) DeepEqual(ano *FrontendServiceFinishTaskResult) bool { +func (p *FrontendServiceCommitTxnResult) DeepEqual(ano *FrontendServiceCommitTxnResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -58691,7 +88059,7 @@ func (p *FrontendServiceFinishTaskResult) DeepEqual(ano *FrontendServiceFinishTa return true } -func (p *FrontendServiceFinishTaskResult) Field0DeepEqual(src *masterservice.TMasterResult_) bool { +func (p *FrontendServiceCommitTxnResult) Field0DeepEqual(src *TCommitTxnResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -58699,39 +88067,38 @@ func (p *FrontendServiceFinishTaskResult) Field0DeepEqual(src *masterservice.TMa return true } -type FrontendServiceReportArgs struct { - Request *masterservice.TReportRequest `thrift:"request,1" frugal:"1,default,masterservice.TReportRequest" json:"request"` +type FrontendServiceRollbackTxnArgs struct { + Request *TRollbackTxnRequest `thrift:"request,1" frugal:"1,default,TRollbackTxnRequest" json:"request"` } -func NewFrontendServiceReportArgs() *FrontendServiceReportArgs { - return &FrontendServiceReportArgs{} +func NewFrontendServiceRollbackTxnArgs() *FrontendServiceRollbackTxnArgs { + return &FrontendServiceRollbackTxnArgs{} } -func (p *FrontendServiceReportArgs) InitDefault() { - *p = FrontendServiceReportArgs{} +func (p *FrontendServiceRollbackTxnArgs) InitDefault() { } -var FrontendServiceReportArgs_Request_DEFAULT *masterservice.TReportRequest +var FrontendServiceRollbackTxnArgs_Request_DEFAULT *TRollbackTxnRequest -func (p *FrontendServiceReportArgs) GetRequest() (v *masterservice.TReportRequest) { +func (p *FrontendServiceRollbackTxnArgs) GetRequest() (v *TRollbackTxnRequest) { if !p.IsSetRequest() { - return FrontendServiceReportArgs_Request_DEFAULT + return FrontendServiceRollbackTxnArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceReportArgs) SetRequest(val *masterservice.TReportRequest) { +func (p *FrontendServiceRollbackTxnArgs) SetRequest(val *TRollbackTxnRequest) { p.Request = val } -var fieldIDToName_FrontendServiceReportArgs = map[int16]string{ +var fieldIDToName_FrontendServiceRollbackTxnArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceReportArgs) IsSetRequest() bool { +func (p *FrontendServiceRollbackTxnArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceReportArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58755,17 +88122,14 @@ func (p *FrontendServiceReportArgs) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58780,7 +88144,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d 
begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58790,17 +88154,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = masterservice.NewTReportRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceRollbackTxnArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTRollbackTxnRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceReportArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("report_args"); err != nil { + if err = oprot.WriteStructBegin("rollbackTxn_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58808,7 +88173,6 @@ func (p *FrontendServiceReportArgs) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58827,7 +88191,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceReportArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -58844,14 +88208,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceReportArgs) String() string { +func (p *FrontendServiceRollbackTxnArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceReportArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceRollbackTxnArgs(%+v)", *p) + } -func (p *FrontendServiceReportArgs) DeepEqual(ano *FrontendServiceReportArgs) bool { +func (p *FrontendServiceRollbackTxnArgs) DeepEqual(ano *FrontendServiceRollbackTxnArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -58863,7 +88228,7 @@ func (p *FrontendServiceReportArgs) DeepEqual(ano *FrontendServiceReportArgs) bo return true } -func (p *FrontendServiceReportArgs) Field1DeepEqual(src *masterservice.TReportRequest) bool { +func (p *FrontendServiceRollbackTxnArgs) Field1DeepEqual(src *TRollbackTxnRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -58871,39 +88236,38 @@ func (p *FrontendServiceReportArgs) Field1DeepEqual(src *masterservice.TReportRe return true } -type FrontendServiceReportResult struct { - Success *masterservice.TMasterResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TMasterResult_" json:"success,omitempty"` +type FrontendServiceRollbackTxnResult struct { + Success *TRollbackTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TRollbackTxnResult_" json:"success,omitempty"` } -func NewFrontendServiceReportResult() *FrontendServiceReportResult { - return &FrontendServiceReportResult{} +func 
NewFrontendServiceRollbackTxnResult() *FrontendServiceRollbackTxnResult { + return &FrontendServiceRollbackTxnResult{} } -func (p *FrontendServiceReportResult) InitDefault() { - *p = FrontendServiceReportResult{} +func (p *FrontendServiceRollbackTxnResult) InitDefault() { } -var FrontendServiceReportResult_Success_DEFAULT *masterservice.TMasterResult_ +var FrontendServiceRollbackTxnResult_Success_DEFAULT *TRollbackTxnResult_ -func (p *FrontendServiceReportResult) GetSuccess() (v *masterservice.TMasterResult_) { +func (p *FrontendServiceRollbackTxnResult) GetSuccess() (v *TRollbackTxnResult_) { if !p.IsSetSuccess() { - return FrontendServiceReportResult_Success_DEFAULT + return FrontendServiceRollbackTxnResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceReportResult) SetSuccess(x interface{}) { - p.Success = x.(*masterservice.TMasterResult_) +func (p *FrontendServiceRollbackTxnResult) SetSuccess(x interface{}) { + p.Success = x.(*TRollbackTxnResult_) } -var fieldIDToName_FrontendServiceReportResult = map[int16]string{ +var fieldIDToName_FrontendServiceRollbackTxnResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceReportResult) IsSetSuccess() bool { +func (p *FrontendServiceRollbackTxnResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceReportResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -58927,17 +88291,14 @@ func (p *FrontendServiceReportResult) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -58952,7 +88313,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -58962,17 +88323,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = masterservice.NewTMasterResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceRollbackTxnResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTRollbackTxnResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceReportResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("report_result"); err != nil { + if err = oprot.WriteStructBegin("rollbackTxn_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -58980,7 +88342,6 @@ func (p *FrontendServiceReportResult) Write(oprot 
thrift.TProtocol) (err error) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -58999,7 +88360,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceReportResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRollbackTxnResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -59018,14 +88379,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceReportResult) String() string { +func (p *FrontendServiceRollbackTxnResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceReportResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceRollbackTxnResult(%+v)", *p) + } -func (p *FrontendServiceReportResult) DeepEqual(ano *FrontendServiceReportResult) bool { +func (p *FrontendServiceRollbackTxnResult) DeepEqual(ano *FrontendServiceRollbackTxnResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -59037,7 +88399,7 @@ func (p *FrontendServiceReportResult) DeepEqual(ano *FrontendServiceReportResult return true } -func (p *FrontendServiceReportResult) Field0DeepEqual(src *masterservice.TMasterResult_) bool { +func (p *FrontendServiceRollbackTxnResult) Field0DeepEqual(src *TRollbackTxnResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -59045,20 +88407,38 @@ func (p *FrontendServiceReportResult) Field0DeepEqual(src *masterservice.TMaster return true } -type FrontendServiceFetchResourceArgs struct { +type FrontendServiceGetBinlogArgs struct { + Request *TGetBinlogRequest `thrift:"request,1" frugal:"1,default,TGetBinlogRequest" json:"request"` } -func NewFrontendServiceFetchResourceArgs() *FrontendServiceFetchResourceArgs { - return &FrontendServiceFetchResourceArgs{} +func NewFrontendServiceGetBinlogArgs() *FrontendServiceGetBinlogArgs { + return &FrontendServiceGetBinlogArgs{} } -func (p *FrontendServiceFetchResourceArgs) InitDefault() { - *p = FrontendServiceFetchResourceArgs{} +func (p *FrontendServiceGetBinlogArgs) InitDefault() { } -var fieldIDToName_FrontendServiceFetchResourceArgs = map[int16]string{} +var FrontendServiceGetBinlogArgs_Request_DEFAULT *TGetBinlogRequest -func (p *FrontendServiceFetchResourceArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogArgs) GetRequest() (v *TGetBinlogRequest) { + if !p.IsSetRequest() { + return FrontendServiceGetBinlogArgs_Request_DEFAULT + } + return p.Request +} +func (p *FrontendServiceGetBinlogArgs) SetRequest(val *TGetBinlogRequest) { + p.Request = val +} + +var fieldIDToName_FrontendServiceGetBinlogArgs = map[int16]string{ + 1: "request", +} + +func (p *FrontendServiceGetBinlogArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *FrontendServiceGetBinlogArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59075,10 +88455,21 @@ func (p *FrontendServiceFetchResourceArgs) Read(iprot thrift.TProtocol) (err err if fieldTypeId == thrift.STOP { break } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59092,8 +88483,10 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -59101,12 +88494,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchResourceArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("fetchResource_args"); err != nil { +func (p *FrontendServiceGetBinlogArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetBinlogRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Request = _field + return nil +} + +func (p *FrontendServiceGetBinlogArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("getBinlog_args"); err != nil { goto WriteStructBeginError } if p != nil { - + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59117,61 +88523,91 @@ func (p *FrontendServiceFetchResourceArgs) Write(oprot thrift.TProtocol) (err er return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFetchResourceArgs) String() string { +func (p *FrontendServiceGetBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Request.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceGetBinlogArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFetchResourceArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetBinlogArgs(%+v)", *p) + } -func (p *FrontendServiceFetchResourceArgs) DeepEqual(ano *FrontendServiceFetchResourceArgs) bool { +func (p *FrontendServiceGetBinlogArgs) DeepEqual(ano *FrontendServiceGetBinlogArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } + if !p.Field1DeepEqual(ano.Request) { + return false + } return true } -type FrontendServiceFetchResourceResult 
struct { - Success *masterservice.TFetchResourceResult_ `thrift:"success,0,optional" frugal:"0,optional,masterservice.TFetchResourceResult_" json:"success,omitempty"` +func (p *FrontendServiceGetBinlogArgs) Field1DeepEqual(src *TGetBinlogRequest) bool { + + if !p.Request.DeepEqual(src) { + return false + } + return true } -func NewFrontendServiceFetchResourceResult() *FrontendServiceFetchResourceResult { - return &FrontendServiceFetchResourceResult{} +type FrontendServiceGetBinlogResult struct { + Success *TGetBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetBinlogResult_" json:"success,omitempty"` } -func (p *FrontendServiceFetchResourceResult) InitDefault() { - *p = FrontendServiceFetchResourceResult{} +func NewFrontendServiceGetBinlogResult() *FrontendServiceGetBinlogResult { + return &FrontendServiceGetBinlogResult{} } -var FrontendServiceFetchResourceResult_Success_DEFAULT *masterservice.TFetchResourceResult_ +func (p *FrontendServiceGetBinlogResult) InitDefault() { +} -func (p *FrontendServiceFetchResourceResult) GetSuccess() (v *masterservice.TFetchResourceResult_) { +var FrontendServiceGetBinlogResult_Success_DEFAULT *TGetBinlogResult_ + +func (p *FrontendServiceGetBinlogResult) GetSuccess() (v *TGetBinlogResult_) { if !p.IsSetSuccess() { - return FrontendServiceFetchResourceResult_Success_DEFAULT + return FrontendServiceGetBinlogResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceFetchResourceResult) SetSuccess(x interface{}) { - p.Success = x.(*masterservice.TFetchResourceResult_) +func (p *FrontendServiceGetBinlogResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetBinlogResult_) } -var fieldIDToName_FrontendServiceFetchResourceResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetBinlogResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceFetchResourceResult) IsSetSuccess() bool { +func (p *FrontendServiceGetBinlogResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceFetchResourceResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59195,17 +88631,14 @@ func (p *FrontendServiceFetchResourceResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59220,7 +88653,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchResourceResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -59230,17 +88663,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchResourceResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = masterservice.NewTFetchResourceResult_() - 
if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetBinlogResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetBinlogResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceFetchResourceResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("fetchResource_result"); err != nil { + if err = oprot.WriteStructBegin("getBinlog_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -59248,7 +88682,6 @@ func (p *FrontendServiceFetchResourceResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59267,7 +88700,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFetchResourceResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -59286,14 +88719,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceFetchResourceResult) String() string { +func (p *FrontendServiceGetBinlogResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFetchResourceResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetBinlogResult(%+v)", *p) + } -func (p *FrontendServiceFetchResourceResult) DeepEqual(ano *FrontendServiceFetchResourceResult) bool { +func (p *FrontendServiceGetBinlogResult) DeepEqual(ano *FrontendServiceGetBinlogResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -59305,7 +88739,7 @@ func (p *FrontendServiceFetchResourceResult) DeepEqual(ano *FrontendServiceFetch return true } -func (p *FrontendServiceFetchResourceResult) Field0DeepEqual(src *masterservice.TFetchResourceResult_) bool { +func (p *FrontendServiceGetBinlogResult) Field0DeepEqual(src *TGetBinlogResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -59313,39 +88747,38 @@ func (p *FrontendServiceFetchResourceResult) Field0DeepEqual(src *masterservice. 
return true } -type FrontendServiceForwardArgs struct { - Params *TMasterOpRequest `thrift:"params,1" frugal:"1,default,TMasterOpRequest" json:"params"` +type FrontendServiceGetSnapshotArgs struct { + Request *TGetSnapshotRequest `thrift:"request,1" frugal:"1,default,TGetSnapshotRequest" json:"request"` } -func NewFrontendServiceForwardArgs() *FrontendServiceForwardArgs { - return &FrontendServiceForwardArgs{} +func NewFrontendServiceGetSnapshotArgs() *FrontendServiceGetSnapshotArgs { + return &FrontendServiceGetSnapshotArgs{} } -func (p *FrontendServiceForwardArgs) InitDefault() { - *p = FrontendServiceForwardArgs{} +func (p *FrontendServiceGetSnapshotArgs) InitDefault() { } -var FrontendServiceForwardArgs_Params_DEFAULT *TMasterOpRequest +var FrontendServiceGetSnapshotArgs_Request_DEFAULT *TGetSnapshotRequest -func (p *FrontendServiceForwardArgs) GetParams() (v *TMasterOpRequest) { - if !p.IsSetParams() { - return FrontendServiceForwardArgs_Params_DEFAULT +func (p *FrontendServiceGetSnapshotArgs) GetRequest() (v *TGetSnapshotRequest) { + if !p.IsSetRequest() { + return FrontendServiceGetSnapshotArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceForwardArgs) SetParams(val *TMasterOpRequest) { - p.Params = val +func (p *FrontendServiceGetSnapshotArgs) SetRequest(val *TGetSnapshotRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceForwardArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceGetSnapshotArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceForwardArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceGetSnapshotArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceForwardArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59369,17 +88802,14 @@ func (p *FrontendServiceForwardArgs) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59394,7 +88824,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -59404,17 +88834,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceForwardArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTMasterOpRequest() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceGetSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetSnapshotRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceForwardArgs) Write(oprot 
thrift.TProtocol) (err error) { +func (p *FrontendServiceGetSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("forward_args"); err != nil { + if err = oprot.WriteStructBegin("getSnapshot_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -59422,7 +88853,6 @@ func (p *FrontendServiceForwardArgs) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59441,11 +88871,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceForwardArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceGetSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -59458,66 +88888,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceForwardArgs) String() string { +func (p *FrontendServiceGetSnapshotArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceForwardArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetSnapshotArgs(%+v)", *p) + } -func (p *FrontendServiceForwardArgs) DeepEqual(ano *FrontendServiceForwardArgs) bool { +func (p *FrontendServiceGetSnapshotArgs) DeepEqual(ano *FrontendServiceGetSnapshotArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceForwardArgs) Field1DeepEqual(src *TMasterOpRequest) bool { +func (p *FrontendServiceGetSnapshotArgs) Field1DeepEqual(src *TGetSnapshotRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceForwardResult struct { - Success *TMasterOpResult_ `thrift:"success,0,optional" frugal:"0,optional,TMasterOpResult_" json:"success,omitempty"` +type FrontendServiceGetSnapshotResult struct { + Success *TGetSnapshotResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetSnapshotResult_" json:"success,omitempty"` } -func NewFrontendServiceForwardResult() *FrontendServiceForwardResult { - return &FrontendServiceForwardResult{} +func NewFrontendServiceGetSnapshotResult() *FrontendServiceGetSnapshotResult { + return &FrontendServiceGetSnapshotResult{} } -func (p *FrontendServiceForwardResult) InitDefault() { - *p = FrontendServiceForwardResult{} +func (p *FrontendServiceGetSnapshotResult) InitDefault() { } -var FrontendServiceForwardResult_Success_DEFAULT *TMasterOpResult_ +var FrontendServiceGetSnapshotResult_Success_DEFAULT *TGetSnapshotResult_ -func (p *FrontendServiceForwardResult) GetSuccess() (v *TMasterOpResult_) { +func (p *FrontendServiceGetSnapshotResult) GetSuccess() (v *TGetSnapshotResult_) { if !p.IsSetSuccess() { - return FrontendServiceForwardResult_Success_DEFAULT + return FrontendServiceGetSnapshotResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceForwardResult) SetSuccess(x interface{}) { - p.Success = x.(*TMasterOpResult_) +func (p *FrontendServiceGetSnapshotResult) 
SetSuccess(x interface{}) { + p.Success = x.(*TGetSnapshotResult_) } -var fieldIDToName_FrontendServiceForwardResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetSnapshotResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceForwardResult) IsSetSuccess() bool { +func (p *FrontendServiceGetSnapshotResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceForwardResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetSnapshotResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59541,17 +88971,14 @@ func (p *FrontendServiceForwardResult) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59566,7 +88993,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -59576,17 +89003,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceForwardResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTMasterOpResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetSnapshotResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetSnapshotResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceForwardResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetSnapshotResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("forward_result"); err != nil { + if err = oprot.WriteStructBegin("getSnapshot_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -59594,7 +89022,6 @@ func (p *FrontendServiceForwardResult) Write(oprot thrift.TProtocol) (err error) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59613,7 +89040,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceForwardResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -59632,14 +89059,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceForwardResult) String() string { +func (p *FrontendServiceGetSnapshotResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceForwardResult(%+v)", *p) + return 
fmt.Sprintf("FrontendServiceGetSnapshotResult(%+v)", *p) + } -func (p *FrontendServiceForwardResult) DeepEqual(ano *FrontendServiceForwardResult) bool { +func (p *FrontendServiceGetSnapshotResult) DeepEqual(ano *FrontendServiceGetSnapshotResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -59651,7 +89079,7 @@ func (p *FrontendServiceForwardResult) DeepEqual(ano *FrontendServiceForwardResu return true } -func (p *FrontendServiceForwardResult) Field0DeepEqual(src *TMasterOpResult_) bool { +func (p *FrontendServiceGetSnapshotResult) Field0DeepEqual(src *TGetSnapshotResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -59659,39 +89087,38 @@ func (p *FrontendServiceForwardResult) Field0DeepEqual(src *TMasterOpResult_) bo return true } -type FrontendServiceListTableStatusArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceRestoreSnapshotArgs struct { + Request *TRestoreSnapshotRequest `thrift:"request,1" frugal:"1,default,TRestoreSnapshotRequest" json:"request"` } -func NewFrontendServiceListTableStatusArgs() *FrontendServiceListTableStatusArgs { - return &FrontendServiceListTableStatusArgs{} +func NewFrontendServiceRestoreSnapshotArgs() *FrontendServiceRestoreSnapshotArgs { + return &FrontendServiceRestoreSnapshotArgs{} } -func (p *FrontendServiceListTableStatusArgs) InitDefault() { - *p = FrontendServiceListTableStatusArgs{} +func (p *FrontendServiceRestoreSnapshotArgs) InitDefault() { } -var FrontendServiceListTableStatusArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceRestoreSnapshotArgs_Request_DEFAULT *TRestoreSnapshotRequest -func (p *FrontendServiceListTableStatusArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return FrontendServiceListTableStatusArgs_Params_DEFAULT +func (p *FrontendServiceRestoreSnapshotArgs) GetRequest() (v *TRestoreSnapshotRequest) { + if !p.IsSetRequest() { + return FrontendServiceRestoreSnapshotArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceListTableStatusArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceRestoreSnapshotArgs) SetRequest(val *TRestoreSnapshotRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceListTableStatusArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceRestoreSnapshotArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceListTableStatusArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceRestoreSnapshotArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceListTableStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRestoreSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59715,17 +89142,14 @@ func (p *FrontendServiceListTableStatusArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59740,7 +89164,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -59750,17 +89174,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceRestoreSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTRestoreSnapshotRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceListTableStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRestoreSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTableStatus_args"); err != nil { + if err = oprot.WriteStructBegin("restoreSnapshot_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -59768,7 +89193,6 @@ func (p *FrontendServiceListTableStatusArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59787,11 +89211,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceRestoreSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -59804,66 +89228,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceListTableStatusArgs) String() string { +func (p *FrontendServiceRestoreSnapshotArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTableStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceRestoreSnapshotArgs(%+v)", *p) + } -func (p *FrontendServiceListTableStatusArgs) DeepEqual(ano *FrontendServiceListTableStatusArgs) bool { +func (p *FrontendServiceRestoreSnapshotArgs) DeepEqual(ano *FrontendServiceRestoreSnapshotArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceListTableStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { +func (p *FrontendServiceRestoreSnapshotArgs) Field1DeepEqual(src *TRestoreSnapshotRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceListTableStatusResult struct { - Success *TListTableStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TListTableStatusResult_" json:"success,omitempty"` +type FrontendServiceRestoreSnapshotResult struct { + Success 
*TRestoreSnapshotResult_ `thrift:"success,0,optional" frugal:"0,optional,TRestoreSnapshotResult_" json:"success,omitempty"` } -func NewFrontendServiceListTableStatusResult() *FrontendServiceListTableStatusResult { - return &FrontendServiceListTableStatusResult{} +func NewFrontendServiceRestoreSnapshotResult() *FrontendServiceRestoreSnapshotResult { + return &FrontendServiceRestoreSnapshotResult{} } -func (p *FrontendServiceListTableStatusResult) InitDefault() { - *p = FrontendServiceListTableStatusResult{} +func (p *FrontendServiceRestoreSnapshotResult) InitDefault() { } -var FrontendServiceListTableStatusResult_Success_DEFAULT *TListTableStatusResult_ +var FrontendServiceRestoreSnapshotResult_Success_DEFAULT *TRestoreSnapshotResult_ -func (p *FrontendServiceListTableStatusResult) GetSuccess() (v *TListTableStatusResult_) { +func (p *FrontendServiceRestoreSnapshotResult) GetSuccess() (v *TRestoreSnapshotResult_) { if !p.IsSetSuccess() { - return FrontendServiceListTableStatusResult_Success_DEFAULT + return FrontendServiceRestoreSnapshotResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceListTableStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TListTableStatusResult_) +func (p *FrontendServiceRestoreSnapshotResult) SetSuccess(x interface{}) { + p.Success = x.(*TRestoreSnapshotResult_) } -var fieldIDToName_FrontendServiceListTableStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceRestoreSnapshotResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceListTableStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceRestoreSnapshotResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceListTableStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRestoreSnapshotResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -59887,17 +89311,14 @@ func (p *FrontendServiceListTableStatusResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -59912,7 +89333,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -59922,17 +89343,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTListTableStatusResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceRestoreSnapshotResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTRestoreSnapshotResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p 
*FrontendServiceListTableStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRestoreSnapshotResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTableStatus_result"); err != nil { + if err = oprot.WriteStructBegin("restoreSnapshot_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -59940,7 +89362,6 @@ func (p *FrontendServiceListTableStatusResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -59959,7 +89380,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceRestoreSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -59978,14 +89399,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceListTableStatusResult) String() string { +func (p *FrontendServiceRestoreSnapshotResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTableStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceRestoreSnapshotResult(%+v)", *p) + } -func (p *FrontendServiceListTableStatusResult) DeepEqual(ano *FrontendServiceListTableStatusResult) bool { +func (p *FrontendServiceRestoreSnapshotResult) DeepEqual(ano *FrontendServiceRestoreSnapshotResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -59997,7 +89419,7 @@ func (p *FrontendServiceListTableStatusResult) DeepEqual(ano *FrontendServiceLis return true } -func (p *FrontendServiceListTableStatusResult) Field0DeepEqual(src *TListTableStatusResult_) bool { +func (p *FrontendServiceRestoreSnapshotResult) Field0DeepEqual(src *TRestoreSnapshotResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -60005,39 +89427,38 @@ func (p *FrontendServiceListTableStatusResult) Field0DeepEqual(src *TListTableSt return true } -type FrontendServiceListTableMetadataNameIdsArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceWaitingTxnStatusArgs struct { + Request *TWaitingTxnStatusRequest `thrift:"request,1" frugal:"1,default,TWaitingTxnStatusRequest" json:"request"` } -func NewFrontendServiceListTableMetadataNameIdsArgs() *FrontendServiceListTableMetadataNameIdsArgs { - return &FrontendServiceListTableMetadataNameIdsArgs{} +func NewFrontendServiceWaitingTxnStatusArgs() *FrontendServiceWaitingTxnStatusArgs { + return &FrontendServiceWaitingTxnStatusArgs{} } -func (p *FrontendServiceListTableMetadataNameIdsArgs) InitDefault() { - *p = FrontendServiceListTableMetadataNameIdsArgs{} +func (p *FrontendServiceWaitingTxnStatusArgs) InitDefault() { } -var FrontendServiceListTableMetadataNameIdsArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceWaitingTxnStatusArgs_Request_DEFAULT *TWaitingTxnStatusRequest -func (p *FrontendServiceListTableMetadataNameIdsArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return FrontendServiceListTableMetadataNameIdsArgs_Params_DEFAULT +func (p *FrontendServiceWaitingTxnStatusArgs) GetRequest() (v *TWaitingTxnStatusRequest) { + if !p.IsSetRequest() { + return 
FrontendServiceWaitingTxnStatusArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceListTableMetadataNameIdsArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceWaitingTxnStatusArgs) SetRequest(val *TWaitingTxnStatusRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceWaitingTxnStatusArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceListTableMetadataNameIdsArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceWaitingTxnStatusArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceListTableMetadataNameIdsArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceWaitingTxnStatusArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60061,17 +89482,14 @@ func (p *FrontendServiceListTableMetadataNameIdsArgs) Read(iprot thrift.TProtoco if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60086,7 +89504,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60096,17 +89514,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceWaitingTxnStatusArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTWaitingTxnStatusRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceListTableMetadataNameIdsArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceWaitingTxnStatusArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTableMetadataNameIds_args"); err != nil { + if err = oprot.WriteStructBegin("waitingTxnStatus_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60114,7 +89533,6 @@ func (p *FrontendServiceListTableMetadataNameIdsArgs) Write(oprot thrift.TProtoc fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60133,11 +89551,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceWaitingTxnStatusArgs) writeField1(oprot 
thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -60150,66 +89568,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsArgs) String() string { +func (p *FrontendServiceWaitingTxnStatusArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTableMetadataNameIdsArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceWaitingTxnStatusArgs(%+v)", *p) + } -func (p *FrontendServiceListTableMetadataNameIdsArgs) DeepEqual(ano *FrontendServiceListTableMetadataNameIdsArgs) bool { +func (p *FrontendServiceWaitingTxnStatusArgs) DeepEqual(ano *FrontendServiceWaitingTxnStatusArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceListTableMetadataNameIdsArgs) Field1DeepEqual(src *TGetTablesParams) bool { +func (p *FrontendServiceWaitingTxnStatusArgs) Field1DeepEqual(src *TWaitingTxnStatusRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceListTableMetadataNameIdsResult struct { - Success *TListTableMetadataNameIdsResult_ `thrift:"success,0,optional" frugal:"0,optional,TListTableMetadataNameIdsResult_" json:"success,omitempty"` +type FrontendServiceWaitingTxnStatusResult struct { + Success *TWaitingTxnStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TWaitingTxnStatusResult_" json:"success,omitempty"` } -func NewFrontendServiceListTableMetadataNameIdsResult() *FrontendServiceListTableMetadataNameIdsResult { - return &FrontendServiceListTableMetadataNameIdsResult{} +func NewFrontendServiceWaitingTxnStatusResult() *FrontendServiceWaitingTxnStatusResult { + return &FrontendServiceWaitingTxnStatusResult{} } -func (p *FrontendServiceListTableMetadataNameIdsResult) InitDefault() { - *p = FrontendServiceListTableMetadataNameIdsResult{} +func (p *FrontendServiceWaitingTxnStatusResult) InitDefault() { } -var FrontendServiceListTableMetadataNameIdsResult_Success_DEFAULT *TListTableMetadataNameIdsResult_ +var FrontendServiceWaitingTxnStatusResult_Success_DEFAULT *TWaitingTxnStatusResult_ -func (p *FrontendServiceListTableMetadataNameIdsResult) GetSuccess() (v *TListTableMetadataNameIdsResult_) { +func (p *FrontendServiceWaitingTxnStatusResult) GetSuccess() (v *TWaitingTxnStatusResult_) { if !p.IsSetSuccess() { - return FrontendServiceListTableMetadataNameIdsResult_Success_DEFAULT + return FrontendServiceWaitingTxnStatusResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceListTableMetadataNameIdsResult) SetSuccess(x interface{}) { - p.Success = x.(*TListTableMetadataNameIdsResult_) +func (p *FrontendServiceWaitingTxnStatusResult) SetSuccess(x interface{}) { + p.Success = x.(*TWaitingTxnStatusResult_) } -var fieldIDToName_FrontendServiceListTableMetadataNameIdsResult = map[int16]string{ +var fieldIDToName_FrontendServiceWaitingTxnStatusResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceListTableMetadataNameIdsResult) IsSetSuccess() bool { +func (p *FrontendServiceWaitingTxnStatusResult) IsSetSuccess() bool { return p.Success != nil } -func (p 
*FrontendServiceListTableMetadataNameIdsResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceWaitingTxnStatusResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60233,17 +89651,14 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) Read(iprot thrift.TProto if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60258,7 +89673,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60268,17 +89683,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTListTableMetadataNameIdsResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceWaitingTxnStatusResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTWaitingTxnStatusResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceListTableMetadataNameIdsResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceWaitingTxnStatusResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTableMetadataNameIds_result"); err != nil { + if err = oprot.WriteStructBegin("waitingTxnStatus_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60286,7 +89702,6 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) Write(oprot thrift.TProt fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60305,7 +89720,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceWaitingTxnStatusResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -60324,14 +89739,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsResult) String() string { +func (p *FrontendServiceWaitingTxnStatusResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTableMetadataNameIdsResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceWaitingTxnStatusResult(%+v)", *p) + } -func (p *FrontendServiceListTableMetadataNameIdsResult) DeepEqual(ano *FrontendServiceListTableMetadataNameIdsResult) bool { +func (p 
*FrontendServiceWaitingTxnStatusResult) DeepEqual(ano *FrontendServiceWaitingTxnStatusResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -60343,7 +89759,7 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) DeepEqual(ano *FrontendS return true } -func (p *FrontendServiceListTableMetadataNameIdsResult) Field0DeepEqual(src *TListTableMetadataNameIdsResult_) bool { +func (p *FrontendServiceWaitingTxnStatusResult) Field0DeepEqual(src *TWaitingTxnStatusResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -60351,39 +89767,38 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) Field0DeepEqual(src *TLi return true } -type FrontendServiceListTablePrivilegeStatusArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceStreamLoadPutArgs struct { + Request *TStreamLoadPutRequest `thrift:"request,1" frugal:"1,default,TStreamLoadPutRequest" json:"request"` } -func NewFrontendServiceListTablePrivilegeStatusArgs() *FrontendServiceListTablePrivilegeStatusArgs { - return &FrontendServiceListTablePrivilegeStatusArgs{} +func NewFrontendServiceStreamLoadPutArgs() *FrontendServiceStreamLoadPutArgs { + return &FrontendServiceStreamLoadPutArgs{} } -func (p *FrontendServiceListTablePrivilegeStatusArgs) InitDefault() { - *p = FrontendServiceListTablePrivilegeStatusArgs{} +func (p *FrontendServiceStreamLoadPutArgs) InitDefault() { } -var FrontendServiceListTablePrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceStreamLoadPutArgs_Request_DEFAULT *TStreamLoadPutRequest -func (p *FrontendServiceListTablePrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return FrontendServiceListTablePrivilegeStatusArgs_Params_DEFAULT +func (p *FrontendServiceStreamLoadPutArgs) GetRequest() (v *TStreamLoadPutRequest) { + if !p.IsSetRequest() { + return FrontendServiceStreamLoadPutArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceListTablePrivilegeStatusArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceStreamLoadPutArgs) SetRequest(val *TStreamLoadPutRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceStreamLoadPutArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceListTablePrivilegeStatusArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceStreamLoadPutArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceListTablePrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadPutArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60407,17 +89822,14 @@ func (p *FrontendServiceListTablePrivilegeStatusArgs) Read(iprot thrift.TProtoco if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60432,7 +89844,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field 
%d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60442,17 +89854,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceStreamLoadPutArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTStreamLoadPutRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceListTablePrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadPutArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTablePrivilegeStatus_args"); err != nil { + if err = oprot.WriteStructBegin("streamLoadPut_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60460,7 +89873,6 @@ func (p *FrontendServiceListTablePrivilegeStatusArgs) Write(oprot thrift.TProtoc fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60479,11 +89891,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceStreamLoadPutArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -60496,66 +89908,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusArgs) String() string { +func (p *FrontendServiceStreamLoadPutArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTablePrivilegeStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceStreamLoadPutArgs(%+v)", *p) + } -func (p *FrontendServiceListTablePrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListTablePrivilegeStatusArgs) bool { +func (p *FrontendServiceStreamLoadPutArgs) DeepEqual(ano *FrontendServiceStreamLoadPutArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceListTablePrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { +func (p *FrontendServiceStreamLoadPutArgs) Field1DeepEqual(src *TStreamLoadPutRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceListTablePrivilegeStatusResult struct { - Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` +type FrontendServiceStreamLoadPutResult struct { + 
Success *TStreamLoadPutResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadPutResult_" json:"success,omitempty"` } -func NewFrontendServiceListTablePrivilegeStatusResult() *FrontendServiceListTablePrivilegeStatusResult { - return &FrontendServiceListTablePrivilegeStatusResult{} +func NewFrontendServiceStreamLoadPutResult() *FrontendServiceStreamLoadPutResult { + return &FrontendServiceStreamLoadPutResult{} } -func (p *FrontendServiceListTablePrivilegeStatusResult) InitDefault() { - *p = FrontendServiceListTablePrivilegeStatusResult{} +func (p *FrontendServiceStreamLoadPutResult) InitDefault() { } -var FrontendServiceListTablePrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ +var FrontendServiceStreamLoadPutResult_Success_DEFAULT *TStreamLoadPutResult_ -func (p *FrontendServiceListTablePrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { +func (p *FrontendServiceStreamLoadPutResult) GetSuccess() (v *TStreamLoadPutResult_) { if !p.IsSetSuccess() { - return FrontendServiceListTablePrivilegeStatusResult_Success_DEFAULT + return FrontendServiceStreamLoadPutResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceListTablePrivilegeStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TListPrivilegesResult_) +func (p *FrontendServiceStreamLoadPutResult) SetSuccess(x interface{}) { + p.Success = x.(*TStreamLoadPutResult_) } -var fieldIDToName_FrontendServiceListTablePrivilegeStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceStreamLoadPutResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceListTablePrivilegeStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceStreamLoadPutResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceListTablePrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadPutResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60579,17 +89991,14 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) Read(iprot thrift.TProto if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60604,7 +90013,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60614,17 +90023,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTListPrivilegesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceStreamLoadPutResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTStreamLoadPutResult_() + if err := _field.Read(iprot); 
err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceListTablePrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadPutResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listTablePrivilegeStatus_result"); err != nil { + if err = oprot.WriteStructBegin("streamLoadPut_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60632,7 +90042,6 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) Write(oprot thrift.TProt fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60651,7 +90060,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadPutResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -60670,14 +90079,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusResult) String() string { +func (p *FrontendServiceStreamLoadPutResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListTablePrivilegeStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceStreamLoadPutResult(%+v)", *p) + } -func (p *FrontendServiceListTablePrivilegeStatusResult) DeepEqual(ano *FrontendServiceListTablePrivilegeStatusResult) bool { +func (p *FrontendServiceStreamLoadPutResult) DeepEqual(ano *FrontendServiceStreamLoadPutResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -60689,7 +90099,7 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) DeepEqual(ano *FrontendS return true } -func (p *FrontendServiceListTablePrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { +func (p *FrontendServiceStreamLoadPutResult) Field0DeepEqual(src *TStreamLoadPutResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -60697,39 +90107,38 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) Field0DeepEqual(src *TLi return true } -type FrontendServiceListSchemaPrivilegeStatusArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceStreamLoadMultiTablePutArgs struct { + Request *TStreamLoadPutRequest `thrift:"request,1" frugal:"1,default,TStreamLoadPutRequest" json:"request"` } -func NewFrontendServiceListSchemaPrivilegeStatusArgs() *FrontendServiceListSchemaPrivilegeStatusArgs { - return &FrontendServiceListSchemaPrivilegeStatusArgs{} +func NewFrontendServiceStreamLoadMultiTablePutArgs() *FrontendServiceStreamLoadMultiTablePutArgs { + return &FrontendServiceStreamLoadMultiTablePutArgs{} } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) InitDefault() { - *p = FrontendServiceListSchemaPrivilegeStatusArgs{} +func (p *FrontendServiceStreamLoadMultiTablePutArgs) InitDefault() { } -var FrontendServiceListSchemaPrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceStreamLoadMultiTablePutArgs_Request_DEFAULT *TStreamLoadPutRequest -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return 
FrontendServiceListSchemaPrivilegeStatusArgs_Params_DEFAULT +func (p *FrontendServiceStreamLoadMultiTablePutArgs) GetRequest() (v *TStreamLoadPutRequest) { + if !p.IsSetRequest() { + return FrontendServiceStreamLoadMultiTablePutArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceStreamLoadMultiTablePutArgs) SetRequest(val *TStreamLoadPutRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceStreamLoadMultiTablePutArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60753,17 +90162,14 @@ func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Read(iprot thrift.TProtoc if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60778,7 +90184,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60788,17 +90194,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTStreamLoadPutRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listSchemaPrivilegeStatus_args"); err != nil { + if err = oprot.WriteStructBegin("streamLoadMultiTablePut_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60806,7 +90213,6 @@ func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Write(oprot thrift.TProto fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60825,11 +90231,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", 
p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -60842,66 +90248,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) String() string { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListSchemaPrivilegeStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceStreamLoadMultiTablePutArgs(%+v)", *p) + } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListSchemaPrivilegeStatusArgs) bool { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) DeepEqual(ano *FrontendServiceStreamLoadMultiTablePutArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) Field1DeepEqual(src *TStreamLoadPutRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceListSchemaPrivilegeStatusResult struct { - Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` +type FrontendServiceStreamLoadMultiTablePutResult struct { + Success *TStreamLoadMultiTablePutResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadMultiTablePutResult_" json:"success,omitempty"` } -func NewFrontendServiceListSchemaPrivilegeStatusResult() *FrontendServiceListSchemaPrivilegeStatusResult { - return &FrontendServiceListSchemaPrivilegeStatusResult{} +func NewFrontendServiceStreamLoadMultiTablePutResult() *FrontendServiceStreamLoadMultiTablePutResult { + return &FrontendServiceStreamLoadMultiTablePutResult{} } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) InitDefault() { - *p = FrontendServiceListSchemaPrivilegeStatusResult{} +func (p *FrontendServiceStreamLoadMultiTablePutResult) InitDefault() { } -var FrontendServiceListSchemaPrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ +var FrontendServiceStreamLoadMultiTablePutResult_Success_DEFAULT *TStreamLoadMultiTablePutResult_ -func (p *FrontendServiceListSchemaPrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { +func (p *FrontendServiceStreamLoadMultiTablePutResult) GetSuccess() (v *TStreamLoadMultiTablePutResult_) { if !p.IsSetSuccess() { - return FrontendServiceListSchemaPrivilegeStatusResult_Success_DEFAULT + return FrontendServiceStreamLoadMultiTablePutResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TListPrivilegesResult_) +func (p *FrontendServiceStreamLoadMultiTablePutResult) SetSuccess(x interface{}) { + p.Success = x.(*TStreamLoadMultiTablePutResult_) } -var 
fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceStreamLoadMultiTablePutResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadMultiTablePutResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -60925,17 +90331,14 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) Read(iprot thrift.TProt if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -60950,7 +90353,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -60960,17 +90363,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTListPrivilegesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceStreamLoadMultiTablePutResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTStreamLoadMultiTablePutResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadMultiTablePutResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listSchemaPrivilegeStatus_result"); err != nil { + if err = oprot.WriteStructBegin("streamLoadMultiTablePut_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -60978,7 +90382,6 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) Write(oprot thrift.TPro fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -60997,7 +90400,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceStreamLoadMultiTablePutResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -61016,14 +90419,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p 
*FrontendServiceListSchemaPrivilegeStatusResult) String() string { +func (p *FrontendServiceStreamLoadMultiTablePutResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListSchemaPrivilegeStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceStreamLoadMultiTablePutResult(%+v)", *p) + } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) DeepEqual(ano *FrontendServiceListSchemaPrivilegeStatusResult) bool { +func (p *FrontendServiceStreamLoadMultiTablePutResult) DeepEqual(ano *FrontendServiceStreamLoadMultiTablePutResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -61035,7 +90439,7 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) DeepEqual(ano *Frontend return true } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { +func (p *FrontendServiceStreamLoadMultiTablePutResult) Field0DeepEqual(src *TStreamLoadMultiTablePutResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -61043,39 +90447,38 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) Field0DeepEqual(src *TL return true } -type FrontendServiceListUserPrivilegeStatusArgs struct { - Params *TGetTablesParams `thrift:"params,1" frugal:"1,default,TGetTablesParams" json:"params"` +type FrontendServiceSnapshotLoaderReportArgs struct { + Request *TSnapshotLoaderReportRequest `thrift:"request,1" frugal:"1,default,TSnapshotLoaderReportRequest" json:"request"` } -func NewFrontendServiceListUserPrivilegeStatusArgs() *FrontendServiceListUserPrivilegeStatusArgs { - return &FrontendServiceListUserPrivilegeStatusArgs{} +func NewFrontendServiceSnapshotLoaderReportArgs() *FrontendServiceSnapshotLoaderReportArgs { + return &FrontendServiceSnapshotLoaderReportArgs{} } -func (p *FrontendServiceListUserPrivilegeStatusArgs) InitDefault() { - *p = FrontendServiceListUserPrivilegeStatusArgs{} +func (p *FrontendServiceSnapshotLoaderReportArgs) InitDefault() { } -var FrontendServiceListUserPrivilegeStatusArgs_Params_DEFAULT *TGetTablesParams +var FrontendServiceSnapshotLoaderReportArgs_Request_DEFAULT *TSnapshotLoaderReportRequest -func (p *FrontendServiceListUserPrivilegeStatusArgs) GetParams() (v *TGetTablesParams) { - if !p.IsSetParams() { - return FrontendServiceListUserPrivilegeStatusArgs_Params_DEFAULT +func (p *FrontendServiceSnapshotLoaderReportArgs) GetRequest() (v *TSnapshotLoaderReportRequest) { + if !p.IsSetRequest() { + return FrontendServiceSnapshotLoaderReportArgs_Request_DEFAULT } - return p.Params + return p.Request } -func (p *FrontendServiceListUserPrivilegeStatusArgs) SetParams(val *TGetTablesParams) { - p.Params = val +func (p *FrontendServiceSnapshotLoaderReportArgs) SetRequest(val *TSnapshotLoaderReportRequest) { + p.Request = val } -var fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs = map[int16]string{ - 1: "params", +var fieldIDToName_FrontendServiceSnapshotLoaderReportArgs = map[int16]string{ + 1: "request", } -func (p *FrontendServiceListUserPrivilegeStatusArgs) IsSetParams() bool { - return p.Params != nil +func (p *FrontendServiceSnapshotLoaderReportArgs) IsSetRequest() bool { + return p.Request != nil } -func (p *FrontendServiceListUserPrivilegeStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSnapshotLoaderReportArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61099,17 +90502,14 @@ func (p *FrontendServiceListUserPrivilegeStatusArgs) Read(iprot thrift.TProtocol if err = 
p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61124,7 +90524,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61134,17 +90534,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Params = NewTGetTablesParams() - if err := p.Params.Read(iprot); err != nil { +func (p *FrontendServiceSnapshotLoaderReportArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTSnapshotLoaderReportRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceListUserPrivilegeStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSnapshotLoaderReportArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listUserPrivilegeStatus_args"); err != nil { + if err = oprot.WriteStructBegin("snapshotLoaderReport_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -61152,7 +90553,6 @@ func (p *FrontendServiceListUserPrivilegeStatusArgs) Write(oprot thrift.TProtoco fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -61171,11 +90571,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceSnapshotLoaderReportArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.Request.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -61188,66 +90588,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusArgs) String() string { +func (p *FrontendServiceSnapshotLoaderReportArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListUserPrivilegeStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceSnapshotLoaderReportArgs(%+v)", *p) + } -func (p *FrontendServiceListUserPrivilegeStatusArgs) DeepEqual(ano *FrontendServiceListUserPrivilegeStatusArgs) bool { +func (p *FrontendServiceSnapshotLoaderReportArgs) DeepEqual(ano *FrontendServiceSnapshotLoaderReportArgs) bool { if p == ano { return true } else if p == nil || ano == nil 
{ return false } - if !p.Field1DeepEqual(ano.Params) { + if !p.Field1DeepEqual(ano.Request) { return false } return true } -func (p *FrontendServiceListUserPrivilegeStatusArgs) Field1DeepEqual(src *TGetTablesParams) bool { +func (p *FrontendServiceSnapshotLoaderReportArgs) Field1DeepEqual(src *TSnapshotLoaderReportRequest) bool { - if !p.Params.DeepEqual(src) { + if !p.Request.DeepEqual(src) { return false } return true } -type FrontendServiceListUserPrivilegeStatusResult struct { - Success *TListPrivilegesResult_ `thrift:"success,0,optional" frugal:"0,optional,TListPrivilegesResult_" json:"success,omitempty"` +type FrontendServiceSnapshotLoaderReportResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceListUserPrivilegeStatusResult() *FrontendServiceListUserPrivilegeStatusResult { - return &FrontendServiceListUserPrivilegeStatusResult{} +func NewFrontendServiceSnapshotLoaderReportResult() *FrontendServiceSnapshotLoaderReportResult { + return &FrontendServiceSnapshotLoaderReportResult{} } -func (p *FrontendServiceListUserPrivilegeStatusResult) InitDefault() { - *p = FrontendServiceListUserPrivilegeStatusResult{} +func (p *FrontendServiceSnapshotLoaderReportResult) InitDefault() { } -var FrontendServiceListUserPrivilegeStatusResult_Success_DEFAULT *TListPrivilegesResult_ +var FrontendServiceSnapshotLoaderReportResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceListUserPrivilegeStatusResult) GetSuccess() (v *TListPrivilegesResult_) { +func (p *FrontendServiceSnapshotLoaderReportResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceListUserPrivilegeStatusResult_Success_DEFAULT + return FrontendServiceSnapshotLoaderReportResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceListUserPrivilegeStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TListPrivilegesResult_) +func (p *FrontendServiceSnapshotLoaderReportResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceListUserPrivilegeStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceSnapshotLoaderReportResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceListUserPrivilegeStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceSnapshotLoaderReportResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceListUserPrivilegeStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSnapshotLoaderReportResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61271,17 +90671,14 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) Read(iprot thrift.TProtoc if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61296,7 +90693,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusResult[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61306,17 +90703,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTListPrivilegesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceSnapshotLoaderReportResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceListUserPrivilegeStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSnapshotLoaderReportResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("listUserPrivilegeStatus_result"); err != nil { + if err = oprot.WriteStructBegin("snapshotLoaderReport_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -61324,7 +90722,6 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) Write(oprot thrift.TProto fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -61343,7 +90740,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSnapshotLoaderReportResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -61362,14 +90759,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusResult) String() string { +func (p *FrontendServiceSnapshotLoaderReportResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceListUserPrivilegeStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceSnapshotLoaderReportResult(%+v)", *p) + } -func (p *FrontendServiceListUserPrivilegeStatusResult) DeepEqual(ano *FrontendServiceListUserPrivilegeStatusResult) bool { +func (p *FrontendServiceSnapshotLoaderReportResult) DeepEqual(ano *FrontendServiceSnapshotLoaderReportResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -61381,7 +90779,7 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) DeepEqual(ano *FrontendSe return true } -func (p *FrontendServiceListUserPrivilegeStatusResult) Field0DeepEqual(src *TListPrivilegesResult_) bool { +func (p *FrontendServiceSnapshotLoaderReportResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -61389,39 +90787,38 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) Field0DeepEqual(src *TLis return true } -type FrontendServiceUpdateExportTaskStatusArgs struct { - Request *TUpdateExportTaskStatusRequest `thrift:"request,1" frugal:"1,default,TUpdateExportTaskStatusRequest" json:"request"` +type FrontendServicePingArgs struct { + Request *TFrontendPingFrontendRequest `thrift:"request,1" frugal:"1,default,TFrontendPingFrontendRequest" json:"request"` } -func NewFrontendServiceUpdateExportTaskStatusArgs() 
*FrontendServiceUpdateExportTaskStatusArgs { - return &FrontendServiceUpdateExportTaskStatusArgs{} +func NewFrontendServicePingArgs() *FrontendServicePingArgs { + return &FrontendServicePingArgs{} } -func (p *FrontendServiceUpdateExportTaskStatusArgs) InitDefault() { - *p = FrontendServiceUpdateExportTaskStatusArgs{} +func (p *FrontendServicePingArgs) InitDefault() { } -var FrontendServiceUpdateExportTaskStatusArgs_Request_DEFAULT *TUpdateExportTaskStatusRequest +var FrontendServicePingArgs_Request_DEFAULT *TFrontendPingFrontendRequest -func (p *FrontendServiceUpdateExportTaskStatusArgs) GetRequest() (v *TUpdateExportTaskStatusRequest) { +func (p *FrontendServicePingArgs) GetRequest() (v *TFrontendPingFrontendRequest) { if !p.IsSetRequest() { - return FrontendServiceUpdateExportTaskStatusArgs_Request_DEFAULT + return FrontendServicePingArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceUpdateExportTaskStatusArgs) SetRequest(val *TUpdateExportTaskStatusRequest) { +func (p *FrontendServicePingArgs) SetRequest(val *TFrontendPingFrontendRequest) { p.Request = val } -var fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs = map[int16]string{ +var fieldIDToName_FrontendServicePingArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceUpdateExportTaskStatusArgs) IsSetRequest() bool { +func (p *FrontendServicePingArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceUpdateExportTaskStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61445,17 +90842,14 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) Read(iprot thrift.TProtocol) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61470,7 +90864,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61480,17 +90874,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTUpdateExportTaskStatusRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServicePingArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTFrontendPingFrontendRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceUpdateExportTaskStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("updateExportTaskStatus_args"); err != nil { + if err = 
oprot.WriteStructBegin("ping_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -61498,7 +90893,6 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) Write(oprot thrift.TProtocol fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -61517,7 +90911,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -61534,14 +90928,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusArgs) String() string { +func (p *FrontendServicePingArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceUpdateExportTaskStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServicePingArgs(%+v)", *p) + } -func (p *FrontendServiceUpdateExportTaskStatusArgs) DeepEqual(ano *FrontendServiceUpdateExportTaskStatusArgs) bool { +func (p *FrontendServicePingArgs) DeepEqual(ano *FrontendServicePingArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -61553,7 +90948,7 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) DeepEqual(ano *FrontendServi return true } -func (p *FrontendServiceUpdateExportTaskStatusArgs) Field1DeepEqual(src *TUpdateExportTaskStatusRequest) bool { +func (p *FrontendServicePingArgs) Field1DeepEqual(src *TFrontendPingFrontendRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -61561,39 +90956,38 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) Field1DeepEqual(src *TUpdate return true } -type FrontendServiceUpdateExportTaskStatusResult struct { - Success *TFeResult_ `thrift:"success,0,optional" frugal:"0,optional,TFeResult_" json:"success,omitempty"` +type FrontendServicePingResult struct { + Success *TFrontendPingFrontendResult_ `thrift:"success,0,optional" frugal:"0,optional,TFrontendPingFrontendResult_" json:"success,omitempty"` } -func NewFrontendServiceUpdateExportTaskStatusResult() *FrontendServiceUpdateExportTaskStatusResult { - return &FrontendServiceUpdateExportTaskStatusResult{} +func NewFrontendServicePingResult() *FrontendServicePingResult { + return &FrontendServicePingResult{} } -func (p *FrontendServiceUpdateExportTaskStatusResult) InitDefault() { - *p = FrontendServiceUpdateExportTaskStatusResult{} +func (p *FrontendServicePingResult) InitDefault() { } -var FrontendServiceUpdateExportTaskStatusResult_Success_DEFAULT *TFeResult_ +var FrontendServicePingResult_Success_DEFAULT *TFrontendPingFrontendResult_ -func (p *FrontendServiceUpdateExportTaskStatusResult) GetSuccess() (v *TFeResult_) { +func (p *FrontendServicePingResult) GetSuccess() (v *TFrontendPingFrontendResult_) { if !p.IsSetSuccess() { - return FrontendServiceUpdateExportTaskStatusResult_Success_DEFAULT + return FrontendServicePingResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceUpdateExportTaskStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TFeResult_) +func (p *FrontendServicePingResult) SetSuccess(x interface{}) { + p.Success = x.(*TFrontendPingFrontendResult_) } -var fieldIDToName_FrontendServiceUpdateExportTaskStatusResult = map[int16]string{ +var fieldIDToName_FrontendServicePingResult = map[int16]string{ 0: 
"success", } -func (p *FrontendServiceUpdateExportTaskStatusResult) IsSetSuccess() bool { +func (p *FrontendServicePingResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceUpdateExportTaskStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61617,17 +91011,14 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) Read(iprot thrift.TProtoco if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61642,7 +91033,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61652,17 +91043,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTFeResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServicePingResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTFrontendPingFrontendResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceUpdateExportTaskStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("updateExportTaskStatus_result"); err != nil { + if err = oprot.WriteStructBegin("ping_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -61670,7 +91062,6 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) Write(oprot thrift.TProtoc fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -61689,7 +91080,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServicePingResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -61708,14 +91099,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusResult) String() string { +func (p *FrontendServicePingResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceUpdateExportTaskStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServicePingResult(%+v)", *p) + } -func (p *FrontendServiceUpdateExportTaskStatusResult) DeepEqual(ano 
*FrontendServiceUpdateExportTaskStatusResult) bool { +func (p *FrontendServicePingResult) DeepEqual(ano *FrontendServicePingResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -61727,7 +91119,7 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) DeepEqual(ano *FrontendSer return true } -func (p *FrontendServiceUpdateExportTaskStatusResult) Field0DeepEqual(src *TFeResult_) bool { +func (p *FrontendServicePingResult) Field0DeepEqual(src *TFrontendPingFrontendResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -61735,39 +91127,38 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) Field0DeepEqual(src *TFeRe return true } -type FrontendServiceLoadTxnBeginArgs struct { - Request *TLoadTxnBeginRequest `thrift:"request,1" frugal:"1,default,TLoadTxnBeginRequest" json:"request"` +type FrontendServiceInitExternalCtlMetaArgs struct { + Request *TInitExternalCtlMetaRequest `thrift:"request,1" frugal:"1,default,TInitExternalCtlMetaRequest" json:"request"` } -func NewFrontendServiceLoadTxnBeginArgs() *FrontendServiceLoadTxnBeginArgs { - return &FrontendServiceLoadTxnBeginArgs{} +func NewFrontendServiceInitExternalCtlMetaArgs() *FrontendServiceInitExternalCtlMetaArgs { + return &FrontendServiceInitExternalCtlMetaArgs{} } -func (p *FrontendServiceLoadTxnBeginArgs) InitDefault() { - *p = FrontendServiceLoadTxnBeginArgs{} +func (p *FrontendServiceInitExternalCtlMetaArgs) InitDefault() { } -var FrontendServiceLoadTxnBeginArgs_Request_DEFAULT *TLoadTxnBeginRequest +var FrontendServiceInitExternalCtlMetaArgs_Request_DEFAULT *TInitExternalCtlMetaRequest -func (p *FrontendServiceLoadTxnBeginArgs) GetRequest() (v *TLoadTxnBeginRequest) { +func (p *FrontendServiceInitExternalCtlMetaArgs) GetRequest() (v *TInitExternalCtlMetaRequest) { if !p.IsSetRequest() { - return FrontendServiceLoadTxnBeginArgs_Request_DEFAULT + return FrontendServiceInitExternalCtlMetaArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceLoadTxnBeginArgs) SetRequest(val *TLoadTxnBeginRequest) { +func (p *FrontendServiceInitExternalCtlMetaArgs) SetRequest(val *TInitExternalCtlMetaRequest) { p.Request = val } -var fieldIDToName_FrontendServiceLoadTxnBeginArgs = map[int16]string{ +var fieldIDToName_FrontendServiceInitExternalCtlMetaArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceLoadTxnBeginArgs) IsSetRequest() bool { +func (p *FrontendServiceInitExternalCtlMetaArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceLoadTxnBeginArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61791,17 +91182,14 @@ func (p *FrontendServiceLoadTxnBeginArgs) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61816,7 +91204,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginArgs[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61826,17 +91214,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTLoadTxnBeginRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceInitExternalCtlMetaArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTInitExternalCtlMetaRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceLoadTxnBeginArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnBegin_args"); err != nil { + if err = oprot.WriteStructBegin("initExternalCtlMeta_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -61844,7 +91233,6 @@ func (p *FrontendServiceLoadTxnBeginArgs) Write(oprot thrift.TProtocol) (err err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -61863,7 +91251,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -61880,14 +91268,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginArgs) String() string { +func (p *FrontendServiceInitExternalCtlMetaArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnBeginArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceInitExternalCtlMetaArgs(%+v)", *p) + } -func (p *FrontendServiceLoadTxnBeginArgs) DeepEqual(ano *FrontendServiceLoadTxnBeginArgs) bool { +func (p *FrontendServiceInitExternalCtlMetaArgs) DeepEqual(ano *FrontendServiceInitExternalCtlMetaArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -61899,7 +91288,7 @@ func (p *FrontendServiceLoadTxnBeginArgs) DeepEqual(ano *FrontendServiceLoadTxnB return true } -func (p *FrontendServiceLoadTxnBeginArgs) Field1DeepEqual(src *TLoadTxnBeginRequest) bool { +func (p *FrontendServiceInitExternalCtlMetaArgs) Field1DeepEqual(src *TInitExternalCtlMetaRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -61907,39 +91296,38 @@ func (p *FrontendServiceLoadTxnBeginArgs) Field1DeepEqual(src *TLoadTxnBeginRequ return true } -type FrontendServiceLoadTxnBeginResult struct { - Success *TLoadTxnBeginResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnBeginResult_" json:"success,omitempty"` +type FrontendServiceInitExternalCtlMetaResult struct { + Success *TInitExternalCtlMetaResult_ `thrift:"success,0,optional" frugal:"0,optional,TInitExternalCtlMetaResult_" json:"success,omitempty"` } -func NewFrontendServiceLoadTxnBeginResult() *FrontendServiceLoadTxnBeginResult { - return &FrontendServiceLoadTxnBeginResult{} +func 
NewFrontendServiceInitExternalCtlMetaResult() *FrontendServiceInitExternalCtlMetaResult { + return &FrontendServiceInitExternalCtlMetaResult{} } -func (p *FrontendServiceLoadTxnBeginResult) InitDefault() { - *p = FrontendServiceLoadTxnBeginResult{} +func (p *FrontendServiceInitExternalCtlMetaResult) InitDefault() { } -var FrontendServiceLoadTxnBeginResult_Success_DEFAULT *TLoadTxnBeginResult_ +var FrontendServiceInitExternalCtlMetaResult_Success_DEFAULT *TInitExternalCtlMetaResult_ -func (p *FrontendServiceLoadTxnBeginResult) GetSuccess() (v *TLoadTxnBeginResult_) { +func (p *FrontendServiceInitExternalCtlMetaResult) GetSuccess() (v *TInitExternalCtlMetaResult_) { if !p.IsSetSuccess() { - return FrontendServiceLoadTxnBeginResult_Success_DEFAULT + return FrontendServiceInitExternalCtlMetaResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceLoadTxnBeginResult) SetSuccess(x interface{}) { - p.Success = x.(*TLoadTxnBeginResult_) +func (p *FrontendServiceInitExternalCtlMetaResult) SetSuccess(x interface{}) { + p.Success = x.(*TInitExternalCtlMetaResult_) } -var fieldIDToName_FrontendServiceLoadTxnBeginResult = map[int16]string{ +var fieldIDToName_FrontendServiceInitExternalCtlMetaResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceLoadTxnBeginResult) IsSetSuccess() bool { +func (p *FrontendServiceInitExternalCtlMetaResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceLoadTxnBeginResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -61963,17 +91351,14 @@ func (p *FrontendServiceLoadTxnBeginResult) Read(iprot thrift.TProtocol) (err er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -61988,7 +91373,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -61998,17 +91383,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTLoadTxnBeginResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceInitExternalCtlMetaResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTInitExternalCtlMetaResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceLoadTxnBeginResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnBegin_result"); err != nil { + if 
err = oprot.WriteStructBegin("initExternalCtlMeta_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -62016,7 +91402,6 @@ func (p *FrontendServiceLoadTxnBeginResult) Write(oprot thrift.TProtocol) (err e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62035,7 +91420,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInitExternalCtlMetaResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -62054,14 +91439,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginResult) String() string { +func (p *FrontendServiceInitExternalCtlMetaResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnBeginResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceInitExternalCtlMetaResult(%+v)", *p) + } -func (p *FrontendServiceLoadTxnBeginResult) DeepEqual(ano *FrontendServiceLoadTxnBeginResult) bool { +func (p *FrontendServiceInitExternalCtlMetaResult) DeepEqual(ano *FrontendServiceInitExternalCtlMetaResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -62073,7 +91459,7 @@ func (p *FrontendServiceLoadTxnBeginResult) DeepEqual(ano *FrontendServiceLoadTx return true } -func (p *FrontendServiceLoadTxnBeginResult) Field0DeepEqual(src *TLoadTxnBeginResult_) bool { +func (p *FrontendServiceInitExternalCtlMetaResult) Field0DeepEqual(src *TInitExternalCtlMetaResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -62081,39 +91467,38 @@ func (p *FrontendServiceLoadTxnBeginResult) Field0DeepEqual(src *TLoadTxnBeginRe return true } -type FrontendServiceLoadTxnPreCommitArgs struct { - Request *TLoadTxnCommitRequest `thrift:"request,1" frugal:"1,default,TLoadTxnCommitRequest" json:"request"` +type FrontendServiceFetchSchemaTableDataArgs struct { + Request *TFetchSchemaTableDataRequest `thrift:"request,1" frugal:"1,default,TFetchSchemaTableDataRequest" json:"request"` } -func NewFrontendServiceLoadTxnPreCommitArgs() *FrontendServiceLoadTxnPreCommitArgs { - return &FrontendServiceLoadTxnPreCommitArgs{} +func NewFrontendServiceFetchSchemaTableDataArgs() *FrontendServiceFetchSchemaTableDataArgs { + return &FrontendServiceFetchSchemaTableDataArgs{} } -func (p *FrontendServiceLoadTxnPreCommitArgs) InitDefault() { - *p = FrontendServiceLoadTxnPreCommitArgs{} +func (p *FrontendServiceFetchSchemaTableDataArgs) InitDefault() { } -var FrontendServiceLoadTxnPreCommitArgs_Request_DEFAULT *TLoadTxnCommitRequest +var FrontendServiceFetchSchemaTableDataArgs_Request_DEFAULT *TFetchSchemaTableDataRequest -func (p *FrontendServiceLoadTxnPreCommitArgs) GetRequest() (v *TLoadTxnCommitRequest) { +func (p *FrontendServiceFetchSchemaTableDataArgs) GetRequest() (v *TFetchSchemaTableDataRequest) { if !p.IsSetRequest() { - return FrontendServiceLoadTxnPreCommitArgs_Request_DEFAULT + return FrontendServiceFetchSchemaTableDataArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceLoadTxnPreCommitArgs) SetRequest(val *TLoadTxnCommitRequest) { +func (p *FrontendServiceFetchSchemaTableDataArgs) SetRequest(val *TFetchSchemaTableDataRequest) { p.Request = val } -var 
fieldIDToName_FrontendServiceLoadTxnPreCommitArgs = map[int16]string{ +var fieldIDToName_FrontendServiceFetchSchemaTableDataArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceLoadTxnPreCommitArgs) IsSetRequest() bool { +func (p *FrontendServiceFetchSchemaTableDataArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceLoadTxnPreCommitArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62137,17 +91522,14 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -62162,7 +91544,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -62172,17 +91554,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTLoadTxnCommitRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceFetchSchemaTableDataArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTFetchSchemaTableDataRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceLoadTxnPreCommitArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnPreCommit_args"); err != nil { + if err = oprot.WriteStructBegin("fetchSchemaTableData_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -62190,7 +91573,6 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62209,7 +91591,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -62226,14 +91608,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitArgs) String() string { +func (p *FrontendServiceFetchSchemaTableDataArgs) String() string { if p == nil { return "" } - return 
fmt.Sprintf("FrontendServiceLoadTxnPreCommitArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchSchemaTableDataArgs(%+v)", *p) + } -func (p *FrontendServiceLoadTxnPreCommitArgs) DeepEqual(ano *FrontendServiceLoadTxnPreCommitArgs) bool { +func (p *FrontendServiceFetchSchemaTableDataArgs) DeepEqual(ano *FrontendServiceFetchSchemaTableDataArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -62245,7 +91628,7 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) DeepEqual(ano *FrontendServiceLoad return true } -func (p *FrontendServiceLoadTxnPreCommitArgs) Field1DeepEqual(src *TLoadTxnCommitRequest) bool { +func (p *FrontendServiceFetchSchemaTableDataArgs) Field1DeepEqual(src *TFetchSchemaTableDataRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -62253,39 +91636,38 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) Field1DeepEqual(src *TLoadTxnCommi return true } -type FrontendServiceLoadTxnPreCommitResult struct { - Success *TLoadTxnCommitResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnCommitResult_" json:"success,omitempty"` +type FrontendServiceFetchSchemaTableDataResult struct { + Success *TFetchSchemaTableDataResult_ `thrift:"success,0,optional" frugal:"0,optional,TFetchSchemaTableDataResult_" json:"success,omitempty"` } -func NewFrontendServiceLoadTxnPreCommitResult() *FrontendServiceLoadTxnPreCommitResult { - return &FrontendServiceLoadTxnPreCommitResult{} +func NewFrontendServiceFetchSchemaTableDataResult() *FrontendServiceFetchSchemaTableDataResult { + return &FrontendServiceFetchSchemaTableDataResult{} } -func (p *FrontendServiceLoadTxnPreCommitResult) InitDefault() { - *p = FrontendServiceLoadTxnPreCommitResult{} +func (p *FrontendServiceFetchSchemaTableDataResult) InitDefault() { } -var FrontendServiceLoadTxnPreCommitResult_Success_DEFAULT *TLoadTxnCommitResult_ +var FrontendServiceFetchSchemaTableDataResult_Success_DEFAULT *TFetchSchemaTableDataResult_ -func (p *FrontendServiceLoadTxnPreCommitResult) GetSuccess() (v *TLoadTxnCommitResult_) { +func (p *FrontendServiceFetchSchemaTableDataResult) GetSuccess() (v *TFetchSchemaTableDataResult_) { if !p.IsSetSuccess() { - return FrontendServiceLoadTxnPreCommitResult_Success_DEFAULT + return FrontendServiceFetchSchemaTableDataResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceLoadTxnPreCommitResult) SetSuccess(x interface{}) { - p.Success = x.(*TLoadTxnCommitResult_) +func (p *FrontendServiceFetchSchemaTableDataResult) SetSuccess(x interface{}) { + p.Success = x.(*TFetchSchemaTableDataResult_) } -var fieldIDToName_FrontendServiceLoadTxnPreCommitResult = map[int16]string{ +var fieldIDToName_FrontendServiceFetchSchemaTableDataResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceLoadTxnPreCommitResult) IsSetSuccess() bool { +func (p *FrontendServiceFetchSchemaTableDataResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceLoadTxnPreCommitResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62309,17 +91691,14 @@ func (p *FrontendServiceLoadTxnPreCommitResult) Read(iprot thrift.TProtocol) (er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto 
SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -62334,7 +91713,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -62344,17 +91723,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTLoadTxnCommitResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceFetchSchemaTableDataResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTFetchSchemaTableDataResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceLoadTxnPreCommitResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnPreCommit_result"); err != nil { + if err = oprot.WriteStructBegin("fetchSchemaTableData_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -62362,7 +91742,6 @@ func (p *FrontendServiceLoadTxnPreCommitResult) Write(oprot thrift.TProtocol) (e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62381,7 +91760,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSchemaTableDataResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -62400,14 +91779,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitResult) String() string { +func (p *FrontendServiceFetchSchemaTableDataResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnPreCommitResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchSchemaTableDataResult(%+v)", *p) + } -func (p *FrontendServiceLoadTxnPreCommitResult) DeepEqual(ano *FrontendServiceLoadTxnPreCommitResult) bool { +func (p *FrontendServiceFetchSchemaTableDataResult) DeepEqual(ano *FrontendServiceFetchSchemaTableDataResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -62419,7 +91799,7 @@ func (p *FrontendServiceLoadTxnPreCommitResult) DeepEqual(ano *FrontendServiceLo return true } -func (p *FrontendServiceLoadTxnPreCommitResult) Field0DeepEqual(src *TLoadTxnCommitResult_) bool { +func (p *FrontendServiceFetchSchemaTableDataResult) Field0DeepEqual(src *TFetchSchemaTableDataResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -62427,39 +91807,19 @@ func (p *FrontendServiceLoadTxnPreCommitResult) Field0DeepEqual(src *TLoadTxnCom return true } -type 
FrontendServiceLoadTxn2PCArgs struct { - Request *TLoadTxn2PCRequest `thrift:"request,1" frugal:"1,default,TLoadTxn2PCRequest" json:"request"` -} - -func NewFrontendServiceLoadTxn2PCArgs() *FrontendServiceLoadTxn2PCArgs { - return &FrontendServiceLoadTxn2PCArgs{} -} - -func (p *FrontendServiceLoadTxn2PCArgs) InitDefault() { - *p = FrontendServiceLoadTxn2PCArgs{} +type FrontendServiceAcquireTokenArgs struct { } -var FrontendServiceLoadTxn2PCArgs_Request_DEFAULT *TLoadTxn2PCRequest - -func (p *FrontendServiceLoadTxn2PCArgs) GetRequest() (v *TLoadTxn2PCRequest) { - if !p.IsSetRequest() { - return FrontendServiceLoadTxn2PCArgs_Request_DEFAULT - } - return p.Request -} -func (p *FrontendServiceLoadTxn2PCArgs) SetRequest(val *TLoadTxn2PCRequest) { - p.Request = val +func NewFrontendServiceAcquireTokenArgs() *FrontendServiceAcquireTokenArgs { + return &FrontendServiceAcquireTokenArgs{} } -var fieldIDToName_FrontendServiceLoadTxn2PCArgs = map[int16]string{ - 1: "request", +func (p *FrontendServiceAcquireTokenArgs) InitDefault() { } -func (p *FrontendServiceLoadTxn2PCArgs) IsSetRequest() bool { - return p.Request != nil -} +var fieldIDToName_FrontendServiceAcquireTokenArgs = map[int16]string{} -func (p *FrontendServiceLoadTxn2PCArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAcquireTokenArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62476,24 +91836,9 @@ func (p *FrontendServiceLoadTxn2PCArgs) Read(iprot thrift.TProtocol) (err error) if fieldTypeId == thrift.STOP { break } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -62507,10 +91852,8 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCArgs[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -62518,25 +91861,11 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTLoadTxn2PCRequest() - if err := p.Request.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *FrontendServiceLoadTxn2PCArgs) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("loadTxn2PC_args"); err != nil { +func (p *FrontendServiceAcquireTokenArgs) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("acquireToken_args"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } 
- } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62547,91 +91876,61 @@ func (p *FrontendServiceLoadTxn2PCArgs) Write(oprot thrift.TProtocol) (err error return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Request.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *FrontendServiceLoadTxn2PCArgs) String() string { +func (p *FrontendServiceAcquireTokenArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxn2PCArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceAcquireTokenArgs(%+v)", *p) + } -func (p *FrontendServiceLoadTxn2PCArgs) DeepEqual(ano *FrontendServiceLoadTxn2PCArgs) bool { +func (p *FrontendServiceAcquireTokenArgs) DeepEqual(ano *FrontendServiceAcquireTokenArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Request) { - return false - } - return true -} - -func (p *FrontendServiceLoadTxn2PCArgs) Field1DeepEqual(src *TLoadTxn2PCRequest) bool { - - if !p.Request.DeepEqual(src) { - return false - } return true } -type FrontendServiceLoadTxn2PCResult struct { - Success *TLoadTxn2PCResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxn2PCResult_" json:"success,omitempty"` +type FrontendServiceAcquireTokenResult struct { + Success *TMySqlLoadAcquireTokenResult_ `thrift:"success,0,optional" frugal:"0,optional,TMySqlLoadAcquireTokenResult_" json:"success,omitempty"` } -func NewFrontendServiceLoadTxn2PCResult() *FrontendServiceLoadTxn2PCResult { - return &FrontendServiceLoadTxn2PCResult{} +func NewFrontendServiceAcquireTokenResult() *FrontendServiceAcquireTokenResult { + return &FrontendServiceAcquireTokenResult{} } -func (p *FrontendServiceLoadTxn2PCResult) InitDefault() { - *p = FrontendServiceLoadTxn2PCResult{} +func (p *FrontendServiceAcquireTokenResult) InitDefault() { } -var FrontendServiceLoadTxn2PCResult_Success_DEFAULT *TLoadTxn2PCResult_ +var FrontendServiceAcquireTokenResult_Success_DEFAULT *TMySqlLoadAcquireTokenResult_ -func (p *FrontendServiceLoadTxn2PCResult) GetSuccess() (v *TLoadTxn2PCResult_) { +func (p *FrontendServiceAcquireTokenResult) GetSuccess() (v *TMySqlLoadAcquireTokenResult_) { if !p.IsSetSuccess() { - return FrontendServiceLoadTxn2PCResult_Success_DEFAULT + return FrontendServiceAcquireTokenResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceLoadTxn2PCResult) SetSuccess(x interface{}) { - p.Success = x.(*TLoadTxn2PCResult_) +func (p *FrontendServiceAcquireTokenResult) SetSuccess(x interface{}) { + p.Success = x.(*TMySqlLoadAcquireTokenResult_) } -var fieldIDToName_FrontendServiceLoadTxn2PCResult = map[int16]string{ +var 
fieldIDToName_FrontendServiceAcquireTokenResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceLoadTxn2PCResult) IsSetSuccess() bool { +func (p *FrontendServiceAcquireTokenResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceLoadTxn2PCResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAcquireTokenResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62655,17 +91954,14 @@ func (p *FrontendServiceLoadTxn2PCResult) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -62680,7 +91976,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAcquireTokenResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -62690,17 +91986,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTLoadTxn2PCResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceAcquireTokenResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTMySqlLoadAcquireTokenResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceLoadTxn2PCResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAcquireTokenResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxn2PC_result"); err != nil { + if err = oprot.WriteStructBegin("acquireToken_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -62708,7 +92005,6 @@ func (p *FrontendServiceLoadTxn2PCResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62727,7 +92023,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAcquireTokenResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -62746,14 +92042,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCResult) String() string { +func (p *FrontendServiceAcquireTokenResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxn2PCResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceAcquireTokenResult(%+v)", *p) + } -func (p 
*FrontendServiceLoadTxn2PCResult) DeepEqual(ano *FrontendServiceLoadTxn2PCResult) bool { +func (p *FrontendServiceAcquireTokenResult) DeepEqual(ano *FrontendServiceAcquireTokenResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -62765,7 +92062,7 @@ func (p *FrontendServiceLoadTxn2PCResult) DeepEqual(ano *FrontendServiceLoadTxn2 return true } -func (p *FrontendServiceLoadTxn2PCResult) Field0DeepEqual(src *TLoadTxn2PCResult_) bool { +func (p *FrontendServiceAcquireTokenResult) Field0DeepEqual(src *TMySqlLoadAcquireTokenResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -62773,39 +92070,29 @@ func (p *FrontendServiceLoadTxn2PCResult) Field0DeepEqual(src *TLoadTxn2PCResult return true } -type FrontendServiceLoadTxnCommitArgs struct { - Request *TLoadTxnCommitRequest `thrift:"request,1" frugal:"1,default,TLoadTxnCommitRequest" json:"request"` +type FrontendServiceCheckTokenArgs struct { + Token string `thrift:"token,1" frugal:"1,default,string" json:"token"` } -func NewFrontendServiceLoadTxnCommitArgs() *FrontendServiceLoadTxnCommitArgs { - return &FrontendServiceLoadTxnCommitArgs{} +func NewFrontendServiceCheckTokenArgs() *FrontendServiceCheckTokenArgs { + return &FrontendServiceCheckTokenArgs{} } -func (p *FrontendServiceLoadTxnCommitArgs) InitDefault() { - *p = FrontendServiceLoadTxnCommitArgs{} +func (p *FrontendServiceCheckTokenArgs) InitDefault() { } -var FrontendServiceLoadTxnCommitArgs_Request_DEFAULT *TLoadTxnCommitRequest - -func (p *FrontendServiceLoadTxnCommitArgs) GetRequest() (v *TLoadTxnCommitRequest) { - if !p.IsSetRequest() { - return FrontendServiceLoadTxnCommitArgs_Request_DEFAULT - } - return p.Request +func (p *FrontendServiceCheckTokenArgs) GetToken() (v string) { + return p.Token } -func (p *FrontendServiceLoadTxnCommitArgs) SetRequest(val *TLoadTxnCommitRequest) { - p.Request = val +func (p *FrontendServiceCheckTokenArgs) SetToken(val string) { + p.Token = val } -var fieldIDToName_FrontendServiceLoadTxnCommitArgs = map[int16]string{ - 1: "request", +var fieldIDToName_FrontendServiceCheckTokenArgs = map[int16]string{ + 1: "token", } -func (p *FrontendServiceLoadTxnCommitArgs) IsSetRequest() bool { - return p.Request != nil -} - -func (p *FrontendServiceLoadTxnCommitArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckTokenArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62825,21 +92112,18 @@ func (p *FrontendServiceLoadTxnCommitArgs) Read(iprot thrift.TProtocol) (err err switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -62854,7 +92138,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckTokenArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T 
field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -62864,17 +92148,21 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTLoadTxnCommitRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceCheckTokenArgs) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = v } + p.Token = _field return nil } -func (p *FrontendServiceLoadTxnCommitArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckTokenArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnCommit_args"); err != nil { + if err = oprot.WriteStructBegin("checkToken_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -62882,7 +92170,6 @@ func (p *FrontendServiceLoadTxnCommitArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -62901,11 +92188,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { +func (p *FrontendServiceCheckTokenArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := p.Request.Write(oprot); err != nil { + if err := oprot.WriteString(p.Token); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -62918,66 +92205,66 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitArgs) String() string { +func (p *FrontendServiceCheckTokenArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnCommitArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceCheckTokenArgs(%+v)", *p) + } -func (p *FrontendServiceLoadTxnCommitArgs) DeepEqual(ano *FrontendServiceLoadTxnCommitArgs) bool { +func (p *FrontendServiceCheckTokenArgs) DeepEqual(ano *FrontendServiceCheckTokenArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Request) { + if !p.Field1DeepEqual(ano.Token) { return false } return true } -func (p *FrontendServiceLoadTxnCommitArgs) Field1DeepEqual(src *TLoadTxnCommitRequest) bool { +func (p *FrontendServiceCheckTokenArgs) Field1DeepEqual(src string) bool { - if !p.Request.DeepEqual(src) { + if strings.Compare(p.Token, src) != 0 { return false } return true } -type FrontendServiceLoadTxnCommitResult struct { - Success *TLoadTxnCommitResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnCommitResult_" json:"success,omitempty"` +type FrontendServiceCheckTokenResult struct { + Success *bool `thrift:"success,0,optional" frugal:"0,optional,bool" json:"success,omitempty"` } -func NewFrontendServiceLoadTxnCommitResult() *FrontendServiceLoadTxnCommitResult { - return &FrontendServiceLoadTxnCommitResult{} +func NewFrontendServiceCheckTokenResult() *FrontendServiceCheckTokenResult { + return &FrontendServiceCheckTokenResult{} } -func (p *FrontendServiceLoadTxnCommitResult) InitDefault() { - *p = 
FrontendServiceLoadTxnCommitResult{} +func (p *FrontendServiceCheckTokenResult) InitDefault() { } -var FrontendServiceLoadTxnCommitResult_Success_DEFAULT *TLoadTxnCommitResult_ +var FrontendServiceCheckTokenResult_Success_DEFAULT bool -func (p *FrontendServiceLoadTxnCommitResult) GetSuccess() (v *TLoadTxnCommitResult_) { +func (p *FrontendServiceCheckTokenResult) GetSuccess() (v bool) { if !p.IsSetSuccess() { - return FrontendServiceLoadTxnCommitResult_Success_DEFAULT + return FrontendServiceCheckTokenResult_Success_DEFAULT } - return p.Success + return *p.Success } -func (p *FrontendServiceLoadTxnCommitResult) SetSuccess(x interface{}) { - p.Success = x.(*TLoadTxnCommitResult_) +func (p *FrontendServiceCheckTokenResult) SetSuccess(x interface{}) { + p.Success = x.(*bool) } -var fieldIDToName_FrontendServiceLoadTxnCommitResult = map[int16]string{ +var fieldIDToName_FrontendServiceCheckTokenResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceLoadTxnCommitResult) IsSetSuccess() bool { +func (p *FrontendServiceCheckTokenResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceLoadTxnCommitResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckTokenResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -62997,21 +92284,18 @@ func (p *FrontendServiceLoadTxnCommitResult) Read(iprot thrift.TProtocol) (err e switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.BOOL { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63026,7 +92310,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckTokenResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63036,17 +92320,21 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTLoadTxnCommitResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceCheckTokenResult) ReadField0(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } + p.Success = _field return nil } -func (p *FrontendServiceLoadTxnCommitResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckTokenResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnCommit_result"); err != nil { + if err = oprot.WriteStructBegin("checkToken_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63054,7 +92342,6 @@ func (p *FrontendServiceLoadTxnCommitResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - 
} if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63073,12 +92360,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckTokenResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + if err = oprot.WriteFieldBegin("success", thrift.BOOL, 0); err != nil { goto WriteFieldBeginError } - if err := p.Success.Write(oprot); err != nil { + if err := oprot.WriteBool(*p.Success); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -63092,14 +92379,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitResult) String() string { +func (p *FrontendServiceCheckTokenResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnCommitResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceCheckTokenResult(%+v)", *p) + } -func (p *FrontendServiceLoadTxnCommitResult) DeepEqual(ano *FrontendServiceLoadTxnCommitResult) bool { +func (p *FrontendServiceCheckTokenResult) DeepEqual(ano *FrontendServiceCheckTokenResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -63111,47 +92399,51 @@ func (p *FrontendServiceLoadTxnCommitResult) DeepEqual(ano *FrontendServiceLoadT return true } -func (p *FrontendServiceLoadTxnCommitResult) Field0DeepEqual(src *TLoadTxnCommitResult_) bool { +func (p *FrontendServiceCheckTokenResult) Field0DeepEqual(src *bool) bool { - if !p.Success.DeepEqual(src) { + if p.Success == src { + return true + } else if p.Success == nil || src == nil { + return false + } + if *p.Success != *src { return false } return true } -type FrontendServiceLoadTxnRollbackArgs struct { - Request *TLoadTxnRollbackRequest `thrift:"request,1" frugal:"1,default,TLoadTxnRollbackRequest" json:"request"` +type FrontendServiceConfirmUnusedRemoteFilesArgs struct { + Request *TConfirmUnusedRemoteFilesRequest `thrift:"request,1" frugal:"1,default,TConfirmUnusedRemoteFilesRequest" json:"request"` } -func NewFrontendServiceLoadTxnRollbackArgs() *FrontendServiceLoadTxnRollbackArgs { - return &FrontendServiceLoadTxnRollbackArgs{} +func NewFrontendServiceConfirmUnusedRemoteFilesArgs() *FrontendServiceConfirmUnusedRemoteFilesArgs { + return &FrontendServiceConfirmUnusedRemoteFilesArgs{} } -func (p *FrontendServiceLoadTxnRollbackArgs) InitDefault() { - *p = FrontendServiceLoadTxnRollbackArgs{} +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) InitDefault() { } -var FrontendServiceLoadTxnRollbackArgs_Request_DEFAULT *TLoadTxnRollbackRequest +var FrontendServiceConfirmUnusedRemoteFilesArgs_Request_DEFAULT *TConfirmUnusedRemoteFilesRequest -func (p *FrontendServiceLoadTxnRollbackArgs) GetRequest() (v *TLoadTxnRollbackRequest) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) GetRequest() (v *TConfirmUnusedRemoteFilesRequest) { if !p.IsSetRequest() { - return FrontendServiceLoadTxnRollbackArgs_Request_DEFAULT + return FrontendServiceConfirmUnusedRemoteFilesArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceLoadTxnRollbackArgs) SetRequest(val *TLoadTxnRollbackRequest) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) SetRequest(val *TConfirmUnusedRemoteFilesRequest) { p.Request = val } -var 
fieldIDToName_FrontendServiceLoadTxnRollbackArgs = map[int16]string{ +var fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceLoadTxnRollbackArgs) IsSetRequest() bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceLoadTxnRollbackArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -63175,17 +92467,14 @@ func (p *FrontendServiceLoadTxnRollbackArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63200,7 +92489,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63210,17 +92499,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTLoadTxnRollbackRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTConfirmUnusedRemoteFilesRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceLoadTxnRollbackArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnRollback_args"); err != nil { + if err = oprot.WriteStructBegin("confirmUnusedRemoteFiles_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63228,7 +92518,6 @@ func (p *FrontendServiceLoadTxnRollbackArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63247,7 +92536,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -63264,14 +92553,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackArgs) String() string { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) String() string { if p == nil { return "" } - 
return fmt.Sprintf("FrontendServiceLoadTxnRollbackArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceConfirmUnusedRemoteFilesArgs(%+v)", *p) + } -func (p *FrontendServiceLoadTxnRollbackArgs) DeepEqual(ano *FrontendServiceLoadTxnRollbackArgs) bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) DeepEqual(ano *FrontendServiceConfirmUnusedRemoteFilesArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -63283,7 +92573,7 @@ func (p *FrontendServiceLoadTxnRollbackArgs) DeepEqual(ano *FrontendServiceLoadT return true } -func (p *FrontendServiceLoadTxnRollbackArgs) Field1DeepEqual(src *TLoadTxnRollbackRequest) bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Field1DeepEqual(src *TConfirmUnusedRemoteFilesRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -63291,39 +92581,38 @@ func (p *FrontendServiceLoadTxnRollbackArgs) Field1DeepEqual(src *TLoadTxnRollba return true } -type FrontendServiceLoadTxnRollbackResult struct { - Success *TLoadTxnRollbackResult_ `thrift:"success,0,optional" frugal:"0,optional,TLoadTxnRollbackResult_" json:"success,omitempty"` +type FrontendServiceConfirmUnusedRemoteFilesResult struct { + Success *TConfirmUnusedRemoteFilesResult_ `thrift:"success,0,optional" frugal:"0,optional,TConfirmUnusedRemoteFilesResult_" json:"success,omitempty"` } -func NewFrontendServiceLoadTxnRollbackResult() *FrontendServiceLoadTxnRollbackResult { - return &FrontendServiceLoadTxnRollbackResult{} +func NewFrontendServiceConfirmUnusedRemoteFilesResult() *FrontendServiceConfirmUnusedRemoteFilesResult { + return &FrontendServiceConfirmUnusedRemoteFilesResult{} } -func (p *FrontendServiceLoadTxnRollbackResult) InitDefault() { - *p = FrontendServiceLoadTxnRollbackResult{} +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) InitDefault() { } -var FrontendServiceLoadTxnRollbackResult_Success_DEFAULT *TLoadTxnRollbackResult_ +var FrontendServiceConfirmUnusedRemoteFilesResult_Success_DEFAULT *TConfirmUnusedRemoteFilesResult_ -func (p *FrontendServiceLoadTxnRollbackResult) GetSuccess() (v *TLoadTxnRollbackResult_) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) GetSuccess() (v *TConfirmUnusedRemoteFilesResult_) { if !p.IsSetSuccess() { - return FrontendServiceLoadTxnRollbackResult_Success_DEFAULT + return FrontendServiceConfirmUnusedRemoteFilesResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceLoadTxnRollbackResult) SetSuccess(x interface{}) { - p.Success = x.(*TLoadTxnRollbackResult_) +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) SetSuccess(x interface{}) { + p.Success = x.(*TConfirmUnusedRemoteFilesResult_) } -var fieldIDToName_FrontendServiceLoadTxnRollbackResult = map[int16]string{ +var fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceLoadTxnRollbackResult) IsSetSuccess() bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceLoadTxnRollbackResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -63347,17 +92636,14 @@ func (p *FrontendServiceLoadTxnRollbackResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63372,7 +92658,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63382,17 +92668,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTLoadTxnRollbackResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTConfirmUnusedRemoteFilesResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceLoadTxnRollbackResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("loadTxnRollback_result"); err != nil { + if err = oprot.WriteStructBegin("confirmUnusedRemoteFiles_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63400,7 +92687,6 @@ func (p *FrontendServiceLoadTxnRollbackResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63419,7 +92705,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -63438,14 +92724,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackResult) String() string { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceLoadTxnRollbackResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceConfirmUnusedRemoteFilesResult(%+v)", *p) + } -func (p *FrontendServiceLoadTxnRollbackResult) DeepEqual(ano *FrontendServiceLoadTxnRollbackResult) bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) DeepEqual(ano *FrontendServiceConfirmUnusedRemoteFilesResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -63457,7 +92744,7 @@ func (p *FrontendServiceLoadTxnRollbackResult) DeepEqual(ano *FrontendServiceLoa return true } -func (p *FrontendServiceLoadTxnRollbackResult) Field0DeepEqual(src *TLoadTxnRollbackResult_) bool { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Field0DeepEqual(src *TConfirmUnusedRemoteFilesResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -63465,39 
+92752,38 @@ func (p *FrontendServiceLoadTxnRollbackResult) Field0DeepEqual(src *TLoadTxnRoll return true } -type FrontendServiceBeginTxnArgs struct { - Request *TBeginTxnRequest `thrift:"request,1" frugal:"1,default,TBeginTxnRequest" json:"request"` +type FrontendServiceCheckAuthArgs struct { + Request *TCheckAuthRequest `thrift:"request,1" frugal:"1,default,TCheckAuthRequest" json:"request"` } -func NewFrontendServiceBeginTxnArgs() *FrontendServiceBeginTxnArgs { - return &FrontendServiceBeginTxnArgs{} +func NewFrontendServiceCheckAuthArgs() *FrontendServiceCheckAuthArgs { + return &FrontendServiceCheckAuthArgs{} } -func (p *FrontendServiceBeginTxnArgs) InitDefault() { - *p = FrontendServiceBeginTxnArgs{} +func (p *FrontendServiceCheckAuthArgs) InitDefault() { } -var FrontendServiceBeginTxnArgs_Request_DEFAULT *TBeginTxnRequest +var FrontendServiceCheckAuthArgs_Request_DEFAULT *TCheckAuthRequest -func (p *FrontendServiceBeginTxnArgs) GetRequest() (v *TBeginTxnRequest) { +func (p *FrontendServiceCheckAuthArgs) GetRequest() (v *TCheckAuthRequest) { if !p.IsSetRequest() { - return FrontendServiceBeginTxnArgs_Request_DEFAULT + return FrontendServiceCheckAuthArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceBeginTxnArgs) SetRequest(val *TBeginTxnRequest) { +func (p *FrontendServiceCheckAuthArgs) SetRequest(val *TCheckAuthRequest) { p.Request = val } -var fieldIDToName_FrontendServiceBeginTxnArgs = map[int16]string{ +var fieldIDToName_FrontendServiceCheckAuthArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceBeginTxnArgs) IsSetRequest() bool { +func (p *FrontendServiceCheckAuthArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceBeginTxnArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckAuthArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -63521,17 +92807,14 @@ func (p *FrontendServiceBeginTxnArgs) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63546,7 +92829,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63556,17 +92839,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTBeginTxnRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceCheckAuthArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTCheckAuthRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceBeginTxnArgs) Write(oprot thrift.TProtocol) (err error) 
{ +func (p *FrontendServiceCheckAuthArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("beginTxn_args"); err != nil { + if err = oprot.WriteStructBegin("checkAuth_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63574,7 +92858,6 @@ func (p *FrontendServiceBeginTxnArgs) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63593,7 +92876,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckAuthArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -63610,14 +92893,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceBeginTxnArgs) String() string { +func (p *FrontendServiceCheckAuthArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceBeginTxnArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceCheckAuthArgs(%+v)", *p) + } -func (p *FrontendServiceBeginTxnArgs) DeepEqual(ano *FrontendServiceBeginTxnArgs) bool { +func (p *FrontendServiceCheckAuthArgs) DeepEqual(ano *FrontendServiceCheckAuthArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -63629,7 +92913,7 @@ func (p *FrontendServiceBeginTxnArgs) DeepEqual(ano *FrontendServiceBeginTxnArgs return true } -func (p *FrontendServiceBeginTxnArgs) Field1DeepEqual(src *TBeginTxnRequest) bool { +func (p *FrontendServiceCheckAuthArgs) Field1DeepEqual(src *TCheckAuthRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -63637,39 +92921,38 @@ func (p *FrontendServiceBeginTxnArgs) Field1DeepEqual(src *TBeginTxnRequest) boo return true } -type FrontendServiceBeginTxnResult struct { - Success *TBeginTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TBeginTxnResult_" json:"success,omitempty"` +type FrontendServiceCheckAuthResult struct { + Success *TCheckAuthResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckAuthResult_" json:"success,omitempty"` } -func NewFrontendServiceBeginTxnResult() *FrontendServiceBeginTxnResult { - return &FrontendServiceBeginTxnResult{} +func NewFrontendServiceCheckAuthResult() *FrontendServiceCheckAuthResult { + return &FrontendServiceCheckAuthResult{} } -func (p *FrontendServiceBeginTxnResult) InitDefault() { - *p = FrontendServiceBeginTxnResult{} +func (p *FrontendServiceCheckAuthResult) InitDefault() { } -var FrontendServiceBeginTxnResult_Success_DEFAULT *TBeginTxnResult_ +var FrontendServiceCheckAuthResult_Success_DEFAULT *TCheckAuthResult_ -func (p *FrontendServiceBeginTxnResult) GetSuccess() (v *TBeginTxnResult_) { +func (p *FrontendServiceCheckAuthResult) GetSuccess() (v *TCheckAuthResult_) { if !p.IsSetSuccess() { - return FrontendServiceBeginTxnResult_Success_DEFAULT + return FrontendServiceCheckAuthResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceBeginTxnResult) SetSuccess(x interface{}) { - p.Success = x.(*TBeginTxnResult_) +func (p *FrontendServiceCheckAuthResult) SetSuccess(x interface{}) { + p.Success = x.(*TCheckAuthResult_) } -var fieldIDToName_FrontendServiceBeginTxnResult = map[int16]string{ +var fieldIDToName_FrontendServiceCheckAuthResult = map[int16]string{ 0: "success", } -func (p 
*FrontendServiceBeginTxnResult) IsSetSuccess() bool { +func (p *FrontendServiceCheckAuthResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceBeginTxnResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckAuthResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -63693,17 +92976,14 @@ func (p *FrontendServiceBeginTxnResult) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63718,7 +92998,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63728,17 +93008,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTBeginTxnResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceCheckAuthResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTCheckAuthResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceBeginTxnResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckAuthResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("beginTxn_result"); err != nil { + if err = oprot.WriteStructBegin("checkAuth_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63746,7 +93027,6 @@ func (p *FrontendServiceBeginTxnResult) Write(oprot thrift.TProtocol) (err error fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63765,7 +93045,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCheckAuthResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -63784,14 +93064,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceBeginTxnResult) String() string { +func (p *FrontendServiceCheckAuthResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceBeginTxnResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceCheckAuthResult(%+v)", *p) + } -func (p *FrontendServiceBeginTxnResult) DeepEqual(ano *FrontendServiceBeginTxnResult) bool { +func (p *FrontendServiceCheckAuthResult) DeepEqual(ano *FrontendServiceCheckAuthResult) bool { if 
p == ano { return true } else if p == nil || ano == nil { @@ -63803,7 +93084,7 @@ func (p *FrontendServiceBeginTxnResult) DeepEqual(ano *FrontendServiceBeginTxnRe return true } -func (p *FrontendServiceBeginTxnResult) Field0DeepEqual(src *TBeginTxnResult_) bool { +func (p *FrontendServiceCheckAuthResult) Field0DeepEqual(src *TCheckAuthResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -63811,39 +93092,38 @@ func (p *FrontendServiceBeginTxnResult) Field0DeepEqual(src *TBeginTxnResult_) b return true } -type FrontendServiceCommitTxnArgs struct { - Request *TCommitTxnRequest `thrift:"request,1" frugal:"1,default,TCommitTxnRequest" json:"request"` +type FrontendServiceGetQueryStatsArgs struct { + Request *TGetQueryStatsRequest `thrift:"request,1" frugal:"1,default,TGetQueryStatsRequest" json:"request"` } -func NewFrontendServiceCommitTxnArgs() *FrontendServiceCommitTxnArgs { - return &FrontendServiceCommitTxnArgs{} +func NewFrontendServiceGetQueryStatsArgs() *FrontendServiceGetQueryStatsArgs { + return &FrontendServiceGetQueryStatsArgs{} } -func (p *FrontendServiceCommitTxnArgs) InitDefault() { - *p = FrontendServiceCommitTxnArgs{} +func (p *FrontendServiceGetQueryStatsArgs) InitDefault() { } -var FrontendServiceCommitTxnArgs_Request_DEFAULT *TCommitTxnRequest +var FrontendServiceGetQueryStatsArgs_Request_DEFAULT *TGetQueryStatsRequest -func (p *FrontendServiceCommitTxnArgs) GetRequest() (v *TCommitTxnRequest) { +func (p *FrontendServiceGetQueryStatsArgs) GetRequest() (v *TGetQueryStatsRequest) { if !p.IsSetRequest() { - return FrontendServiceCommitTxnArgs_Request_DEFAULT + return FrontendServiceGetQueryStatsArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceCommitTxnArgs) SetRequest(val *TCommitTxnRequest) { +func (p *FrontendServiceGetQueryStatsArgs) SetRequest(val *TGetQueryStatsRequest) { p.Request = val } -var fieldIDToName_FrontendServiceCommitTxnArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetQueryStatsArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceCommitTxnArgs) IsSetRequest() bool { +func (p *FrontendServiceGetQueryStatsArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceCommitTxnArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -63867,17 +93147,14 @@ func (p *FrontendServiceCommitTxnArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -63892,7 +93169,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -63902,17 +93179,18 @@ ReadStructEndError: return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCommitTxnArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTCommitTxnRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetQueryStatsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetQueryStatsRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceCommitTxnArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("commitTxn_args"); err != nil { + if err = oprot.WriteStructBegin("getQueryStats_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -63920,7 +93198,6 @@ func (p *FrontendServiceCommitTxnArgs) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -63939,7 +93216,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCommitTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -63956,14 +93233,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceCommitTxnArgs) String() string { +func (p *FrontendServiceGetQueryStatsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceCommitTxnArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetQueryStatsArgs(%+v)", *p) + } -func (p *FrontendServiceCommitTxnArgs) DeepEqual(ano *FrontendServiceCommitTxnArgs) bool { +func (p *FrontendServiceGetQueryStatsArgs) DeepEqual(ano *FrontendServiceGetQueryStatsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -63975,7 +93253,7 @@ func (p *FrontendServiceCommitTxnArgs) DeepEqual(ano *FrontendServiceCommitTxnAr return true } -func (p *FrontendServiceCommitTxnArgs) Field1DeepEqual(src *TCommitTxnRequest) bool { +func (p *FrontendServiceGetQueryStatsArgs) Field1DeepEqual(src *TGetQueryStatsRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -63983,39 +93261,38 @@ func (p *FrontendServiceCommitTxnArgs) Field1DeepEqual(src *TCommitTxnRequest) b return true } -type FrontendServiceCommitTxnResult struct { - Success *TCommitTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TCommitTxnResult_" json:"success,omitempty"` +type FrontendServiceGetQueryStatsResult struct { + Success *TQueryStatsResult_ `thrift:"success,0,optional" frugal:"0,optional,TQueryStatsResult_" json:"success,omitempty"` } -func NewFrontendServiceCommitTxnResult() *FrontendServiceCommitTxnResult { - return &FrontendServiceCommitTxnResult{} +func NewFrontendServiceGetQueryStatsResult() *FrontendServiceGetQueryStatsResult { + return &FrontendServiceGetQueryStatsResult{} } -func (p *FrontendServiceCommitTxnResult) InitDefault() { - *p = FrontendServiceCommitTxnResult{} +func (p *FrontendServiceGetQueryStatsResult) InitDefault() { } -var FrontendServiceCommitTxnResult_Success_DEFAULT *TCommitTxnResult_ +var FrontendServiceGetQueryStatsResult_Success_DEFAULT *TQueryStatsResult_ -func (p *FrontendServiceCommitTxnResult) GetSuccess() (v 
*TCommitTxnResult_) { +func (p *FrontendServiceGetQueryStatsResult) GetSuccess() (v *TQueryStatsResult_) { if !p.IsSetSuccess() { - return FrontendServiceCommitTxnResult_Success_DEFAULT + return FrontendServiceGetQueryStatsResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceCommitTxnResult) SetSuccess(x interface{}) { - p.Success = x.(*TCommitTxnResult_) +func (p *FrontendServiceGetQueryStatsResult) SetSuccess(x interface{}) { + p.Success = x.(*TQueryStatsResult_) } -var fieldIDToName_FrontendServiceCommitTxnResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetQueryStatsResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceCommitTxnResult) IsSetSuccess() bool { +func (p *FrontendServiceGetQueryStatsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceCommitTxnResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64039,17 +93316,14 @@ func (p *FrontendServiceCommitTxnResult) Read(iprot thrift.TProtocol) (err error if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64064,7 +93338,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64074,17 +93348,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCommitTxnResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTCommitTxnResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetQueryStatsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTQueryStatsResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceCommitTxnResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("commitTxn_result"); err != nil { + if err = oprot.WriteStructBegin("getQueryStats_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64092,7 +93367,6 @@ func (p *FrontendServiceCommitTxnResult) Write(oprot thrift.TProtocol) (err erro fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64111,7 +93385,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCommitTxnResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetQueryStatsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if 
err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -64130,14 +93404,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceCommitTxnResult) String() string { +func (p *FrontendServiceGetQueryStatsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceCommitTxnResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetQueryStatsResult(%+v)", *p) + } -func (p *FrontendServiceCommitTxnResult) DeepEqual(ano *FrontendServiceCommitTxnResult) bool { +func (p *FrontendServiceGetQueryStatsResult) DeepEqual(ano *FrontendServiceGetQueryStatsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -64149,7 +93424,7 @@ func (p *FrontendServiceCommitTxnResult) DeepEqual(ano *FrontendServiceCommitTxn return true } -func (p *FrontendServiceCommitTxnResult) Field0DeepEqual(src *TCommitTxnResult_) bool { +func (p *FrontendServiceGetQueryStatsResult) Field0DeepEqual(src *TQueryStatsResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -64157,39 +93432,38 @@ func (p *FrontendServiceCommitTxnResult) Field0DeepEqual(src *TCommitTxnResult_) return true } -type FrontendServiceRollbackTxnArgs struct { - Request *TRollbackTxnRequest `thrift:"request,1" frugal:"1,default,TRollbackTxnRequest" json:"request"` +type FrontendServiceGetTabletReplicaInfosArgs struct { + Request *TGetTabletReplicaInfosRequest `thrift:"request,1" frugal:"1,default,TGetTabletReplicaInfosRequest" json:"request"` } -func NewFrontendServiceRollbackTxnArgs() *FrontendServiceRollbackTxnArgs { - return &FrontendServiceRollbackTxnArgs{} +func NewFrontendServiceGetTabletReplicaInfosArgs() *FrontendServiceGetTabletReplicaInfosArgs { + return &FrontendServiceGetTabletReplicaInfosArgs{} } -func (p *FrontendServiceRollbackTxnArgs) InitDefault() { - *p = FrontendServiceRollbackTxnArgs{} +func (p *FrontendServiceGetTabletReplicaInfosArgs) InitDefault() { } -var FrontendServiceRollbackTxnArgs_Request_DEFAULT *TRollbackTxnRequest +var FrontendServiceGetTabletReplicaInfosArgs_Request_DEFAULT *TGetTabletReplicaInfosRequest -func (p *FrontendServiceRollbackTxnArgs) GetRequest() (v *TRollbackTxnRequest) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) GetRequest() (v *TGetTabletReplicaInfosRequest) { if !p.IsSetRequest() { - return FrontendServiceRollbackTxnArgs_Request_DEFAULT + return FrontendServiceGetTabletReplicaInfosArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceRollbackTxnArgs) SetRequest(val *TRollbackTxnRequest) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) SetRequest(val *TGetTabletReplicaInfosRequest) { p.Request = val } -var fieldIDToName_FrontendServiceRollbackTxnArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceRollbackTxnArgs) IsSetRequest() bool { +func (p *FrontendServiceGetTabletReplicaInfosArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceRollbackTxnArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64213,17 +93487,14 @@ func (p *FrontendServiceRollbackTxnArgs) Read(iprot thrift.TProtocol) (err error if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else 
if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64238,7 +93509,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64248,17 +93519,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTRollbackTxnRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetTabletReplicaInfosArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetTabletReplicaInfosRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceRollbackTxnArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("rollbackTxn_args"); err != nil { + if err = oprot.WriteStructBegin("getTabletReplicaInfos_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64266,7 +93538,6 @@ func (p *FrontendServiceRollbackTxnArgs) Write(oprot thrift.TProtocol) (err erro fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64285,7 +93556,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -64302,14 +93573,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceRollbackTxnArgs) String() string { +func (p *FrontendServiceGetTabletReplicaInfosArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceRollbackTxnArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetTabletReplicaInfosArgs(%+v)", *p) + } -func (p *FrontendServiceRollbackTxnArgs) DeepEqual(ano *FrontendServiceRollbackTxnArgs) bool { +func (p *FrontendServiceGetTabletReplicaInfosArgs) DeepEqual(ano *FrontendServiceGetTabletReplicaInfosArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -64321,7 +93593,7 @@ func (p *FrontendServiceRollbackTxnArgs) DeepEqual(ano *FrontendServiceRollbackT return true } -func (p *FrontendServiceRollbackTxnArgs) Field1DeepEqual(src *TRollbackTxnRequest) bool { +func (p *FrontendServiceGetTabletReplicaInfosArgs) Field1DeepEqual(src *TGetTabletReplicaInfosRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -64329,39 +93601,38 @@ func (p *FrontendServiceRollbackTxnArgs) Field1DeepEqual(src *TRollbackTxnReques 
return true } -type FrontendServiceRollbackTxnResult struct { - Success *TRollbackTxnResult_ `thrift:"success,0,optional" frugal:"0,optional,TRollbackTxnResult_" json:"success,omitempty"` +type FrontendServiceGetTabletReplicaInfosResult struct { + Success *TGetTabletReplicaInfosResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetTabletReplicaInfosResult_" json:"success,omitempty"` } -func NewFrontendServiceRollbackTxnResult() *FrontendServiceRollbackTxnResult { - return &FrontendServiceRollbackTxnResult{} +func NewFrontendServiceGetTabletReplicaInfosResult() *FrontendServiceGetTabletReplicaInfosResult { + return &FrontendServiceGetTabletReplicaInfosResult{} } -func (p *FrontendServiceRollbackTxnResult) InitDefault() { - *p = FrontendServiceRollbackTxnResult{} +func (p *FrontendServiceGetTabletReplicaInfosResult) InitDefault() { } -var FrontendServiceRollbackTxnResult_Success_DEFAULT *TRollbackTxnResult_ +var FrontendServiceGetTabletReplicaInfosResult_Success_DEFAULT *TGetTabletReplicaInfosResult_ -func (p *FrontendServiceRollbackTxnResult) GetSuccess() (v *TRollbackTxnResult_) { +func (p *FrontendServiceGetTabletReplicaInfosResult) GetSuccess() (v *TGetTabletReplicaInfosResult_) { if !p.IsSetSuccess() { - return FrontendServiceRollbackTxnResult_Success_DEFAULT + return FrontendServiceGetTabletReplicaInfosResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceRollbackTxnResult) SetSuccess(x interface{}) { - p.Success = x.(*TRollbackTxnResult_) +func (p *FrontendServiceGetTabletReplicaInfosResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetTabletReplicaInfosResult_) } -var fieldIDToName_FrontendServiceRollbackTxnResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetTabletReplicaInfosResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceRollbackTxnResult) IsSetSuccess() bool { +func (p *FrontendServiceGetTabletReplicaInfosResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceRollbackTxnResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64385,17 +93656,14 @@ func (p *FrontendServiceRollbackTxnResult) Read(iprot thrift.TProtocol) (err err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64410,7 +93678,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64420,17 +93688,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTRollbackTxnResult_() - if err := 
p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetTabletReplicaInfosResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetTabletReplicaInfosResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceRollbackTxnResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("rollbackTxn_result"); err != nil { + if err = oprot.WriteStructBegin("getTabletReplicaInfos_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64438,7 +93707,6 @@ func (p *FrontendServiceRollbackTxnResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64457,7 +93725,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetTabletReplicaInfosResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -64476,14 +93744,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceRollbackTxnResult) String() string { +func (p *FrontendServiceGetTabletReplicaInfosResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceRollbackTxnResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetTabletReplicaInfosResult(%+v)", *p) + } -func (p *FrontendServiceRollbackTxnResult) DeepEqual(ano *FrontendServiceRollbackTxnResult) bool { +func (p *FrontendServiceGetTabletReplicaInfosResult) DeepEqual(ano *FrontendServiceGetTabletReplicaInfosResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -64495,7 +93764,7 @@ func (p *FrontendServiceRollbackTxnResult) DeepEqual(ano *FrontendServiceRollbac return true } -func (p *FrontendServiceRollbackTxnResult) Field0DeepEqual(src *TRollbackTxnResult_) bool { +func (p *FrontendServiceGetTabletReplicaInfosResult) Field0DeepEqual(src *TGetTabletReplicaInfosResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -64503,39 +93772,38 @@ func (p *FrontendServiceRollbackTxnResult) Field0DeepEqual(src *TRollbackTxnResu return true } -type FrontendServiceGetBinlogArgs struct { - Request *TGetBinlogRequest `thrift:"request,1" frugal:"1,default,TGetBinlogRequest" json:"request"` +type FrontendServiceAddPlsqlStoredProcedureArgs struct { + Request *TAddPlsqlStoredProcedureRequest `thrift:"request,1" frugal:"1,default,TAddPlsqlStoredProcedureRequest" json:"request"` } -func NewFrontendServiceGetBinlogArgs() *FrontendServiceGetBinlogArgs { - return &FrontendServiceGetBinlogArgs{} +func NewFrontendServiceAddPlsqlStoredProcedureArgs() *FrontendServiceAddPlsqlStoredProcedureArgs { + return &FrontendServiceAddPlsqlStoredProcedureArgs{} } -func (p *FrontendServiceGetBinlogArgs) InitDefault() { - *p = FrontendServiceGetBinlogArgs{} +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) InitDefault() { } -var FrontendServiceGetBinlogArgs_Request_DEFAULT *TGetBinlogRequest +var FrontendServiceAddPlsqlStoredProcedureArgs_Request_DEFAULT *TAddPlsqlStoredProcedureRequest -func (p *FrontendServiceGetBinlogArgs) GetRequest() (v *TGetBinlogRequest) { 
+func (p *FrontendServiceAddPlsqlStoredProcedureArgs) GetRequest() (v *TAddPlsqlStoredProcedureRequest) { if !p.IsSetRequest() { - return FrontendServiceGetBinlogArgs_Request_DEFAULT + return FrontendServiceAddPlsqlStoredProcedureArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetBinlogArgs) SetRequest(val *TGetBinlogRequest) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) SetRequest(val *TAddPlsqlStoredProcedureRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetBinlogArgs = map[int16]string{ +var fieldIDToName_FrontendServiceAddPlsqlStoredProcedureArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetBinlogArgs) IsSetRequest() bool { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetBinlogArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64559,17 +93827,14 @@ func (p *FrontendServiceGetBinlogArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64584,7 +93849,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlStoredProcedureArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64594,17 +93859,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetBinlogRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTAddPlsqlStoredProcedureRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetBinlogArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getBinlog_args"); err != nil { + if err = oprot.WriteStructBegin("addPlsqlStoredProcedure_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64612,7 +93878,6 @@ func (p *FrontendServiceGetBinlogArgs) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64631,7 +93896,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) writeField1(oprot 
thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -64648,14 +93913,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetBinlogArgs) String() string { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetBinlogArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceAddPlsqlStoredProcedureArgs(%+v)", *p) + } -func (p *FrontendServiceGetBinlogArgs) DeepEqual(ano *FrontendServiceGetBinlogArgs) bool { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) DeepEqual(ano *FrontendServiceAddPlsqlStoredProcedureArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -64667,7 +93933,7 @@ func (p *FrontendServiceGetBinlogArgs) DeepEqual(ano *FrontendServiceGetBinlogAr return true } -func (p *FrontendServiceGetBinlogArgs) Field1DeepEqual(src *TGetBinlogRequest) bool { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) Field1DeepEqual(src *TAddPlsqlStoredProcedureRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -64675,39 +93941,38 @@ func (p *FrontendServiceGetBinlogArgs) Field1DeepEqual(src *TGetBinlogRequest) b return true } -type FrontendServiceGetBinlogResult struct { - Success *TGetBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetBinlogResult_" json:"success,omitempty"` +type FrontendServiceAddPlsqlStoredProcedureResult struct { + Success *TPlsqlStoredProcedureResult_ `thrift:"success,0,optional" frugal:"0,optional,TPlsqlStoredProcedureResult_" json:"success,omitempty"` } -func NewFrontendServiceGetBinlogResult() *FrontendServiceGetBinlogResult { - return &FrontendServiceGetBinlogResult{} +func NewFrontendServiceAddPlsqlStoredProcedureResult() *FrontendServiceAddPlsqlStoredProcedureResult { + return &FrontendServiceAddPlsqlStoredProcedureResult{} } -func (p *FrontendServiceGetBinlogResult) InitDefault() { - *p = FrontendServiceGetBinlogResult{} +func (p *FrontendServiceAddPlsqlStoredProcedureResult) InitDefault() { } -var FrontendServiceGetBinlogResult_Success_DEFAULT *TGetBinlogResult_ +var FrontendServiceAddPlsqlStoredProcedureResult_Success_DEFAULT *TPlsqlStoredProcedureResult_ -func (p *FrontendServiceGetBinlogResult) GetSuccess() (v *TGetBinlogResult_) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) GetSuccess() (v *TPlsqlStoredProcedureResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetBinlogResult_Success_DEFAULT + return FrontendServiceAddPlsqlStoredProcedureResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetBinlogResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetBinlogResult_) +func (p *FrontendServiceAddPlsqlStoredProcedureResult) SetSuccess(x interface{}) { + p.Success = x.(*TPlsqlStoredProcedureResult_) } -var fieldIDToName_FrontendServiceGetBinlogResult = map[int16]string{ +var fieldIDToName_FrontendServiceAddPlsqlStoredProcedureResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetBinlogResult) IsSetSuccess() bool { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetBinlogResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64731,17 +93996,14 @@ func (p *FrontendServiceGetBinlogResult) 
Read(iprot thrift.TProtocol) (err error if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64756,7 +94018,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlStoredProcedureResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64766,17 +94028,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetBinlogResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTPlsqlStoredProcedureResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetBinlogResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getBinlog_result"); err != nil { + if err = oprot.WriteStructBegin("addPlsqlStoredProcedure_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64784,7 +94047,6 @@ func (p *FrontendServiceGetBinlogResult) Write(oprot thrift.TProtocol) (err erro fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64803,7 +94065,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -64822,14 +94084,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetBinlogResult) String() string { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetBinlogResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceAddPlsqlStoredProcedureResult(%+v)", *p) + } -func (p *FrontendServiceGetBinlogResult) DeepEqual(ano *FrontendServiceGetBinlogResult) bool { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) DeepEqual(ano *FrontendServiceAddPlsqlStoredProcedureResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -64841,7 +94104,7 @@ func (p *FrontendServiceGetBinlogResult) DeepEqual(ano *FrontendServiceGetBinlog return true } -func (p *FrontendServiceGetBinlogResult) Field0DeepEqual(src *TGetBinlogResult_) bool { +func (p 
*FrontendServiceAddPlsqlStoredProcedureResult) Field0DeepEqual(src *TPlsqlStoredProcedureResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -64849,39 +94112,38 @@ func (p *FrontendServiceGetBinlogResult) Field0DeepEqual(src *TGetBinlogResult_) return true } -type FrontendServiceGetSnapshotArgs struct { - Request *TGetSnapshotRequest `thrift:"request,1" frugal:"1,default,TGetSnapshotRequest" json:"request"` +type FrontendServiceDropPlsqlStoredProcedureArgs struct { + Request *TDropPlsqlStoredProcedureRequest `thrift:"request,1" frugal:"1,default,TDropPlsqlStoredProcedureRequest" json:"request"` } -func NewFrontendServiceGetSnapshotArgs() *FrontendServiceGetSnapshotArgs { - return &FrontendServiceGetSnapshotArgs{} +func NewFrontendServiceDropPlsqlStoredProcedureArgs() *FrontendServiceDropPlsqlStoredProcedureArgs { + return &FrontendServiceDropPlsqlStoredProcedureArgs{} } -func (p *FrontendServiceGetSnapshotArgs) InitDefault() { - *p = FrontendServiceGetSnapshotArgs{} +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) InitDefault() { } -var FrontendServiceGetSnapshotArgs_Request_DEFAULT *TGetSnapshotRequest +var FrontendServiceDropPlsqlStoredProcedureArgs_Request_DEFAULT *TDropPlsqlStoredProcedureRequest -func (p *FrontendServiceGetSnapshotArgs) GetRequest() (v *TGetSnapshotRequest) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) GetRequest() (v *TDropPlsqlStoredProcedureRequest) { if !p.IsSetRequest() { - return FrontendServiceGetSnapshotArgs_Request_DEFAULT + return FrontendServiceDropPlsqlStoredProcedureArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetSnapshotArgs) SetRequest(val *TGetSnapshotRequest) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) SetRequest(val *TDropPlsqlStoredProcedureRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetSnapshotArgs = map[int16]string{ +var fieldIDToName_FrontendServiceDropPlsqlStoredProcedureArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetSnapshotArgs) IsSetRequest() bool { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -64905,17 +94167,14 @@ func (p *FrontendServiceGetSnapshotArgs) Read(iprot thrift.TProtocol) (err error if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -64930,7 +94189,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlStoredProcedureArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -64940,17 +94199,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetSnapshotRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTDropPlsqlStoredProcedureRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getSnapshot_args"); err != nil { + if err = oprot.WriteStructBegin("dropPlsqlStoredProcedure_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -64958,7 +94218,6 @@ func (p *FrontendServiceGetSnapshotArgs) Write(oprot thrift.TProtocol) (err erro fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -64977,7 +94236,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -64994,14 +94253,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetSnapshotArgs) String() string { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetSnapshotArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceDropPlsqlStoredProcedureArgs(%+v)", *p) + } -func (p *FrontendServiceGetSnapshotArgs) DeepEqual(ano *FrontendServiceGetSnapshotArgs) bool { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) DeepEqual(ano *FrontendServiceDropPlsqlStoredProcedureArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65013,7 +94273,7 @@ func (p *FrontendServiceGetSnapshotArgs) DeepEqual(ano *FrontendServiceGetSnapsh return true } -func (p *FrontendServiceGetSnapshotArgs) Field1DeepEqual(src *TGetSnapshotRequest) bool { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) Field1DeepEqual(src *TDropPlsqlStoredProcedureRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -65021,39 +94281,38 @@ func (p *FrontendServiceGetSnapshotArgs) Field1DeepEqual(src *TGetSnapshotReques return true } -type FrontendServiceGetSnapshotResult struct { - Success *TGetSnapshotResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetSnapshotResult_" json:"success,omitempty"` +type FrontendServiceDropPlsqlStoredProcedureResult struct { + Success *TPlsqlStoredProcedureResult_ `thrift:"success,0,optional" frugal:"0,optional,TPlsqlStoredProcedureResult_" json:"success,omitempty"` } -func NewFrontendServiceGetSnapshotResult() *FrontendServiceGetSnapshotResult { - return &FrontendServiceGetSnapshotResult{} +func NewFrontendServiceDropPlsqlStoredProcedureResult() *FrontendServiceDropPlsqlStoredProcedureResult { + return &FrontendServiceDropPlsqlStoredProcedureResult{} } -func (p *FrontendServiceGetSnapshotResult) InitDefault() { - *p = FrontendServiceGetSnapshotResult{} +func (p *FrontendServiceDropPlsqlStoredProcedureResult) InitDefault() { } -var 
FrontendServiceGetSnapshotResult_Success_DEFAULT *TGetSnapshotResult_ +var FrontendServiceDropPlsqlStoredProcedureResult_Success_DEFAULT *TPlsqlStoredProcedureResult_ -func (p *FrontendServiceGetSnapshotResult) GetSuccess() (v *TGetSnapshotResult_) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) GetSuccess() (v *TPlsqlStoredProcedureResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetSnapshotResult_Success_DEFAULT + return FrontendServiceDropPlsqlStoredProcedureResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetSnapshotResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetSnapshotResult_) +func (p *FrontendServiceDropPlsqlStoredProcedureResult) SetSuccess(x interface{}) { + p.Success = x.(*TPlsqlStoredProcedureResult_) } -var fieldIDToName_FrontendServiceGetSnapshotResult = map[int16]string{ +var fieldIDToName_FrontendServiceDropPlsqlStoredProcedureResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetSnapshotResult) IsSetSuccess() bool { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetSnapshotResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65077,17 +94336,14 @@ func (p *FrontendServiceGetSnapshotResult) Read(iprot thrift.TProtocol) (err err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65102,7 +94358,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlStoredProcedureResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65112,17 +94368,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetSnapshotResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTPlsqlStoredProcedureResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetSnapshotResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getSnapshot_result"); err != nil { + if err = oprot.WriteStructBegin("dropPlsqlStoredProcedure_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65130,7 +94387,6 @@ func (p *FrontendServiceGetSnapshotResult) Write(oprot thrift.TProtocol) (err er fieldId = 0 goto WriteFieldError } - } if err = 
oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -65149,7 +94405,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -65168,14 +94424,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetSnapshotResult) String() string { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetSnapshotResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceDropPlsqlStoredProcedureResult(%+v)", *p) + } -func (p *FrontendServiceGetSnapshotResult) DeepEqual(ano *FrontendServiceGetSnapshotResult) bool { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) DeepEqual(ano *FrontendServiceDropPlsqlStoredProcedureResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65187,7 +94444,7 @@ func (p *FrontendServiceGetSnapshotResult) DeepEqual(ano *FrontendServiceGetSnap return true } -func (p *FrontendServiceGetSnapshotResult) Field0DeepEqual(src *TGetSnapshotResult_) bool { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) Field0DeepEqual(src *TPlsqlStoredProcedureResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -65195,39 +94452,38 @@ func (p *FrontendServiceGetSnapshotResult) Field0DeepEqual(src *TGetSnapshotResu return true } -type FrontendServiceRestoreSnapshotArgs struct { - Request *TRestoreSnapshotRequest `thrift:"request,1" frugal:"1,default,TRestoreSnapshotRequest" json:"request"` +type FrontendServiceAddPlsqlPackageArgs struct { + Request *TAddPlsqlPackageRequest `thrift:"request,1" frugal:"1,default,TAddPlsqlPackageRequest" json:"request"` } -func NewFrontendServiceRestoreSnapshotArgs() *FrontendServiceRestoreSnapshotArgs { - return &FrontendServiceRestoreSnapshotArgs{} +func NewFrontendServiceAddPlsqlPackageArgs() *FrontendServiceAddPlsqlPackageArgs { + return &FrontendServiceAddPlsqlPackageArgs{} } -func (p *FrontendServiceRestoreSnapshotArgs) InitDefault() { - *p = FrontendServiceRestoreSnapshotArgs{} +func (p *FrontendServiceAddPlsqlPackageArgs) InitDefault() { } -var FrontendServiceRestoreSnapshotArgs_Request_DEFAULT *TRestoreSnapshotRequest +var FrontendServiceAddPlsqlPackageArgs_Request_DEFAULT *TAddPlsqlPackageRequest -func (p *FrontendServiceRestoreSnapshotArgs) GetRequest() (v *TRestoreSnapshotRequest) { +func (p *FrontendServiceAddPlsqlPackageArgs) GetRequest() (v *TAddPlsqlPackageRequest) { if !p.IsSetRequest() { - return FrontendServiceRestoreSnapshotArgs_Request_DEFAULT + return FrontendServiceAddPlsqlPackageArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceRestoreSnapshotArgs) SetRequest(val *TRestoreSnapshotRequest) { +func (p *FrontendServiceAddPlsqlPackageArgs) SetRequest(val *TAddPlsqlPackageRequest) { p.Request = val } -var fieldIDToName_FrontendServiceRestoreSnapshotArgs = map[int16]string{ +var fieldIDToName_FrontendServiceAddPlsqlPackageArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceRestoreSnapshotArgs) IsSetRequest() bool { +func (p *FrontendServiceAddPlsqlPackageArgs) IsSetRequest() bool { return p.Request != nil } -func (p 
*FrontendServiceRestoreSnapshotArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65251,17 +94507,14 @@ func (p *FrontendServiceRestoreSnapshotArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65276,7 +94529,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlPackageArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65286,17 +94539,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTRestoreSnapshotRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceAddPlsqlPackageArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTAddPlsqlPackageRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceRestoreSnapshotArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("restoreSnapshot_args"); err != nil { + if err = oprot.WriteStructBegin("addPlsqlPackage_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65304,7 +94558,6 @@ func (p *FrontendServiceRestoreSnapshotArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -65323,7 +94576,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -65340,14 +94593,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotArgs) String() string { +func (p *FrontendServiceAddPlsqlPackageArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceRestoreSnapshotArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceAddPlsqlPackageArgs(%+v)", *p) + } -func (p *FrontendServiceRestoreSnapshotArgs) DeepEqual(ano *FrontendServiceRestoreSnapshotArgs) bool { +func (p *FrontendServiceAddPlsqlPackageArgs) DeepEqual(ano *FrontendServiceAddPlsqlPackageArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65359,7 
+94613,7 @@ func (p *FrontendServiceRestoreSnapshotArgs) DeepEqual(ano *FrontendServiceResto return true } -func (p *FrontendServiceRestoreSnapshotArgs) Field1DeepEqual(src *TRestoreSnapshotRequest) bool { +func (p *FrontendServiceAddPlsqlPackageArgs) Field1DeepEqual(src *TAddPlsqlPackageRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -65367,39 +94621,38 @@ func (p *FrontendServiceRestoreSnapshotArgs) Field1DeepEqual(src *TRestoreSnapsh return true } -type FrontendServiceRestoreSnapshotResult struct { - Success *TRestoreSnapshotResult_ `thrift:"success,0,optional" frugal:"0,optional,TRestoreSnapshotResult_" json:"success,omitempty"` +type FrontendServiceAddPlsqlPackageResult struct { + Success *TPlsqlPackageResult_ `thrift:"success,0,optional" frugal:"0,optional,TPlsqlPackageResult_" json:"success,omitempty"` } -func NewFrontendServiceRestoreSnapshotResult() *FrontendServiceRestoreSnapshotResult { - return &FrontendServiceRestoreSnapshotResult{} +func NewFrontendServiceAddPlsqlPackageResult() *FrontendServiceAddPlsqlPackageResult { + return &FrontendServiceAddPlsqlPackageResult{} } -func (p *FrontendServiceRestoreSnapshotResult) InitDefault() { - *p = FrontendServiceRestoreSnapshotResult{} +func (p *FrontendServiceAddPlsqlPackageResult) InitDefault() { } -var FrontendServiceRestoreSnapshotResult_Success_DEFAULT *TRestoreSnapshotResult_ +var FrontendServiceAddPlsqlPackageResult_Success_DEFAULT *TPlsqlPackageResult_ -func (p *FrontendServiceRestoreSnapshotResult) GetSuccess() (v *TRestoreSnapshotResult_) { +func (p *FrontendServiceAddPlsqlPackageResult) GetSuccess() (v *TPlsqlPackageResult_) { if !p.IsSetSuccess() { - return FrontendServiceRestoreSnapshotResult_Success_DEFAULT + return FrontendServiceAddPlsqlPackageResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceRestoreSnapshotResult) SetSuccess(x interface{}) { - p.Success = x.(*TRestoreSnapshotResult_) +func (p *FrontendServiceAddPlsqlPackageResult) SetSuccess(x interface{}) { + p.Success = x.(*TPlsqlPackageResult_) } -var fieldIDToName_FrontendServiceRestoreSnapshotResult = map[int16]string{ +var fieldIDToName_FrontendServiceAddPlsqlPackageResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceRestoreSnapshotResult) IsSetSuccess() bool { +func (p *FrontendServiceAddPlsqlPackageResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceRestoreSnapshotResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65423,17 +94676,14 @@ func (p *FrontendServiceRestoreSnapshotResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65448,7 +94698,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_FrontendServiceAddPlsqlPackageResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65458,17 +94708,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTRestoreSnapshotResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceAddPlsqlPackageResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTPlsqlPackageResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceRestoreSnapshotResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("restoreSnapshot_result"); err != nil { + if err = oprot.WriteStructBegin("addPlsqlPackage_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65476,7 +94727,6 @@ func (p *FrontendServiceRestoreSnapshotResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -65495,7 +94745,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceAddPlsqlPackageResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -65514,14 +94764,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotResult) String() string { +func (p *FrontendServiceAddPlsqlPackageResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceRestoreSnapshotResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceAddPlsqlPackageResult(%+v)", *p) + } -func (p *FrontendServiceRestoreSnapshotResult) DeepEqual(ano *FrontendServiceRestoreSnapshotResult) bool { +func (p *FrontendServiceAddPlsqlPackageResult) DeepEqual(ano *FrontendServiceAddPlsqlPackageResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65533,7 +94784,7 @@ func (p *FrontendServiceRestoreSnapshotResult) DeepEqual(ano *FrontendServiceRes return true } -func (p *FrontendServiceRestoreSnapshotResult) Field0DeepEqual(src *TRestoreSnapshotResult_) bool { +func (p *FrontendServiceAddPlsqlPackageResult) Field0DeepEqual(src *TPlsqlPackageResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -65541,39 +94792,38 @@ func (p *FrontendServiceRestoreSnapshotResult) Field0DeepEqual(src *TRestoreSnap return true } -type FrontendServiceWaitingTxnStatusArgs struct { - Request *TWaitingTxnStatusRequest `thrift:"request,1" frugal:"1,default,TWaitingTxnStatusRequest" json:"request"` +type FrontendServiceDropPlsqlPackageArgs struct { + Request *TDropPlsqlPackageRequest `thrift:"request,1" frugal:"1,default,TDropPlsqlPackageRequest" json:"request"` } -func NewFrontendServiceWaitingTxnStatusArgs() *FrontendServiceWaitingTxnStatusArgs { - return &FrontendServiceWaitingTxnStatusArgs{} +func NewFrontendServiceDropPlsqlPackageArgs() *FrontendServiceDropPlsqlPackageArgs { + return 
&FrontendServiceDropPlsqlPackageArgs{} } -func (p *FrontendServiceWaitingTxnStatusArgs) InitDefault() { - *p = FrontendServiceWaitingTxnStatusArgs{} +func (p *FrontendServiceDropPlsqlPackageArgs) InitDefault() { } -var FrontendServiceWaitingTxnStatusArgs_Request_DEFAULT *TWaitingTxnStatusRequest +var FrontendServiceDropPlsqlPackageArgs_Request_DEFAULT *TDropPlsqlPackageRequest -func (p *FrontendServiceWaitingTxnStatusArgs) GetRequest() (v *TWaitingTxnStatusRequest) { +func (p *FrontendServiceDropPlsqlPackageArgs) GetRequest() (v *TDropPlsqlPackageRequest) { if !p.IsSetRequest() { - return FrontendServiceWaitingTxnStatusArgs_Request_DEFAULT + return FrontendServiceDropPlsqlPackageArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceWaitingTxnStatusArgs) SetRequest(val *TWaitingTxnStatusRequest) { +func (p *FrontendServiceDropPlsqlPackageArgs) SetRequest(val *TDropPlsqlPackageRequest) { p.Request = val } -var fieldIDToName_FrontendServiceWaitingTxnStatusArgs = map[int16]string{ +var fieldIDToName_FrontendServiceDropPlsqlPackageArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceWaitingTxnStatusArgs) IsSetRequest() bool { +func (p *FrontendServiceDropPlsqlPackageArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceWaitingTxnStatusArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65597,17 +94847,14 @@ func (p *FrontendServiceWaitingTxnStatusArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65622,7 +94869,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlPackageArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65632,17 +94879,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTWaitingTxnStatusRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceDropPlsqlPackageArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTDropPlsqlPackageRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceWaitingTxnStatusArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("waitingTxnStatus_args"); err != nil { + if err = oprot.WriteStructBegin("dropPlsqlPackage_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65650,7 +94898,6 @@ func (p 
*FrontendServiceWaitingTxnStatusArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -65669,7 +94916,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -65686,14 +94933,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusArgs) String() string { +func (p *FrontendServiceDropPlsqlPackageArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceWaitingTxnStatusArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceDropPlsqlPackageArgs(%+v)", *p) + } -func (p *FrontendServiceWaitingTxnStatusArgs) DeepEqual(ano *FrontendServiceWaitingTxnStatusArgs) bool { +func (p *FrontendServiceDropPlsqlPackageArgs) DeepEqual(ano *FrontendServiceDropPlsqlPackageArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65705,7 +94953,7 @@ func (p *FrontendServiceWaitingTxnStatusArgs) DeepEqual(ano *FrontendServiceWait return true } -func (p *FrontendServiceWaitingTxnStatusArgs) Field1DeepEqual(src *TWaitingTxnStatusRequest) bool { +func (p *FrontendServiceDropPlsqlPackageArgs) Field1DeepEqual(src *TDropPlsqlPackageRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -65713,39 +94961,38 @@ func (p *FrontendServiceWaitingTxnStatusArgs) Field1DeepEqual(src *TWaitingTxnSt return true } -type FrontendServiceWaitingTxnStatusResult struct { - Success *TWaitingTxnStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,TWaitingTxnStatusResult_" json:"success,omitempty"` +type FrontendServiceDropPlsqlPackageResult struct { + Success *TPlsqlPackageResult_ `thrift:"success,0,optional" frugal:"0,optional,TPlsqlPackageResult_" json:"success,omitempty"` } -func NewFrontendServiceWaitingTxnStatusResult() *FrontendServiceWaitingTxnStatusResult { - return &FrontendServiceWaitingTxnStatusResult{} +func NewFrontendServiceDropPlsqlPackageResult() *FrontendServiceDropPlsqlPackageResult { + return &FrontendServiceDropPlsqlPackageResult{} } -func (p *FrontendServiceWaitingTxnStatusResult) InitDefault() { - *p = FrontendServiceWaitingTxnStatusResult{} +func (p *FrontendServiceDropPlsqlPackageResult) InitDefault() { } -var FrontendServiceWaitingTxnStatusResult_Success_DEFAULT *TWaitingTxnStatusResult_ +var FrontendServiceDropPlsqlPackageResult_Success_DEFAULT *TPlsqlPackageResult_ -func (p *FrontendServiceWaitingTxnStatusResult) GetSuccess() (v *TWaitingTxnStatusResult_) { +func (p *FrontendServiceDropPlsqlPackageResult) GetSuccess() (v *TPlsqlPackageResult_) { if !p.IsSetSuccess() { - return FrontendServiceWaitingTxnStatusResult_Success_DEFAULT + return FrontendServiceDropPlsqlPackageResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceWaitingTxnStatusResult) SetSuccess(x interface{}) { - p.Success = x.(*TWaitingTxnStatusResult_) +func (p *FrontendServiceDropPlsqlPackageResult) SetSuccess(x interface{}) { + p.Success = x.(*TPlsqlPackageResult_) } -var fieldIDToName_FrontendServiceWaitingTxnStatusResult = map[int16]string{ +var fieldIDToName_FrontendServiceDropPlsqlPackageResult = map[int16]string{ 0: "success", } 
-func (p *FrontendServiceWaitingTxnStatusResult) IsSetSuccess() bool { +func (p *FrontendServiceDropPlsqlPackageResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceWaitingTxnStatusResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65769,17 +95016,14 @@ func (p *FrontendServiceWaitingTxnStatusResult) Read(iprot thrift.TProtocol) (er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65794,7 +95038,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlPackageResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65804,17 +95048,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTWaitingTxnStatusResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceDropPlsqlPackageResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTPlsqlPackageResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceWaitingTxnStatusResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("waitingTxnStatus_result"); err != nil { + if err = oprot.WriteStructBegin("dropPlsqlPackage_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65822,7 +95067,6 @@ func (p *FrontendServiceWaitingTxnStatusResult) Write(oprot thrift.TProtocol) (e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -65841,7 +95085,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceDropPlsqlPackageResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -65860,14 +95104,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusResult) String() string { +func (p *FrontendServiceDropPlsqlPackageResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceWaitingTxnStatusResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceDropPlsqlPackageResult(%+v)", *p) + } -func (p 
*FrontendServiceWaitingTxnStatusResult) DeepEqual(ano *FrontendServiceWaitingTxnStatusResult) bool { +func (p *FrontendServiceDropPlsqlPackageResult) DeepEqual(ano *FrontendServiceDropPlsqlPackageResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -65879,7 +95124,7 @@ func (p *FrontendServiceWaitingTxnStatusResult) DeepEqual(ano *FrontendServiceWa return true } -func (p *FrontendServiceWaitingTxnStatusResult) Field0DeepEqual(src *TWaitingTxnStatusResult_) bool { +func (p *FrontendServiceDropPlsqlPackageResult) Field0DeepEqual(src *TPlsqlPackageResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -65887,39 +95132,38 @@ func (p *FrontendServiceWaitingTxnStatusResult) Field0DeepEqual(src *TWaitingTxn return true } -type FrontendServiceStreamLoadPutArgs struct { - Request *TStreamLoadPutRequest `thrift:"request,1" frugal:"1,default,TStreamLoadPutRequest" json:"request"` +type FrontendServiceGetMasterTokenArgs struct { + Request *TGetMasterTokenRequest `thrift:"request,1" frugal:"1,default,TGetMasterTokenRequest" json:"request"` } -func NewFrontendServiceStreamLoadPutArgs() *FrontendServiceStreamLoadPutArgs { - return &FrontendServiceStreamLoadPutArgs{} +func NewFrontendServiceGetMasterTokenArgs() *FrontendServiceGetMasterTokenArgs { + return &FrontendServiceGetMasterTokenArgs{} } -func (p *FrontendServiceStreamLoadPutArgs) InitDefault() { - *p = FrontendServiceStreamLoadPutArgs{} +func (p *FrontendServiceGetMasterTokenArgs) InitDefault() { } -var FrontendServiceStreamLoadPutArgs_Request_DEFAULT *TStreamLoadPutRequest +var FrontendServiceGetMasterTokenArgs_Request_DEFAULT *TGetMasterTokenRequest -func (p *FrontendServiceStreamLoadPutArgs) GetRequest() (v *TStreamLoadPutRequest) { +func (p *FrontendServiceGetMasterTokenArgs) GetRequest() (v *TGetMasterTokenRequest) { if !p.IsSetRequest() { - return FrontendServiceStreamLoadPutArgs_Request_DEFAULT + return FrontendServiceGetMasterTokenArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceStreamLoadPutArgs) SetRequest(val *TStreamLoadPutRequest) { +func (p *FrontendServiceGetMasterTokenArgs) SetRequest(val *TGetMasterTokenRequest) { p.Request = val } -var fieldIDToName_FrontendServiceStreamLoadPutArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetMasterTokenArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceStreamLoadPutArgs) IsSetRequest() bool { +func (p *FrontendServiceGetMasterTokenArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceStreamLoadPutArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -65943,17 +95187,14 @@ func (p *FrontendServiceStreamLoadPutArgs) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -65968,7 +95209,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutArgs[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -65978,17 +95219,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTStreamLoadPutRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetMasterTokenArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetMasterTokenRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceStreamLoadPutArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("streamLoadPut_args"); err != nil { + if err = oprot.WriteStructBegin("getMasterToken_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -65996,7 +95238,6 @@ func (p *FrontendServiceStreamLoadPutArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66015,7 +95256,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -66032,14 +95273,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutArgs) String() string { +func (p *FrontendServiceGetMasterTokenArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceStreamLoadPutArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetMasterTokenArgs(%+v)", *p) + } -func (p *FrontendServiceStreamLoadPutArgs) DeepEqual(ano *FrontendServiceStreamLoadPutArgs) bool { +func (p *FrontendServiceGetMasterTokenArgs) DeepEqual(ano *FrontendServiceGetMasterTokenArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66051,7 +95293,7 @@ func (p *FrontendServiceStreamLoadPutArgs) DeepEqual(ano *FrontendServiceStreamL return true } -func (p *FrontendServiceStreamLoadPutArgs) Field1DeepEqual(src *TStreamLoadPutRequest) bool { +func (p *FrontendServiceGetMasterTokenArgs) Field1DeepEqual(src *TGetMasterTokenRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -66059,39 +95301,38 @@ func (p *FrontendServiceStreamLoadPutArgs) Field1DeepEqual(src *TStreamLoadPutRe return true } -type FrontendServiceStreamLoadPutResult struct { - Success *TStreamLoadPutResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadPutResult_" json:"success,omitempty"` +type FrontendServiceGetMasterTokenResult struct { + Success *TGetMasterTokenResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetMasterTokenResult_" json:"success,omitempty"` } -func NewFrontendServiceStreamLoadPutResult() *FrontendServiceStreamLoadPutResult { - return &FrontendServiceStreamLoadPutResult{} +func NewFrontendServiceGetMasterTokenResult() *FrontendServiceGetMasterTokenResult { + return 
&FrontendServiceGetMasterTokenResult{} } -func (p *FrontendServiceStreamLoadPutResult) InitDefault() { - *p = FrontendServiceStreamLoadPutResult{} +func (p *FrontendServiceGetMasterTokenResult) InitDefault() { } -var FrontendServiceStreamLoadPutResult_Success_DEFAULT *TStreamLoadPutResult_ +var FrontendServiceGetMasterTokenResult_Success_DEFAULT *TGetMasterTokenResult_ -func (p *FrontendServiceStreamLoadPutResult) GetSuccess() (v *TStreamLoadPutResult_) { +func (p *FrontendServiceGetMasterTokenResult) GetSuccess() (v *TGetMasterTokenResult_) { if !p.IsSetSuccess() { - return FrontendServiceStreamLoadPutResult_Success_DEFAULT + return FrontendServiceGetMasterTokenResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceStreamLoadPutResult) SetSuccess(x interface{}) { - p.Success = x.(*TStreamLoadPutResult_) +func (p *FrontendServiceGetMasterTokenResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetMasterTokenResult_) } -var fieldIDToName_FrontendServiceStreamLoadPutResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetMasterTokenResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceStreamLoadPutResult) IsSetSuccess() bool { +func (p *FrontendServiceGetMasterTokenResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceStreamLoadPutResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66115,17 +95356,14 @@ func (p *FrontendServiceStreamLoadPutResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -66140,7 +95378,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -66150,17 +95388,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTStreamLoadPutResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetMasterTokenResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetMasterTokenResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceStreamLoadPutResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("streamLoadPut_result"); err != nil { + if err = oprot.WriteStructBegin("getMasterToken_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -66168,7 +95407,6 @@ func (p 
*FrontendServiceStreamLoadPutResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66187,7 +95425,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMasterTokenResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -66206,14 +95444,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutResult) String() string { +func (p *FrontendServiceGetMasterTokenResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceStreamLoadPutResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetMasterTokenResult(%+v)", *p) + } -func (p *FrontendServiceStreamLoadPutResult) DeepEqual(ano *FrontendServiceStreamLoadPutResult) bool { +func (p *FrontendServiceGetMasterTokenResult) DeepEqual(ano *FrontendServiceGetMasterTokenResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66225,7 +95464,7 @@ func (p *FrontendServiceStreamLoadPutResult) DeepEqual(ano *FrontendServiceStrea return true } -func (p *FrontendServiceStreamLoadPutResult) Field0DeepEqual(src *TStreamLoadPutResult_) bool { +func (p *FrontendServiceGetMasterTokenResult) Field0DeepEqual(src *TGetMasterTokenResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -66233,39 +95472,38 @@ func (p *FrontendServiceStreamLoadPutResult) Field0DeepEqual(src *TStreamLoadPut return true } -type FrontendServiceStreamLoadMultiTablePutArgs struct { - Request *TStreamLoadPutRequest `thrift:"request,1" frugal:"1,default,TStreamLoadPutRequest" json:"request"` +type FrontendServiceGetBinlogLagArgs struct { + Request *TGetBinlogLagRequest `thrift:"request,1" frugal:"1,default,TGetBinlogRequest" json:"request"` } -func NewFrontendServiceStreamLoadMultiTablePutArgs() *FrontendServiceStreamLoadMultiTablePutArgs { - return &FrontendServiceStreamLoadMultiTablePutArgs{} +func NewFrontendServiceGetBinlogLagArgs() *FrontendServiceGetBinlogLagArgs { + return &FrontendServiceGetBinlogLagArgs{} } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) InitDefault() { - *p = FrontendServiceStreamLoadMultiTablePutArgs{} +func (p *FrontendServiceGetBinlogLagArgs) InitDefault() { } -var FrontendServiceStreamLoadMultiTablePutArgs_Request_DEFAULT *TStreamLoadPutRequest +var FrontendServiceGetBinlogLagArgs_Request_DEFAULT *TGetBinlogLagRequest -func (p *FrontendServiceStreamLoadMultiTablePutArgs) GetRequest() (v *TStreamLoadPutRequest) { +func (p *FrontendServiceGetBinlogLagArgs) GetRequest() (v *TGetBinlogLagRequest) { if !p.IsSetRequest() { - return FrontendServiceStreamLoadMultiTablePutArgs_Request_DEFAULT + return FrontendServiceGetBinlogLagArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) SetRequest(val *TStreamLoadPutRequest) { +func (p *FrontendServiceGetBinlogLagArgs) SetRequest(val *TGetBinlogLagRequest) { p.Request = val } -var fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetBinlogLagArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) IsSetRequest() bool { +func (p 
*FrontendServiceGetBinlogLagArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66289,17 +95527,14 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) Read(iprot thrift.TProtocol if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -66314,7 +95549,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -66324,17 +95559,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTStreamLoadPutRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetBinlogLagArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetBinlogLagRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("streamLoadMultiTablePut_args"); err != nil { + if err = oprot.WriteStructBegin("getBinlogLag_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -66342,7 +95578,6 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) Write(oprot thrift.TProtoco fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66361,7 +95596,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -66378,14 +95613,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) String() string { +func (p *FrontendServiceGetBinlogLagArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceStreamLoadMultiTablePutArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetBinlogLagArgs(%+v)", *p) + } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) DeepEqual(ano *FrontendServiceStreamLoadMultiTablePutArgs) bool { +func (p 
*FrontendServiceGetBinlogLagArgs) DeepEqual(ano *FrontendServiceGetBinlogLagArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66397,7 +95633,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) DeepEqual(ano *FrontendServ return true } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) Field1DeepEqual(src *TStreamLoadPutRequest) bool { +func (p *FrontendServiceGetBinlogLagArgs) Field1DeepEqual(src *TGetBinlogLagRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -66405,39 +95641,38 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) Field1DeepEqual(src *TStrea return true } -type FrontendServiceStreamLoadMultiTablePutResult struct { - Success *TStreamLoadMultiTablePutResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadMultiTablePutResult_" json:"success,omitempty"` +type FrontendServiceGetBinlogLagResult struct { + Success *TGetBinlogLagResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetBinlogLagResult_" json:"success,omitempty"` } -func NewFrontendServiceStreamLoadMultiTablePutResult() *FrontendServiceStreamLoadMultiTablePutResult { - return &FrontendServiceStreamLoadMultiTablePutResult{} +func NewFrontendServiceGetBinlogLagResult() *FrontendServiceGetBinlogLagResult { + return &FrontendServiceGetBinlogLagResult{} } -func (p *FrontendServiceStreamLoadMultiTablePutResult) InitDefault() { - *p = FrontendServiceStreamLoadMultiTablePutResult{} +func (p *FrontendServiceGetBinlogLagResult) InitDefault() { } -var FrontendServiceStreamLoadMultiTablePutResult_Success_DEFAULT *TStreamLoadMultiTablePutResult_ +var FrontendServiceGetBinlogLagResult_Success_DEFAULT *TGetBinlogLagResult_ -func (p *FrontendServiceStreamLoadMultiTablePutResult) GetSuccess() (v *TStreamLoadMultiTablePutResult_) { +func (p *FrontendServiceGetBinlogLagResult) GetSuccess() (v *TGetBinlogLagResult_) { if !p.IsSetSuccess() { - return FrontendServiceStreamLoadMultiTablePutResult_Success_DEFAULT + return FrontendServiceGetBinlogLagResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceStreamLoadMultiTablePutResult) SetSuccess(x interface{}) { - p.Success = x.(*TStreamLoadMultiTablePutResult_) +func (p *FrontendServiceGetBinlogLagResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetBinlogLagResult_) } -var fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetBinlogLagResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceStreamLoadMultiTablePutResult) IsSetSuccess() bool { +func (p *FrontendServiceGetBinlogLagResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceStreamLoadMultiTablePutResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66461,17 +95696,14 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) Read(iprot thrift.TProtoc if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -66486,7 +95718,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -66496,17 +95728,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTStreamLoadMultiTablePutResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetBinlogLagResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetBinlogLagResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceStreamLoadMultiTablePutResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("streamLoadMultiTablePut_result"); err != nil { + if err = oprot.WriteStructBegin("getBinlogLag_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -66514,7 +95747,6 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) Write(oprot thrift.TProto fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66533,7 +95765,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBinlogLagResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -66552,14 +95784,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutResult) String() string { +func (p *FrontendServiceGetBinlogLagResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceStreamLoadMultiTablePutResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetBinlogLagResult(%+v)", *p) + } -func (p *FrontendServiceStreamLoadMultiTablePutResult) DeepEqual(ano *FrontendServiceStreamLoadMultiTablePutResult) bool { +func (p *FrontendServiceGetBinlogLagResult) DeepEqual(ano *FrontendServiceGetBinlogLagResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66571,7 +95804,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) DeepEqual(ano *FrontendSe return true } -func (p *FrontendServiceStreamLoadMultiTablePutResult) Field0DeepEqual(src *TStreamLoadMultiTablePutResult_) bool { +func (p *FrontendServiceGetBinlogLagResult) Field0DeepEqual(src *TGetBinlogLagResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -66579,39 +95812,38 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) Field0DeepEqual(src *TStr return true } -type FrontendServiceSnapshotLoaderReportArgs struct { - Request *TSnapshotLoaderReportRequest `thrift:"request,1" frugal:"1,default,TSnapshotLoaderReportRequest" json:"request"` +type FrontendServiceUpdateStatsCacheArgs struct { + Request *TUpdateFollowerStatsCacheRequest `thrift:"request,1" 
frugal:"1,default,TUpdateFollowerStatsCacheRequest" json:"request"` } -func NewFrontendServiceSnapshotLoaderReportArgs() *FrontendServiceSnapshotLoaderReportArgs { - return &FrontendServiceSnapshotLoaderReportArgs{} +func NewFrontendServiceUpdateStatsCacheArgs() *FrontendServiceUpdateStatsCacheArgs { + return &FrontendServiceUpdateStatsCacheArgs{} } -func (p *FrontendServiceSnapshotLoaderReportArgs) InitDefault() { - *p = FrontendServiceSnapshotLoaderReportArgs{} +func (p *FrontendServiceUpdateStatsCacheArgs) InitDefault() { } -var FrontendServiceSnapshotLoaderReportArgs_Request_DEFAULT *TSnapshotLoaderReportRequest +var FrontendServiceUpdateStatsCacheArgs_Request_DEFAULT *TUpdateFollowerStatsCacheRequest -func (p *FrontendServiceSnapshotLoaderReportArgs) GetRequest() (v *TSnapshotLoaderReportRequest) { +func (p *FrontendServiceUpdateStatsCacheArgs) GetRequest() (v *TUpdateFollowerStatsCacheRequest) { if !p.IsSetRequest() { - return FrontendServiceSnapshotLoaderReportArgs_Request_DEFAULT + return FrontendServiceUpdateStatsCacheArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceSnapshotLoaderReportArgs) SetRequest(val *TSnapshotLoaderReportRequest) { +func (p *FrontendServiceUpdateStatsCacheArgs) SetRequest(val *TUpdateFollowerStatsCacheRequest) { p.Request = val } -var fieldIDToName_FrontendServiceSnapshotLoaderReportArgs = map[int16]string{ +var fieldIDToName_FrontendServiceUpdateStatsCacheArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceSnapshotLoaderReportArgs) IsSetRequest() bool { +func (p *FrontendServiceUpdateStatsCacheArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceSnapshotLoaderReportArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66635,17 +95867,14 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) Read(iprot thrift.TProtocol) ( if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -66660,7 +95889,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -66670,17 +95899,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTSnapshotLoaderReportRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceUpdateStatsCacheArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTUpdateFollowerStatsCacheRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p 
*FrontendServiceSnapshotLoaderReportArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("snapshotLoaderReport_args"); err != nil { + if err = oprot.WriteStructBegin("updateStatsCache_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -66688,7 +95918,6 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) Write(oprot thrift.TProtocol) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66707,7 +95936,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -66724,14 +95953,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportArgs) String() string { +func (p *FrontendServiceUpdateStatsCacheArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceSnapshotLoaderReportArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceUpdateStatsCacheArgs(%+v)", *p) + } -func (p *FrontendServiceSnapshotLoaderReportArgs) DeepEqual(ano *FrontendServiceSnapshotLoaderReportArgs) bool { +func (p *FrontendServiceUpdateStatsCacheArgs) DeepEqual(ano *FrontendServiceUpdateStatsCacheArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66743,7 +95973,7 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) DeepEqual(ano *FrontendService return true } -func (p *FrontendServiceSnapshotLoaderReportArgs) Field1DeepEqual(src *TSnapshotLoaderReportRequest) bool { +func (p *FrontendServiceUpdateStatsCacheArgs) Field1DeepEqual(src *TUpdateFollowerStatsCacheRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -66751,39 +95981,38 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) Field1DeepEqual(src *TSnapshot return true } -type FrontendServiceSnapshotLoaderReportResult struct { +type FrontendServiceUpdateStatsCacheResult struct { Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceSnapshotLoaderReportResult() *FrontendServiceSnapshotLoaderReportResult { - return &FrontendServiceSnapshotLoaderReportResult{} +func NewFrontendServiceUpdateStatsCacheResult() *FrontendServiceUpdateStatsCacheResult { + return &FrontendServiceUpdateStatsCacheResult{} } -func (p *FrontendServiceSnapshotLoaderReportResult) InitDefault() { - *p = FrontendServiceSnapshotLoaderReportResult{} +func (p *FrontendServiceUpdateStatsCacheResult) InitDefault() { } -var FrontendServiceSnapshotLoaderReportResult_Success_DEFAULT *status.TStatus +var FrontendServiceUpdateStatsCacheResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceSnapshotLoaderReportResult) GetSuccess() (v *status.TStatus) { +func (p *FrontendServiceUpdateStatsCacheResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceSnapshotLoaderReportResult_Success_DEFAULT + return FrontendServiceUpdateStatsCacheResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceSnapshotLoaderReportResult) SetSuccess(x interface{}) { +func (p 
*FrontendServiceUpdateStatsCacheResult) SetSuccess(x interface{}) { p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceSnapshotLoaderReportResult = map[int16]string{ +var fieldIDToName_FrontendServiceUpdateStatsCacheResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceSnapshotLoaderReportResult) IsSetSuccess() bool { +func (p *FrontendServiceUpdateStatsCacheResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceSnapshotLoaderReportResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66807,17 +96036,14 @@ func (p *FrontendServiceSnapshotLoaderReportResult) Read(iprot thrift.TProtocol) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -66832,7 +96058,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -66842,17 +96068,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = status.NewTStatus() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceUpdateStatsCacheResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceSnapshotLoaderReportResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("snapshotLoaderReport_result"); err != nil { + if err = oprot.WriteStructBegin("updateStatsCache_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -66860,7 +96087,6 @@ func (p *FrontendServiceSnapshotLoaderReportResult) Write(oprot thrift.TProtocol fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -66879,7 +96105,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdateStatsCacheResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -66898,14 +96124,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportResult) 
String() string { +func (p *FrontendServiceUpdateStatsCacheResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceSnapshotLoaderReportResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceUpdateStatsCacheResult(%+v)", *p) + } -func (p *FrontendServiceSnapshotLoaderReportResult) DeepEqual(ano *FrontendServiceSnapshotLoaderReportResult) bool { +func (p *FrontendServiceUpdateStatsCacheResult) DeepEqual(ano *FrontendServiceUpdateStatsCacheResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -66917,7 +96144,7 @@ func (p *FrontendServiceSnapshotLoaderReportResult) DeepEqual(ano *FrontendServi return true } -func (p *FrontendServiceSnapshotLoaderReportResult) Field0DeepEqual(src *status.TStatus) bool { +func (p *FrontendServiceUpdateStatsCacheResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -66925,39 +96152,38 @@ func (p *FrontendServiceSnapshotLoaderReportResult) Field0DeepEqual(src *status. return true } -type FrontendServicePingArgs struct { - Request *TFrontendPingFrontendRequest `thrift:"request,1" frugal:"1,default,TFrontendPingFrontendRequest" json:"request"` +type FrontendServiceGetAutoIncrementRangeArgs struct { + Request *TAutoIncrementRangeRequest `thrift:"request,1" frugal:"1,default,TAutoIncrementRangeRequest" json:"request"` } -func NewFrontendServicePingArgs() *FrontendServicePingArgs { - return &FrontendServicePingArgs{} +func NewFrontendServiceGetAutoIncrementRangeArgs() *FrontendServiceGetAutoIncrementRangeArgs { + return &FrontendServiceGetAutoIncrementRangeArgs{} } -func (p *FrontendServicePingArgs) InitDefault() { - *p = FrontendServicePingArgs{} +func (p *FrontendServiceGetAutoIncrementRangeArgs) InitDefault() { } -var FrontendServicePingArgs_Request_DEFAULT *TFrontendPingFrontendRequest +var FrontendServiceGetAutoIncrementRangeArgs_Request_DEFAULT *TAutoIncrementRangeRequest -func (p *FrontendServicePingArgs) GetRequest() (v *TFrontendPingFrontendRequest) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) GetRequest() (v *TAutoIncrementRangeRequest) { if !p.IsSetRequest() { - return FrontendServicePingArgs_Request_DEFAULT + return FrontendServiceGetAutoIncrementRangeArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServicePingArgs) SetRequest(val *TFrontendPingFrontendRequest) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) SetRequest(val *TAutoIncrementRangeRequest) { p.Request = val } -var fieldIDToName_FrontendServicePingArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs = map[int16]string{ 1: "request", } -func (p *FrontendServicePingArgs) IsSetRequest() bool { +func (p *FrontendServiceGetAutoIncrementRangeArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServicePingArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -66981,17 +96207,14 @@ func (p *FrontendServicePingArgs) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67006,7 +96229,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67016,17 +96239,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServicePingArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTFrontendPingFrontendRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetAutoIncrementRangeArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTAutoIncrementRangeRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServicePingArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ping_args"); err != nil { + if err = oprot.WriteStructBegin("getAutoIncrementRange_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67034,7 +96258,6 @@ func (p *FrontendServicePingArgs) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67053,7 +96276,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServicePingArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -67070,14 +96293,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServicePingArgs) String() string { +func (p *FrontendServiceGetAutoIncrementRangeArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServicePingArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetAutoIncrementRangeArgs(%+v)", *p) + } -func (p *FrontendServicePingArgs) DeepEqual(ano *FrontendServicePingArgs) bool { +func (p *FrontendServiceGetAutoIncrementRangeArgs) DeepEqual(ano *FrontendServiceGetAutoIncrementRangeArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67089,7 +96313,7 @@ func (p *FrontendServicePingArgs) DeepEqual(ano *FrontendServicePingArgs) bool { return true } -func (p *FrontendServicePingArgs) Field1DeepEqual(src *TFrontendPingFrontendRequest) bool { +func (p *FrontendServiceGetAutoIncrementRangeArgs) Field1DeepEqual(src *TAutoIncrementRangeRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -67097,39 +96321,38 @@ func (p *FrontendServicePingArgs) Field1DeepEqual(src *TFrontendPingFrontendRequ return true } -type FrontendServicePingResult struct { - Success *TFrontendPingFrontendResult_ `thrift:"success,0,optional" frugal:"0,optional,TFrontendPingFrontendResult_" json:"success,omitempty"` +type FrontendServiceGetAutoIncrementRangeResult struct { + Success *TAutoIncrementRangeResult_ `thrift:"success,0,optional" 
frugal:"0,optional,TAutoIncrementRangeResult_" json:"success,omitempty"` } -func NewFrontendServicePingResult() *FrontendServicePingResult { - return &FrontendServicePingResult{} +func NewFrontendServiceGetAutoIncrementRangeResult() *FrontendServiceGetAutoIncrementRangeResult { + return &FrontendServiceGetAutoIncrementRangeResult{} } -func (p *FrontendServicePingResult) InitDefault() { - *p = FrontendServicePingResult{} +func (p *FrontendServiceGetAutoIncrementRangeResult) InitDefault() { } -var FrontendServicePingResult_Success_DEFAULT *TFrontendPingFrontendResult_ +var FrontendServiceGetAutoIncrementRangeResult_Success_DEFAULT *TAutoIncrementRangeResult_ -func (p *FrontendServicePingResult) GetSuccess() (v *TFrontendPingFrontendResult_) { +func (p *FrontendServiceGetAutoIncrementRangeResult) GetSuccess() (v *TAutoIncrementRangeResult_) { if !p.IsSetSuccess() { - return FrontendServicePingResult_Success_DEFAULT + return FrontendServiceGetAutoIncrementRangeResult_Success_DEFAULT } return p.Success } -func (p *FrontendServicePingResult) SetSuccess(x interface{}) { - p.Success = x.(*TFrontendPingFrontendResult_) +func (p *FrontendServiceGetAutoIncrementRangeResult) SetSuccess(x interface{}) { + p.Success = x.(*TAutoIncrementRangeResult_) } -var fieldIDToName_FrontendServicePingResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetAutoIncrementRangeResult = map[int16]string{ 0: "success", } -func (p *FrontendServicePingResult) IsSetSuccess() bool { +func (p *FrontendServiceGetAutoIncrementRangeResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServicePingResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetAutoIncrementRangeResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -67153,17 +96376,14 @@ func (p *FrontendServicePingResult) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67178,7 +96398,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67188,17 +96408,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServicePingResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTFrontendPingFrontendResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetAutoIncrementRangeResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTAutoIncrementRangeResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServicePingResult) Write(oprot thrift.TProtocol) (err error) { +func (p 
*FrontendServiceGetAutoIncrementRangeResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ping_result"); err != nil { + if err = oprot.WriteStructBegin("getAutoIncrementRange_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67206,7 +96427,6 @@ func (p *FrontendServicePingResult) Write(oprot thrift.TProtocol) (err error) { fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67225,7 +96445,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServicePingResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetAutoIncrementRangeResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -67244,14 +96464,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServicePingResult) String() string { +func (p *FrontendServiceGetAutoIncrementRangeResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServicePingResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetAutoIncrementRangeResult(%+v)", *p) + } -func (p *FrontendServicePingResult) DeepEqual(ano *FrontendServicePingResult) bool { +func (p *FrontendServiceGetAutoIncrementRangeResult) DeepEqual(ano *FrontendServiceGetAutoIncrementRangeResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67263,7 +96484,7 @@ func (p *FrontendServicePingResult) DeepEqual(ano *FrontendServicePingResult) bo return true } -func (p *FrontendServicePingResult) Field0DeepEqual(src *TFrontendPingFrontendResult_) bool { +func (p *FrontendServiceGetAutoIncrementRangeResult) Field0DeepEqual(src *TAutoIncrementRangeResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -67271,39 +96492,38 @@ func (p *FrontendServicePingResult) Field0DeepEqual(src *TFrontendPingFrontendRe return true } -type FrontendServiceAddColumnsArgs struct { - Request *TAddColumnsRequest `thrift:"request,1" frugal:"1,default,TAddColumnsRequest" json:"request"` +type FrontendServiceCreatePartitionArgs struct { + Request *TCreatePartitionRequest `thrift:"request,1" frugal:"1,default,TCreatePartitionRequest" json:"request"` } -func NewFrontendServiceAddColumnsArgs() *FrontendServiceAddColumnsArgs { - return &FrontendServiceAddColumnsArgs{} +func NewFrontendServiceCreatePartitionArgs() *FrontendServiceCreatePartitionArgs { + return &FrontendServiceCreatePartitionArgs{} } -func (p *FrontendServiceAddColumnsArgs) InitDefault() { - *p = FrontendServiceAddColumnsArgs{} +func (p *FrontendServiceCreatePartitionArgs) InitDefault() { } -var FrontendServiceAddColumnsArgs_Request_DEFAULT *TAddColumnsRequest +var FrontendServiceCreatePartitionArgs_Request_DEFAULT *TCreatePartitionRequest -func (p *FrontendServiceAddColumnsArgs) GetRequest() (v *TAddColumnsRequest) { +func (p *FrontendServiceCreatePartitionArgs) GetRequest() (v *TCreatePartitionRequest) { if !p.IsSetRequest() { - return FrontendServiceAddColumnsArgs_Request_DEFAULT + return FrontendServiceCreatePartitionArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceAddColumnsArgs) SetRequest(val *TAddColumnsRequest) { +func (p *FrontendServiceCreatePartitionArgs) SetRequest(val *TCreatePartitionRequest) { p.Request = val } -var 
fieldIDToName_FrontendServiceAddColumnsArgs = map[int16]string{ +var fieldIDToName_FrontendServiceCreatePartitionArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceAddColumnsArgs) IsSetRequest() bool { +func (p *FrontendServiceCreatePartitionArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceAddColumnsArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -67327,17 +96547,14 @@ func (p *FrontendServiceAddColumnsArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67352,7 +96569,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddColumnsArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67362,17 +96579,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTAddColumnsRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceCreatePartitionArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTCreatePartitionRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceAddColumnsArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("addColumns_args"); err != nil { + if err = oprot.WriteStructBegin("createPartition_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67380,7 +96598,6 @@ func (p *FrontendServiceAddColumnsArgs) Write(oprot thrift.TProtocol) (err error fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67399,7 +96616,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -67416,14 +96633,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceAddColumnsArgs) String() string { +func (p *FrontendServiceCreatePartitionArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceAddColumnsArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceCreatePartitionArgs(%+v)", *p) + } -func 
(p *FrontendServiceAddColumnsArgs) DeepEqual(ano *FrontendServiceAddColumnsArgs) bool { +func (p *FrontendServiceCreatePartitionArgs) DeepEqual(ano *FrontendServiceCreatePartitionArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67435,7 +96653,7 @@ func (p *FrontendServiceAddColumnsArgs) DeepEqual(ano *FrontendServiceAddColumns return true } -func (p *FrontendServiceAddColumnsArgs) Field1DeepEqual(src *TAddColumnsRequest) bool { +func (p *FrontendServiceCreatePartitionArgs) Field1DeepEqual(src *TCreatePartitionRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -67443,39 +96661,38 @@ func (p *FrontendServiceAddColumnsArgs) Field1DeepEqual(src *TAddColumnsRequest) return true } -type FrontendServiceAddColumnsResult struct { - Success *TAddColumnsResult_ `thrift:"success,0,optional" frugal:"0,optional,TAddColumnsResult_" json:"success,omitempty"` +type FrontendServiceCreatePartitionResult struct { + Success *TCreatePartitionResult_ `thrift:"success,0,optional" frugal:"0,optional,TCreatePartitionResult_" json:"success,omitempty"` } -func NewFrontendServiceAddColumnsResult() *FrontendServiceAddColumnsResult { - return &FrontendServiceAddColumnsResult{} +func NewFrontendServiceCreatePartitionResult() *FrontendServiceCreatePartitionResult { + return &FrontendServiceCreatePartitionResult{} } -func (p *FrontendServiceAddColumnsResult) InitDefault() { - *p = FrontendServiceAddColumnsResult{} +func (p *FrontendServiceCreatePartitionResult) InitDefault() { } -var FrontendServiceAddColumnsResult_Success_DEFAULT *TAddColumnsResult_ +var FrontendServiceCreatePartitionResult_Success_DEFAULT *TCreatePartitionResult_ -func (p *FrontendServiceAddColumnsResult) GetSuccess() (v *TAddColumnsResult_) { +func (p *FrontendServiceCreatePartitionResult) GetSuccess() (v *TCreatePartitionResult_) { if !p.IsSetSuccess() { - return FrontendServiceAddColumnsResult_Success_DEFAULT + return FrontendServiceCreatePartitionResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceAddColumnsResult) SetSuccess(x interface{}) { - p.Success = x.(*TAddColumnsResult_) +func (p *FrontendServiceCreatePartitionResult) SetSuccess(x interface{}) { + p.Success = x.(*TCreatePartitionResult_) } -var fieldIDToName_FrontendServiceAddColumnsResult = map[int16]string{ +var fieldIDToName_FrontendServiceCreatePartitionResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceAddColumnsResult) IsSetSuccess() bool { +func (p *FrontendServiceCreatePartitionResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceAddColumnsResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -67499,17 +96716,14 @@ func (p *FrontendServiceAddColumnsResult) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67524,7 +96738,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_FrontendServiceAddColumnsResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67534,17 +96748,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTAddColumnsResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceCreatePartitionResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTCreatePartitionResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceAddColumnsResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("addColumns_result"); err != nil { + if err = oprot.WriteStructBegin("createPartition_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67552,7 +96767,6 @@ func (p *FrontendServiceAddColumnsResult) Write(oprot thrift.TProtocol) (err err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67571,7 +96785,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceCreatePartitionResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -67590,14 +96804,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceAddColumnsResult) String() string { +func (p *FrontendServiceCreatePartitionResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceAddColumnsResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceCreatePartitionResult(%+v)", *p) + } -func (p *FrontendServiceAddColumnsResult) DeepEqual(ano *FrontendServiceAddColumnsResult) bool { +func (p *FrontendServiceCreatePartitionResult) DeepEqual(ano *FrontendServiceCreatePartitionResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67609,7 +96824,7 @@ func (p *FrontendServiceAddColumnsResult) DeepEqual(ano *FrontendServiceAddColum return true } -func (p *FrontendServiceAddColumnsResult) Field0DeepEqual(src *TAddColumnsResult_) bool { +func (p *FrontendServiceCreatePartitionResult) Field0DeepEqual(src *TCreatePartitionResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -67617,39 +96832,38 @@ func (p *FrontendServiceAddColumnsResult) Field0DeepEqual(src *TAddColumnsResult return true } -type FrontendServiceInitExternalCtlMetaArgs struct { - Request *TInitExternalCtlMetaRequest `thrift:"request,1" frugal:"1,default,TInitExternalCtlMetaRequest" json:"request"` +type FrontendServiceReplacePartitionArgs struct { + Request *TReplacePartitionRequest `thrift:"request,1" frugal:"1,default,TReplacePartitionRequest" json:"request"` } -func NewFrontendServiceInitExternalCtlMetaArgs() *FrontendServiceInitExternalCtlMetaArgs { - return &FrontendServiceInitExternalCtlMetaArgs{} +func 
NewFrontendServiceReplacePartitionArgs() *FrontendServiceReplacePartitionArgs { + return &FrontendServiceReplacePartitionArgs{} } -func (p *FrontendServiceInitExternalCtlMetaArgs) InitDefault() { - *p = FrontendServiceInitExternalCtlMetaArgs{} +func (p *FrontendServiceReplacePartitionArgs) InitDefault() { } -var FrontendServiceInitExternalCtlMetaArgs_Request_DEFAULT *TInitExternalCtlMetaRequest +var FrontendServiceReplacePartitionArgs_Request_DEFAULT *TReplacePartitionRequest -func (p *FrontendServiceInitExternalCtlMetaArgs) GetRequest() (v *TInitExternalCtlMetaRequest) { +func (p *FrontendServiceReplacePartitionArgs) GetRequest() (v *TReplacePartitionRequest) { if !p.IsSetRequest() { - return FrontendServiceInitExternalCtlMetaArgs_Request_DEFAULT + return FrontendServiceReplacePartitionArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceInitExternalCtlMetaArgs) SetRequest(val *TInitExternalCtlMetaRequest) { +func (p *FrontendServiceReplacePartitionArgs) SetRequest(val *TReplacePartitionRequest) { p.Request = val } -var fieldIDToName_FrontendServiceInitExternalCtlMetaArgs = map[int16]string{ +var fieldIDToName_FrontendServiceReplacePartitionArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceInitExternalCtlMetaArgs) IsSetRequest() bool { +func (p *FrontendServiceReplacePartitionArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceInitExternalCtlMetaArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -67673,17 +96887,14 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) Read(iprot thrift.TProtocol) (e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67698,7 +96909,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReplacePartitionArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67708,17 +96919,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTInitExternalCtlMetaRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceReplacePartitionArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTReplacePartitionRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceInitExternalCtlMetaArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("initExternalCtlMeta_args"); err != nil { + if err = 
oprot.WriteStructBegin("replacePartition_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67726,7 +96938,6 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) Write(oprot thrift.TProtocol) ( fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67745,7 +96956,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -67762,14 +96973,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaArgs) String() string { +func (p *FrontendServiceReplacePartitionArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceInitExternalCtlMetaArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceReplacePartitionArgs(%+v)", *p) + } -func (p *FrontendServiceInitExternalCtlMetaArgs) DeepEqual(ano *FrontendServiceInitExternalCtlMetaArgs) bool { +func (p *FrontendServiceReplacePartitionArgs) DeepEqual(ano *FrontendServiceReplacePartitionArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67781,7 +96993,7 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) DeepEqual(ano *FrontendServiceI return true } -func (p *FrontendServiceInitExternalCtlMetaArgs) Field1DeepEqual(src *TInitExternalCtlMetaRequest) bool { +func (p *FrontendServiceReplacePartitionArgs) Field1DeepEqual(src *TReplacePartitionRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -67789,39 +97001,38 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) Field1DeepEqual(src *TInitExter return true } -type FrontendServiceInitExternalCtlMetaResult struct { - Success *TInitExternalCtlMetaResult_ `thrift:"success,0,optional" frugal:"0,optional,TInitExternalCtlMetaResult_" json:"success,omitempty"` +type FrontendServiceReplacePartitionResult struct { + Success *TReplacePartitionResult_ `thrift:"success,0,optional" frugal:"0,optional,TReplacePartitionResult_" json:"success,omitempty"` } -func NewFrontendServiceInitExternalCtlMetaResult() *FrontendServiceInitExternalCtlMetaResult { - return &FrontendServiceInitExternalCtlMetaResult{} +func NewFrontendServiceReplacePartitionResult() *FrontendServiceReplacePartitionResult { + return &FrontendServiceReplacePartitionResult{} } -func (p *FrontendServiceInitExternalCtlMetaResult) InitDefault() { - *p = FrontendServiceInitExternalCtlMetaResult{} +func (p *FrontendServiceReplacePartitionResult) InitDefault() { } -var FrontendServiceInitExternalCtlMetaResult_Success_DEFAULT *TInitExternalCtlMetaResult_ +var FrontendServiceReplacePartitionResult_Success_DEFAULT *TReplacePartitionResult_ -func (p *FrontendServiceInitExternalCtlMetaResult) GetSuccess() (v *TInitExternalCtlMetaResult_) { +func (p *FrontendServiceReplacePartitionResult) GetSuccess() (v *TReplacePartitionResult_) { if !p.IsSetSuccess() { - return FrontendServiceInitExternalCtlMetaResult_Success_DEFAULT + return FrontendServiceReplacePartitionResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceInitExternalCtlMetaResult) SetSuccess(x interface{}) { - p.Success = x.(*TInitExternalCtlMetaResult_) +func (p *FrontendServiceReplacePartitionResult) SetSuccess(x 
interface{}) { + p.Success = x.(*TReplacePartitionResult_) } -var fieldIDToName_FrontendServiceInitExternalCtlMetaResult = map[int16]string{ +var fieldIDToName_FrontendServiceReplacePartitionResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceInitExternalCtlMetaResult) IsSetSuccess() bool { +func (p *FrontendServiceReplacePartitionResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceInitExternalCtlMetaResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -67845,17 +97056,14 @@ func (p *FrontendServiceInitExternalCtlMetaResult) Read(iprot thrift.TProtocol) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -67870,7 +97078,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReplacePartitionResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -67880,17 +97088,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTInitExternalCtlMetaResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceReplacePartitionResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTReplacePartitionResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceInitExternalCtlMetaResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("initExternalCtlMeta_result"); err != nil { + if err = oprot.WriteStructBegin("replacePartition_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -67898,7 +97107,6 @@ func (p *FrontendServiceInitExternalCtlMetaResult) Write(oprot thrift.TProtocol) fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -67917,7 +97125,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReplacePartitionResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -67936,14 +97144,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaResult) String() string { +func (p 
*FrontendServiceReplacePartitionResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceInitExternalCtlMetaResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceReplacePartitionResult(%+v)", *p) + } -func (p *FrontendServiceInitExternalCtlMetaResult) DeepEqual(ano *FrontendServiceInitExternalCtlMetaResult) bool { +func (p *FrontendServiceReplacePartitionResult) DeepEqual(ano *FrontendServiceReplacePartitionResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -67955,7 +97164,7 @@ func (p *FrontendServiceInitExternalCtlMetaResult) DeepEqual(ano *FrontendServic return true } -func (p *FrontendServiceInitExternalCtlMetaResult) Field0DeepEqual(src *TInitExternalCtlMetaResult_) bool { +func (p *FrontendServiceReplacePartitionResult) Field0DeepEqual(src *TReplacePartitionResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -67963,39 +97172,38 @@ func (p *FrontendServiceInitExternalCtlMetaResult) Field0DeepEqual(src *TInitExt return true } -type FrontendServiceFetchSchemaTableDataArgs struct { - Request *TFetchSchemaTableDataRequest `thrift:"request,1" frugal:"1,default,TFetchSchemaTableDataRequest" json:"request"` +type FrontendServiceGetMetaArgs struct { + Request *TGetMetaRequest `thrift:"request,1" frugal:"1,default,TGetMetaRequest" json:"request"` } -func NewFrontendServiceFetchSchemaTableDataArgs() *FrontendServiceFetchSchemaTableDataArgs { - return &FrontendServiceFetchSchemaTableDataArgs{} +func NewFrontendServiceGetMetaArgs() *FrontendServiceGetMetaArgs { + return &FrontendServiceGetMetaArgs{} } -func (p *FrontendServiceFetchSchemaTableDataArgs) InitDefault() { - *p = FrontendServiceFetchSchemaTableDataArgs{} +func (p *FrontendServiceGetMetaArgs) InitDefault() { } -var FrontendServiceFetchSchemaTableDataArgs_Request_DEFAULT *TFetchSchemaTableDataRequest +var FrontendServiceGetMetaArgs_Request_DEFAULT *TGetMetaRequest -func (p *FrontendServiceFetchSchemaTableDataArgs) GetRequest() (v *TFetchSchemaTableDataRequest) { +func (p *FrontendServiceGetMetaArgs) GetRequest() (v *TGetMetaRequest) { if !p.IsSetRequest() { - return FrontendServiceFetchSchemaTableDataArgs_Request_DEFAULT + return FrontendServiceGetMetaArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceFetchSchemaTableDataArgs) SetRequest(val *TFetchSchemaTableDataRequest) { +func (p *FrontendServiceGetMetaArgs) SetRequest(val *TGetMetaRequest) { p.Request = val } -var fieldIDToName_FrontendServiceFetchSchemaTableDataArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetMetaArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceFetchSchemaTableDataArgs) IsSetRequest() bool { +func (p *FrontendServiceGetMetaArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceFetchSchemaTableDataArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68019,17 +97227,14 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) Read(iprot thrift.TProtocol) ( if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68044,7 +97249,7 @@ ReadStructBeginError: ReadFieldBeginError: return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMetaArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -68054,17 +97259,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTFetchSchemaTableDataRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetMetaArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetMetaRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceFetchSchemaTableDataArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("fetchSchemaTableData_args"); err != nil { + if err = oprot.WriteStructBegin("getMeta_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -68072,7 +97278,6 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) Write(oprot thrift.TProtocol) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68091,7 +97296,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -68108,14 +97313,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataArgs) String() string { +func (p *FrontendServiceGetMetaArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFetchSchemaTableDataArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetMetaArgs(%+v)", *p) + } -func (p *FrontendServiceFetchSchemaTableDataArgs) DeepEqual(ano *FrontendServiceFetchSchemaTableDataArgs) bool { +func (p *FrontendServiceGetMetaArgs) DeepEqual(ano *FrontendServiceGetMetaArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -68127,7 +97333,7 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) DeepEqual(ano *FrontendService return true } -func (p *FrontendServiceFetchSchemaTableDataArgs) Field1DeepEqual(src *TFetchSchemaTableDataRequest) bool { +func (p *FrontendServiceGetMetaArgs) Field1DeepEqual(src *TGetMetaRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -68135,39 +97341,38 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) Field1DeepEqual(src *TFetchSch return true } -type FrontendServiceFetchSchemaTableDataResult struct { - Success *TFetchSchemaTableDataResult_ `thrift:"success,0,optional" frugal:"0,optional,TFetchSchemaTableDataResult_" json:"success,omitempty"` +type FrontendServiceGetMetaResult struct { + Success *TGetMetaResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetMetaResult_" 
json:"success,omitempty"` } -func NewFrontendServiceFetchSchemaTableDataResult() *FrontendServiceFetchSchemaTableDataResult { - return &FrontendServiceFetchSchemaTableDataResult{} +func NewFrontendServiceGetMetaResult() *FrontendServiceGetMetaResult { + return &FrontendServiceGetMetaResult{} } -func (p *FrontendServiceFetchSchemaTableDataResult) InitDefault() { - *p = FrontendServiceFetchSchemaTableDataResult{} +func (p *FrontendServiceGetMetaResult) InitDefault() { } -var FrontendServiceFetchSchemaTableDataResult_Success_DEFAULT *TFetchSchemaTableDataResult_ +var FrontendServiceGetMetaResult_Success_DEFAULT *TGetMetaResult_ -func (p *FrontendServiceFetchSchemaTableDataResult) GetSuccess() (v *TFetchSchemaTableDataResult_) { +func (p *FrontendServiceGetMetaResult) GetSuccess() (v *TGetMetaResult_) { if !p.IsSetSuccess() { - return FrontendServiceFetchSchemaTableDataResult_Success_DEFAULT + return FrontendServiceGetMetaResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceFetchSchemaTableDataResult) SetSuccess(x interface{}) { - p.Success = x.(*TFetchSchemaTableDataResult_) +func (p *FrontendServiceGetMetaResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetMetaResult_) } -var fieldIDToName_FrontendServiceFetchSchemaTableDataResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetMetaResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceFetchSchemaTableDataResult) IsSetSuccess() bool { +func (p *FrontendServiceGetMetaResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceFetchSchemaTableDataResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68191,17 +97396,14 @@ func (p *FrontendServiceFetchSchemaTableDataResult) Read(iprot thrift.TProtocol) if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68216,7 +97418,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMetaResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -68226,17 +97428,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTFetchSchemaTableDataResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetMetaResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetMetaResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceFetchSchemaTableDataResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaResult) Write(oprot thrift.TProtocol) (err 
error) { var fieldId int16 - if err = oprot.WriteStructBegin("fetchSchemaTableData_result"); err != nil { + if err = oprot.WriteStructBegin("getMeta_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -68244,7 +97447,6 @@ func (p *FrontendServiceFetchSchemaTableDataResult) Write(oprot thrift.TProtocol fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68263,7 +97465,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetMetaResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -68282,14 +97484,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataResult) String() string { +func (p *FrontendServiceGetMetaResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceFetchSchemaTableDataResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetMetaResult(%+v)", *p) + } -func (p *FrontendServiceFetchSchemaTableDataResult) DeepEqual(ano *FrontendServiceFetchSchemaTableDataResult) bool { +func (p *FrontendServiceGetMetaResult) DeepEqual(ano *FrontendServiceGetMetaResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -68301,7 +97504,7 @@ func (p *FrontendServiceFetchSchemaTableDataResult) DeepEqual(ano *FrontendServi return true } -func (p *FrontendServiceFetchSchemaTableDataResult) Field0DeepEqual(src *TFetchSchemaTableDataResult_) bool { +func (p *FrontendServiceGetMetaResult) Field0DeepEqual(src *TGetMetaResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -68309,20 +97512,38 @@ func (p *FrontendServiceFetchSchemaTableDataResult) Field0DeepEqual(src *TFetchS return true } -type FrontendServiceAcquireTokenArgs struct { +type FrontendServiceGetBackendMetaArgs struct { + Request *TGetBackendMetaRequest `thrift:"request,1" frugal:"1,default,TGetBackendMetaRequest" json:"request"` } -func NewFrontendServiceAcquireTokenArgs() *FrontendServiceAcquireTokenArgs { - return &FrontendServiceAcquireTokenArgs{} +func NewFrontendServiceGetBackendMetaArgs() *FrontendServiceGetBackendMetaArgs { + return &FrontendServiceGetBackendMetaArgs{} } -func (p *FrontendServiceAcquireTokenArgs) InitDefault() { - *p = FrontendServiceAcquireTokenArgs{} +func (p *FrontendServiceGetBackendMetaArgs) InitDefault() { } -var fieldIDToName_FrontendServiceAcquireTokenArgs = map[int16]string{} +var FrontendServiceGetBackendMetaArgs_Request_DEFAULT *TGetBackendMetaRequest -func (p *FrontendServiceAcquireTokenArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBackendMetaArgs) GetRequest() (v *TGetBackendMetaRequest) { + if !p.IsSetRequest() { + return FrontendServiceGetBackendMetaArgs_Request_DEFAULT + } + return p.Request +} +func (p *FrontendServiceGetBackendMetaArgs) SetRequest(val *TGetBackendMetaRequest) { + p.Request = val +} + +var fieldIDToName_FrontendServiceGetBackendMetaArgs = map[int16]string{ + 1: "request", +} + +func (p *FrontendServiceGetBackendMetaArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *FrontendServiceGetBackendMetaArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ 
-68339,10 +97560,21 @@ func (p *FrontendServiceAcquireTokenArgs) Read(iprot thrift.TProtocol) (err erro if fieldTypeId == thrift.STOP { break } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68356,8 +97588,10 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBackendMetaArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -68365,12 +97599,25 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAcquireTokenArgs) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("acquireToken_args"); err != nil { +func (p *FrontendServiceGetBackendMetaArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetBackendMetaRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Request = _field + return nil +} + +func (p *FrontendServiceGetBackendMetaArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("getBackendMeta_args"); err != nil { goto WriteStructBeginError } if p != nil { - + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68381,61 +97628,91 @@ func (p *FrontendServiceAcquireTokenArgs) Write(oprot thrift.TProtocol) (err err return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceAcquireTokenArgs) String() string { +func (p *FrontendServiceGetBackendMetaArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Request.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *FrontendServiceGetBackendMetaArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceAcquireTokenArgs(%+v)", *p) 
+ return fmt.Sprintf("FrontendServiceGetBackendMetaArgs(%+v)", *p) + } -func (p *FrontendServiceAcquireTokenArgs) DeepEqual(ano *FrontendServiceAcquireTokenArgs) bool { +func (p *FrontendServiceGetBackendMetaArgs) DeepEqual(ano *FrontendServiceGetBackendMetaArgs) bool { if p == ano { return true } else if p == nil || ano == nil { return false } + if !p.Field1DeepEqual(ano.Request) { + return false + } return true } -type FrontendServiceAcquireTokenResult struct { - Success *TMySqlLoadAcquireTokenResult_ `thrift:"success,0,optional" frugal:"0,optional,TMySqlLoadAcquireTokenResult_" json:"success,omitempty"` +func (p *FrontendServiceGetBackendMetaArgs) Field1DeepEqual(src *TGetBackendMetaRequest) bool { + + if !p.Request.DeepEqual(src) { + return false + } + return true } -func NewFrontendServiceAcquireTokenResult() *FrontendServiceAcquireTokenResult { - return &FrontendServiceAcquireTokenResult{} +type FrontendServiceGetBackendMetaResult struct { + Success *TGetBackendMetaResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetBackendMetaResult_" json:"success,omitempty"` } -func (p *FrontendServiceAcquireTokenResult) InitDefault() { - *p = FrontendServiceAcquireTokenResult{} +func NewFrontendServiceGetBackendMetaResult() *FrontendServiceGetBackendMetaResult { + return &FrontendServiceGetBackendMetaResult{} } -var FrontendServiceAcquireTokenResult_Success_DEFAULT *TMySqlLoadAcquireTokenResult_ +func (p *FrontendServiceGetBackendMetaResult) InitDefault() { +} -func (p *FrontendServiceAcquireTokenResult) GetSuccess() (v *TMySqlLoadAcquireTokenResult_) { +var FrontendServiceGetBackendMetaResult_Success_DEFAULT *TGetBackendMetaResult_ + +func (p *FrontendServiceGetBackendMetaResult) GetSuccess() (v *TGetBackendMetaResult_) { if !p.IsSetSuccess() { - return FrontendServiceAcquireTokenResult_Success_DEFAULT + return FrontendServiceGetBackendMetaResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceAcquireTokenResult) SetSuccess(x interface{}) { - p.Success = x.(*TMySqlLoadAcquireTokenResult_) +func (p *FrontendServiceGetBackendMetaResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetBackendMetaResult_) } -var fieldIDToName_FrontendServiceAcquireTokenResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetBackendMetaResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceAcquireTokenResult) IsSetSuccess() bool { +func (p *FrontendServiceGetBackendMetaResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceAcquireTokenResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBackendMetaResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68459,17 +97736,14 @@ func (p *FrontendServiceAcquireTokenResult) Read(iprot thrift.TProtocol) (err er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68484,7 +97758,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAcquireTokenResult[fieldId]), err) + return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBackendMetaResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -68494,17 +97768,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAcquireTokenResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTMySqlLoadAcquireTokenResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetBackendMetaResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetBackendMetaResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceAcquireTokenResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBackendMetaResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("acquireToken_result"); err != nil { + if err = oprot.WriteStructBegin("getBackendMeta_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -68512,7 +97787,6 @@ func (p *FrontendServiceAcquireTokenResult) Write(oprot thrift.TProtocol) (err e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68531,7 +97805,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceAcquireTokenResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetBackendMetaResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -68550,14 +97824,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceAcquireTokenResult) String() string { +func (p *FrontendServiceGetBackendMetaResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceAcquireTokenResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetBackendMetaResult(%+v)", *p) + } -func (p *FrontendServiceAcquireTokenResult) DeepEqual(ano *FrontendServiceAcquireTokenResult) bool { +func (p *FrontendServiceGetBackendMetaResult) DeepEqual(ano *FrontendServiceGetBackendMetaResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -68569,7 +97844,7 @@ func (p *FrontendServiceAcquireTokenResult) DeepEqual(ano *FrontendServiceAcquir return true } -func (p *FrontendServiceAcquireTokenResult) Field0DeepEqual(src *TMySqlLoadAcquireTokenResult_) bool { +func (p *FrontendServiceGetBackendMetaResult) Field0DeepEqual(src *TGetBackendMetaResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -68577,39 +97852,38 @@ func (p *FrontendServiceAcquireTokenResult) Field0DeepEqual(src *TMySqlLoadAcqui return true } -type FrontendServiceConfirmUnusedRemoteFilesArgs struct { - Request *TConfirmUnusedRemoteFilesRequest `thrift:"request,1" frugal:"1,default,TConfirmUnusedRemoteFilesRequest" json:"request"` +type FrontendServiceGetColumnInfoArgs struct { + Request *TGetColumnInfoRequest `thrift:"request,1" frugal:"1,default,TGetColumnInfoRequest" json:"request"` } -func NewFrontendServiceConfirmUnusedRemoteFilesArgs() *FrontendServiceConfirmUnusedRemoteFilesArgs { - return &FrontendServiceConfirmUnusedRemoteFilesArgs{} +func 
NewFrontendServiceGetColumnInfoArgs() *FrontendServiceGetColumnInfoArgs { + return &FrontendServiceGetColumnInfoArgs{} } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) InitDefault() { - *p = FrontendServiceConfirmUnusedRemoteFilesArgs{} +func (p *FrontendServiceGetColumnInfoArgs) InitDefault() { } -var FrontendServiceConfirmUnusedRemoteFilesArgs_Request_DEFAULT *TConfirmUnusedRemoteFilesRequest +var FrontendServiceGetColumnInfoArgs_Request_DEFAULT *TGetColumnInfoRequest -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) GetRequest() (v *TConfirmUnusedRemoteFilesRequest) { +func (p *FrontendServiceGetColumnInfoArgs) GetRequest() (v *TGetColumnInfoRequest) { if !p.IsSetRequest() { - return FrontendServiceConfirmUnusedRemoteFilesArgs_Request_DEFAULT + return FrontendServiceGetColumnInfoArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) SetRequest(val *TConfirmUnusedRemoteFilesRequest) { +func (p *FrontendServiceGetColumnInfoArgs) SetRequest(val *TGetColumnInfoRequest) { p.Request = val } -var fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs = map[int16]string{ +var fieldIDToName_FrontendServiceGetColumnInfoArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) IsSetRequest() bool { +func (p *FrontendServiceGetColumnInfoArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68633,17 +97907,14 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Read(iprot thrift.TProtoco if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68658,7 +97929,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetColumnInfoArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -68668,17 +97939,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTConfirmUnusedRemoteFilesRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceGetColumnInfoArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTGetColumnInfoRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("confirmUnusedRemoteFiles_args"); err != nil { + 
if err = oprot.WriteStructBegin("getColumnInfo_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -68686,7 +97958,6 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Write(oprot thrift.TProtoc fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68705,7 +97976,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -68722,14 +97993,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) String() string { +func (p *FrontendServiceGetColumnInfoArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceConfirmUnusedRemoteFilesArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetColumnInfoArgs(%+v)", *p) + } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) DeepEqual(ano *FrontendServiceConfirmUnusedRemoteFilesArgs) bool { +func (p *FrontendServiceGetColumnInfoArgs) DeepEqual(ano *FrontendServiceGetColumnInfoArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -68741,7 +98013,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) DeepEqual(ano *FrontendSer return true } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Field1DeepEqual(src *TConfirmUnusedRemoteFilesRequest) bool { +func (p *FrontendServiceGetColumnInfoArgs) Field1DeepEqual(src *TGetColumnInfoRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -68749,39 +98021,38 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) Field1DeepEqual(src *TConf return true } -type FrontendServiceConfirmUnusedRemoteFilesResult struct { - Success *TConfirmUnusedRemoteFilesResult_ `thrift:"success,0,optional" frugal:"0,optional,TConfirmUnusedRemoteFilesResult_" json:"success,omitempty"` +type FrontendServiceGetColumnInfoResult struct { + Success *TGetColumnInfoResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetColumnInfoResult_" json:"success,omitempty"` } -func NewFrontendServiceConfirmUnusedRemoteFilesResult() *FrontendServiceConfirmUnusedRemoteFilesResult { - return &FrontendServiceConfirmUnusedRemoteFilesResult{} +func NewFrontendServiceGetColumnInfoResult() *FrontendServiceGetColumnInfoResult { + return &FrontendServiceGetColumnInfoResult{} } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) InitDefault() { - *p = FrontendServiceConfirmUnusedRemoteFilesResult{} +func (p *FrontendServiceGetColumnInfoResult) InitDefault() { } -var FrontendServiceConfirmUnusedRemoteFilesResult_Success_DEFAULT *TConfirmUnusedRemoteFilesResult_ +var FrontendServiceGetColumnInfoResult_Success_DEFAULT *TGetColumnInfoResult_ -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) GetSuccess() (v *TConfirmUnusedRemoteFilesResult_) { +func (p *FrontendServiceGetColumnInfoResult) GetSuccess() (v *TGetColumnInfoResult_) { if !p.IsSetSuccess() { - return FrontendServiceConfirmUnusedRemoteFilesResult_Success_DEFAULT + return FrontendServiceGetColumnInfoResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) SetSuccess(x interface{}) { - p.Success = x.(*TConfirmUnusedRemoteFilesResult_) +func (p 
*FrontendServiceGetColumnInfoResult) SetSuccess(x interface{}) { + p.Success = x.(*TGetColumnInfoResult_) } -var fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult = map[int16]string{ +var fieldIDToName_FrontendServiceGetColumnInfoResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) IsSetSuccess() bool { +func (p *FrontendServiceGetColumnInfoResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68805,17 +98076,14 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Read(iprot thrift.TProto if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -68830,7 +98098,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetColumnInfoResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -68840,17 +98108,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTConfirmUnusedRemoteFilesResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceGetColumnInfoResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTGetColumnInfoResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("confirmUnusedRemoteFiles_result"); err != nil { + if err = oprot.WriteStructBegin("getColumnInfo_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -68858,7 +98127,6 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Write(oprot thrift.TProt fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -68877,7 +98145,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceGetColumnInfoResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -68896,14 +98164,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p 
*FrontendServiceConfirmUnusedRemoteFilesResult) String() string { +func (p *FrontendServiceGetColumnInfoResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceConfirmUnusedRemoteFilesResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceGetColumnInfoResult(%+v)", *p) + } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) DeepEqual(ano *FrontendServiceConfirmUnusedRemoteFilesResult) bool { +func (p *FrontendServiceGetColumnInfoResult) DeepEqual(ano *FrontendServiceGetColumnInfoResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -68915,7 +98184,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) DeepEqual(ano *FrontendS return true } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Field0DeepEqual(src *TConfirmUnusedRemoteFilesResult_) bool { +func (p *FrontendServiceGetColumnInfoResult) Field0DeepEqual(src *TGetColumnInfoResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -68923,39 +98192,38 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) Field0DeepEqual(src *TCo return true } -type FrontendServiceCheckAuthArgs struct { - Request *TCheckAuthRequest `thrift:"request,1" frugal:"1,default,TCheckAuthRequest" json:"request"` +type FrontendServiceInvalidateStatsCacheArgs struct { + Request *TInvalidateFollowerStatsCacheRequest `thrift:"request,1" frugal:"1,default,TInvalidateFollowerStatsCacheRequest" json:"request"` } -func NewFrontendServiceCheckAuthArgs() *FrontendServiceCheckAuthArgs { - return &FrontendServiceCheckAuthArgs{} +func NewFrontendServiceInvalidateStatsCacheArgs() *FrontendServiceInvalidateStatsCacheArgs { + return &FrontendServiceInvalidateStatsCacheArgs{} } -func (p *FrontendServiceCheckAuthArgs) InitDefault() { - *p = FrontendServiceCheckAuthArgs{} +func (p *FrontendServiceInvalidateStatsCacheArgs) InitDefault() { } -var FrontendServiceCheckAuthArgs_Request_DEFAULT *TCheckAuthRequest +var FrontendServiceInvalidateStatsCacheArgs_Request_DEFAULT *TInvalidateFollowerStatsCacheRequest -func (p *FrontendServiceCheckAuthArgs) GetRequest() (v *TCheckAuthRequest) { +func (p *FrontendServiceInvalidateStatsCacheArgs) GetRequest() (v *TInvalidateFollowerStatsCacheRequest) { if !p.IsSetRequest() { - return FrontendServiceCheckAuthArgs_Request_DEFAULT + return FrontendServiceInvalidateStatsCacheArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceCheckAuthArgs) SetRequest(val *TCheckAuthRequest) { +func (p *FrontendServiceInvalidateStatsCacheArgs) SetRequest(val *TInvalidateFollowerStatsCacheRequest) { p.Request = val } -var fieldIDToName_FrontendServiceCheckAuthArgs = map[int16]string{ +var fieldIDToName_FrontendServiceInvalidateStatsCacheArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceCheckAuthArgs) IsSetRequest() bool { +func (p *FrontendServiceInvalidateStatsCacheArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceCheckAuthArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -68979,17 +98247,14 @@ func (p *FrontendServiceCheckAuthArgs) Read(iprot thrift.TProtocol) (err error) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if 
err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69004,7 +98269,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInvalidateStatsCacheArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69014,17 +98279,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTCheckAuthRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceInvalidateStatsCacheArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTInvalidateFollowerStatsCacheRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceCheckAuthArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("checkAuth_args"); err != nil { + if err = oprot.WriteStructBegin("invalidateStatsCache_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69032,7 +98298,6 @@ func (p *FrontendServiceCheckAuthArgs) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69051,7 +98316,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -69068,14 +98333,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceCheckAuthArgs) String() string { +func (p *FrontendServiceInvalidateStatsCacheArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceCheckAuthArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceInvalidateStatsCacheArgs(%+v)", *p) + } -func (p *FrontendServiceCheckAuthArgs) DeepEqual(ano *FrontendServiceCheckAuthArgs) bool { +func (p *FrontendServiceInvalidateStatsCacheArgs) DeepEqual(ano *FrontendServiceInvalidateStatsCacheArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69087,7 +98353,7 @@ func (p *FrontendServiceCheckAuthArgs) DeepEqual(ano *FrontendServiceCheckAuthAr return true } -func (p *FrontendServiceCheckAuthArgs) Field1DeepEqual(src *TCheckAuthRequest) bool { +func (p *FrontendServiceInvalidateStatsCacheArgs) Field1DeepEqual(src *TInvalidateFollowerStatsCacheRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -69095,39 +98361,38 @@ func (p *FrontendServiceCheckAuthArgs) Field1DeepEqual(src *TCheckAuthRequest) b return true } -type FrontendServiceCheckAuthResult struct { - Success *TCheckAuthResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckAuthResult_" 
json:"success,omitempty"` +type FrontendServiceInvalidateStatsCacheResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceCheckAuthResult() *FrontendServiceCheckAuthResult { - return &FrontendServiceCheckAuthResult{} +func NewFrontendServiceInvalidateStatsCacheResult() *FrontendServiceInvalidateStatsCacheResult { + return &FrontendServiceInvalidateStatsCacheResult{} } -func (p *FrontendServiceCheckAuthResult) InitDefault() { - *p = FrontendServiceCheckAuthResult{} +func (p *FrontendServiceInvalidateStatsCacheResult) InitDefault() { } -var FrontendServiceCheckAuthResult_Success_DEFAULT *TCheckAuthResult_ +var FrontendServiceInvalidateStatsCacheResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceCheckAuthResult) GetSuccess() (v *TCheckAuthResult_) { +func (p *FrontendServiceInvalidateStatsCacheResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceCheckAuthResult_Success_DEFAULT + return FrontendServiceInvalidateStatsCacheResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceCheckAuthResult) SetSuccess(x interface{}) { - p.Success = x.(*TCheckAuthResult_) +func (p *FrontendServiceInvalidateStatsCacheResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceCheckAuthResult = map[int16]string{ +var fieldIDToName_FrontendServiceInvalidateStatsCacheResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceCheckAuthResult) IsSetSuccess() bool { +func (p *FrontendServiceInvalidateStatsCacheResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceCheckAuthResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -69151,17 +98416,14 @@ func (p *FrontendServiceCheckAuthResult) Read(iprot thrift.TProtocol) (err error if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69176,7 +98438,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInvalidateStatsCacheResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69186,17 +98448,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTCheckAuthResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceInvalidateStatsCacheResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p 
*FrontendServiceCheckAuthResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("checkAuth_result"); err != nil { + if err = oprot.WriteStructBegin("invalidateStatsCache_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69204,7 +98467,6 @@ func (p *FrontendServiceCheckAuthResult) Write(oprot thrift.TProtocol) (err erro fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69223,7 +98485,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceInvalidateStatsCacheResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -69242,14 +98504,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceCheckAuthResult) String() string { +func (p *FrontendServiceInvalidateStatsCacheResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceCheckAuthResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceInvalidateStatsCacheResult(%+v)", *p) + } -func (p *FrontendServiceCheckAuthResult) DeepEqual(ano *FrontendServiceCheckAuthResult) bool { +func (p *FrontendServiceInvalidateStatsCacheResult) DeepEqual(ano *FrontendServiceInvalidateStatsCacheResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69261,7 +98524,7 @@ func (p *FrontendServiceCheckAuthResult) DeepEqual(ano *FrontendServiceCheckAuth return true } -func (p *FrontendServiceCheckAuthResult) Field0DeepEqual(src *TCheckAuthResult_) bool { +func (p *FrontendServiceInvalidateStatsCacheResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -69269,39 +98532,38 @@ func (p *FrontendServiceCheckAuthResult) Field0DeepEqual(src *TCheckAuthResult_) return true } -type FrontendServiceGetQueryStatsArgs struct { - Request *TGetQueryStatsRequest `thrift:"request,1" frugal:"1,default,TGetQueryStatsRequest" json:"request"` +type FrontendServiceShowProcessListArgs struct { + Request *TShowProcessListRequest `thrift:"request,1" frugal:"1,default,TShowProcessListRequest" json:"request"` } -func NewFrontendServiceGetQueryStatsArgs() *FrontendServiceGetQueryStatsArgs { - return &FrontendServiceGetQueryStatsArgs{} +func NewFrontendServiceShowProcessListArgs() *FrontendServiceShowProcessListArgs { + return &FrontendServiceShowProcessListArgs{} } -func (p *FrontendServiceGetQueryStatsArgs) InitDefault() { - *p = FrontendServiceGetQueryStatsArgs{} +func (p *FrontendServiceShowProcessListArgs) InitDefault() { } -var FrontendServiceGetQueryStatsArgs_Request_DEFAULT *TGetQueryStatsRequest +var FrontendServiceShowProcessListArgs_Request_DEFAULT *TShowProcessListRequest -func (p *FrontendServiceGetQueryStatsArgs) GetRequest() (v *TGetQueryStatsRequest) { +func (p *FrontendServiceShowProcessListArgs) GetRequest() (v *TShowProcessListRequest) { if !p.IsSetRequest() { - return FrontendServiceGetQueryStatsArgs_Request_DEFAULT + return FrontendServiceShowProcessListArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetQueryStatsArgs) SetRequest(val *TGetQueryStatsRequest) { +func (p 
*FrontendServiceShowProcessListArgs) SetRequest(val *TShowProcessListRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetQueryStatsArgs = map[int16]string{ +var fieldIDToName_FrontendServiceShowProcessListArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetQueryStatsArgs) IsSetRequest() bool { +func (p *FrontendServiceShowProcessListArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetQueryStatsArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -69325,17 +98587,14 @@ func (p *FrontendServiceGetQueryStatsArgs) Read(iprot thrift.TProtocol) (err err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69350,7 +98609,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowProcessListArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69360,17 +98619,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetQueryStatsRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceShowProcessListArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTShowProcessListRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetQueryStatsArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getQueryStats_args"); err != nil { + if err = oprot.WriteStructBegin("showProcessList_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69378,7 +98638,6 @@ func (p *FrontendServiceGetQueryStatsArgs) Write(oprot thrift.TProtocol) (err er fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69397,7 +98656,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -69414,14 +98673,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsArgs) String() string { +func (p *FrontendServiceShowProcessListArgs) String() string { if p == nil { return "" } - return 
fmt.Sprintf("FrontendServiceGetQueryStatsArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowProcessListArgs(%+v)", *p) + } -func (p *FrontendServiceGetQueryStatsArgs) DeepEqual(ano *FrontendServiceGetQueryStatsArgs) bool { +func (p *FrontendServiceShowProcessListArgs) DeepEqual(ano *FrontendServiceShowProcessListArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69433,7 +98693,7 @@ func (p *FrontendServiceGetQueryStatsArgs) DeepEqual(ano *FrontendServiceGetQuer return true } -func (p *FrontendServiceGetQueryStatsArgs) Field1DeepEqual(src *TGetQueryStatsRequest) bool { +func (p *FrontendServiceShowProcessListArgs) Field1DeepEqual(src *TShowProcessListRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -69441,39 +98701,38 @@ func (p *FrontendServiceGetQueryStatsArgs) Field1DeepEqual(src *TGetQueryStatsRe return true } -type FrontendServiceGetQueryStatsResult struct { - Success *TQueryStatsResult_ `thrift:"success,0,optional" frugal:"0,optional,TQueryStatsResult_" json:"success,omitempty"` +type FrontendServiceShowProcessListResult struct { + Success *TShowProcessListResult_ `thrift:"success,0,optional" frugal:"0,optional,TShowProcessListResult_" json:"success,omitempty"` } -func NewFrontendServiceGetQueryStatsResult() *FrontendServiceGetQueryStatsResult { - return &FrontendServiceGetQueryStatsResult{} +func NewFrontendServiceShowProcessListResult() *FrontendServiceShowProcessListResult { + return &FrontendServiceShowProcessListResult{} } -func (p *FrontendServiceGetQueryStatsResult) InitDefault() { - *p = FrontendServiceGetQueryStatsResult{} +func (p *FrontendServiceShowProcessListResult) InitDefault() { } -var FrontendServiceGetQueryStatsResult_Success_DEFAULT *TQueryStatsResult_ +var FrontendServiceShowProcessListResult_Success_DEFAULT *TShowProcessListResult_ -func (p *FrontendServiceGetQueryStatsResult) GetSuccess() (v *TQueryStatsResult_) { +func (p *FrontendServiceShowProcessListResult) GetSuccess() (v *TShowProcessListResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetQueryStatsResult_Success_DEFAULT + return FrontendServiceShowProcessListResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetQueryStatsResult) SetSuccess(x interface{}) { - p.Success = x.(*TQueryStatsResult_) +func (p *FrontendServiceShowProcessListResult) SetSuccess(x interface{}) { + p.Success = x.(*TShowProcessListResult_) } -var fieldIDToName_FrontendServiceGetQueryStatsResult = map[int16]string{ +var fieldIDToName_FrontendServiceShowProcessListResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetQueryStatsResult) IsSetSuccess() bool { +func (p *FrontendServiceShowProcessListResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetQueryStatsResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -69497,17 +98756,14 @@ func (p *FrontendServiceGetQueryStatsResult) Read(iprot thrift.TProtocol) (err e if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69522,7 +98778,7 @@ ReadStructBeginError: ReadFieldBeginError: return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowProcessListResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69532,17 +98788,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTQueryStatsResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceShowProcessListResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTShowProcessListResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetQueryStatsResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getQueryStats_result"); err != nil { + if err = oprot.WriteStructBegin("showProcessList_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69550,7 +98807,6 @@ func (p *FrontendServiceGetQueryStatsResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69569,7 +98825,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowProcessListResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -69588,14 +98844,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsResult) String() string { +func (p *FrontendServiceShowProcessListResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetQueryStatsResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowProcessListResult(%+v)", *p) + } -func (p *FrontendServiceGetQueryStatsResult) DeepEqual(ano *FrontendServiceGetQueryStatsResult) bool { +func (p *FrontendServiceShowProcessListResult) DeepEqual(ano *FrontendServiceShowProcessListResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69607,7 +98864,7 @@ func (p *FrontendServiceGetQueryStatsResult) DeepEqual(ano *FrontendServiceGetQu return true } -func (p *FrontendServiceGetQueryStatsResult) Field0DeepEqual(src *TQueryStatsResult_) bool { +func (p *FrontendServiceShowProcessListResult) Field0DeepEqual(src *TShowProcessListResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -69615,39 +98872,38 @@ func (p *FrontendServiceGetQueryStatsResult) Field0DeepEqual(src *TQueryStatsRes return true } -type FrontendServiceGetTabletReplicaInfosArgs struct { - Request *TGetTabletReplicaInfosRequest `thrift:"request,1" frugal:"1,default,TGetTabletReplicaInfosRequest" json:"request"` +type FrontendServiceReportCommitTxnResultArgs struct { + Request *TReportCommitTxnResultRequest 
`thrift:"request,1" frugal:"1,default,TReportCommitTxnResultRequest" json:"request"` } -func NewFrontendServiceGetTabletReplicaInfosArgs() *FrontendServiceGetTabletReplicaInfosArgs { - return &FrontendServiceGetTabletReplicaInfosArgs{} +func NewFrontendServiceReportCommitTxnResultArgs() *FrontendServiceReportCommitTxnResultArgs { + return &FrontendServiceReportCommitTxnResultArgs{} } -func (p *FrontendServiceGetTabletReplicaInfosArgs) InitDefault() { - *p = FrontendServiceGetTabletReplicaInfosArgs{} +func (p *FrontendServiceReportCommitTxnResultArgs) InitDefault() { } -var FrontendServiceGetTabletReplicaInfosArgs_Request_DEFAULT *TGetTabletReplicaInfosRequest +var FrontendServiceReportCommitTxnResultArgs_Request_DEFAULT *TReportCommitTxnResultRequest -func (p *FrontendServiceGetTabletReplicaInfosArgs) GetRequest() (v *TGetTabletReplicaInfosRequest) { +func (p *FrontendServiceReportCommitTxnResultArgs) GetRequest() (v *TReportCommitTxnResultRequest) { if !p.IsSetRequest() { - return FrontendServiceGetTabletReplicaInfosArgs_Request_DEFAULT + return FrontendServiceReportCommitTxnResultArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetTabletReplicaInfosArgs) SetRequest(val *TGetTabletReplicaInfosRequest) { +func (p *FrontendServiceReportCommitTxnResultArgs) SetRequest(val *TReportCommitTxnResultRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs = map[int16]string{ +var fieldIDToName_FrontendServiceReportCommitTxnResultArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetTabletReplicaInfosArgs) IsSetRequest() bool { +func (p *FrontendServiceReportCommitTxnResultArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetTabletReplicaInfosArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -69671,17 +98927,14 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) Read(iprot thrift.TProtocol) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69696,7 +98949,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportCommitTxnResultArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69706,17 +98959,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetTabletReplicaInfosRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceReportCommitTxnResultArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTReportCommitTxnResultRequest() + if err := _field.Read(iprot); err != nil { 
return err } + p.Request = _field return nil } -func (p *FrontendServiceGetTabletReplicaInfosArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getTabletReplicaInfos_args"); err != nil { + if err = oprot.WriteStructBegin("reportCommitTxnResult_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69724,7 +98978,6 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) Write(oprot thrift.TProtocol) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69743,7 +98996,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -69760,14 +99013,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosArgs) String() string { +func (p *FrontendServiceReportCommitTxnResultArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetTabletReplicaInfosArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceReportCommitTxnResultArgs(%+v)", *p) + } -func (p *FrontendServiceGetTabletReplicaInfosArgs) DeepEqual(ano *FrontendServiceGetTabletReplicaInfosArgs) bool { +func (p *FrontendServiceReportCommitTxnResultArgs) DeepEqual(ano *FrontendServiceReportCommitTxnResultArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69779,7 +99033,7 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) DeepEqual(ano *FrontendServic return true } -func (p *FrontendServiceGetTabletReplicaInfosArgs) Field1DeepEqual(src *TGetTabletReplicaInfosRequest) bool { +func (p *FrontendServiceReportCommitTxnResultArgs) Field1DeepEqual(src *TReportCommitTxnResultRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -69787,39 +99041,38 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) Field1DeepEqual(src *TGetTabl return true } -type FrontendServiceGetTabletReplicaInfosResult struct { - Success *TGetTabletReplicaInfosResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetTabletReplicaInfosResult_" json:"success,omitempty"` +type FrontendServiceReportCommitTxnResultResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceGetTabletReplicaInfosResult() *FrontendServiceGetTabletReplicaInfosResult { - return &FrontendServiceGetTabletReplicaInfosResult{} +func NewFrontendServiceReportCommitTxnResultResult() *FrontendServiceReportCommitTxnResultResult { + return &FrontendServiceReportCommitTxnResultResult{} } -func (p *FrontendServiceGetTabletReplicaInfosResult) InitDefault() { - *p = FrontendServiceGetTabletReplicaInfosResult{} +func (p *FrontendServiceReportCommitTxnResultResult) InitDefault() { } -var FrontendServiceGetTabletReplicaInfosResult_Success_DEFAULT *TGetTabletReplicaInfosResult_ +var FrontendServiceReportCommitTxnResultResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceGetTabletReplicaInfosResult) GetSuccess() (v *TGetTabletReplicaInfosResult_) { +func (p 
*FrontendServiceReportCommitTxnResultResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceGetTabletReplicaInfosResult_Success_DEFAULT + return FrontendServiceReportCommitTxnResultResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetTabletReplicaInfosResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetTabletReplicaInfosResult_) +func (p *FrontendServiceReportCommitTxnResultResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceGetTabletReplicaInfosResult = map[int16]string{ +var fieldIDToName_FrontendServiceReportCommitTxnResultResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetTabletReplicaInfosResult) IsSetSuccess() bool { +func (p *FrontendServiceReportCommitTxnResultResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetTabletReplicaInfosResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -69843,17 +99096,14 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) Read(iprot thrift.TProtocol if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -69868,7 +99118,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportCommitTxnResultResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -69878,17 +99128,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetTabletReplicaInfosResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceReportCommitTxnResultResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetTabletReplicaInfosResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getTabletReplicaInfos_result"); err != nil { + if err = oprot.WriteStructBegin("reportCommitTxnResult_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -69896,7 +99147,6 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) Write(oprot thrift.TProtoco fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -69915,7 +99165,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p 
*FrontendServiceGetTabletReplicaInfosResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceReportCommitTxnResultResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -69934,14 +99184,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosResult) String() string { +func (p *FrontendServiceReportCommitTxnResultResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetTabletReplicaInfosResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceReportCommitTxnResultResult(%+v)", *p) + } -func (p *FrontendServiceGetTabletReplicaInfosResult) DeepEqual(ano *FrontendServiceGetTabletReplicaInfosResult) bool { +func (p *FrontendServiceReportCommitTxnResultResult) DeepEqual(ano *FrontendServiceReportCommitTxnResultResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -69953,7 +99204,7 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) DeepEqual(ano *FrontendServ return true } -func (p *FrontendServiceGetTabletReplicaInfosResult) Field0DeepEqual(src *TGetTabletReplicaInfosResult_) bool { +func (p *FrontendServiceReportCommitTxnResultResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -69961,39 +99212,38 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) Field0DeepEqual(src *TGetTa return true } -type FrontendServiceGetMasterTokenArgs struct { - Request *TGetMasterTokenRequest `thrift:"request,1" frugal:"1,default,TGetMasterTokenRequest" json:"request"` +type FrontendServiceShowUserArgs struct { + Request *TShowUserRequest `thrift:"request,1" frugal:"1,default,TShowUserRequest" json:"request"` } -func NewFrontendServiceGetMasterTokenArgs() *FrontendServiceGetMasterTokenArgs { - return &FrontendServiceGetMasterTokenArgs{} +func NewFrontendServiceShowUserArgs() *FrontendServiceShowUserArgs { + return &FrontendServiceShowUserArgs{} } -func (p *FrontendServiceGetMasterTokenArgs) InitDefault() { - *p = FrontendServiceGetMasterTokenArgs{} +func (p *FrontendServiceShowUserArgs) InitDefault() { } -var FrontendServiceGetMasterTokenArgs_Request_DEFAULT *TGetMasterTokenRequest +var FrontendServiceShowUserArgs_Request_DEFAULT *TShowUserRequest -func (p *FrontendServiceGetMasterTokenArgs) GetRequest() (v *TGetMasterTokenRequest) { +func (p *FrontendServiceShowUserArgs) GetRequest() (v *TShowUserRequest) { if !p.IsSetRequest() { - return FrontendServiceGetMasterTokenArgs_Request_DEFAULT + return FrontendServiceShowUserArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetMasterTokenArgs) SetRequest(val *TGetMasterTokenRequest) { +func (p *FrontendServiceShowUserArgs) SetRequest(val *TShowUserRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetMasterTokenArgs = map[int16]string{ +var fieldIDToName_FrontendServiceShowUserArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetMasterTokenArgs) IsSetRequest() bool { +func (p *FrontendServiceShowUserArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetMasterTokenArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70017,17 +99267,14 @@ func (p *FrontendServiceGetMasterTokenArgs) Read(iprot thrift.TProtocol) (err 
er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70042,7 +99289,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowUserArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70052,17 +99299,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetMasterTokenRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceShowUserArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTShowUserRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetMasterTokenArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getMasterToken_args"); err != nil { + if err = oprot.WriteStructBegin("showUser_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70070,7 +99318,6 @@ func (p *FrontendServiceGetMasterTokenArgs) Write(oprot thrift.TProtocol) (err e fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70089,7 +99336,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -70106,14 +99353,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenArgs) String() string { +func (p *FrontendServiceShowUserArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetMasterTokenArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowUserArgs(%+v)", *p) + } -func (p *FrontendServiceGetMasterTokenArgs) DeepEqual(ano *FrontendServiceGetMasterTokenArgs) bool { +func (p *FrontendServiceShowUserArgs) DeepEqual(ano *FrontendServiceShowUserArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70125,7 +99373,7 @@ func (p *FrontendServiceGetMasterTokenArgs) DeepEqual(ano *FrontendServiceGetMas return true } -func (p *FrontendServiceGetMasterTokenArgs) Field1DeepEqual(src *TGetMasterTokenRequest) bool { +func (p *FrontendServiceShowUserArgs) Field1DeepEqual(src *TShowUserRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -70133,39 +99381,38 @@ func (p 
*FrontendServiceGetMasterTokenArgs) Field1DeepEqual(src *TGetMasterToken return true } -type FrontendServiceGetMasterTokenResult struct { - Success *TGetMasterTokenResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetMasterTokenResult_" json:"success,omitempty"` +type FrontendServiceShowUserResult struct { + Success *TShowUserResult_ `thrift:"success,0,optional" frugal:"0,optional,TShowUserResult_" json:"success,omitempty"` } -func NewFrontendServiceGetMasterTokenResult() *FrontendServiceGetMasterTokenResult { - return &FrontendServiceGetMasterTokenResult{} +func NewFrontendServiceShowUserResult() *FrontendServiceShowUserResult { + return &FrontendServiceShowUserResult{} } -func (p *FrontendServiceGetMasterTokenResult) InitDefault() { - *p = FrontendServiceGetMasterTokenResult{} +func (p *FrontendServiceShowUserResult) InitDefault() { } -var FrontendServiceGetMasterTokenResult_Success_DEFAULT *TGetMasterTokenResult_ +var FrontendServiceShowUserResult_Success_DEFAULT *TShowUserResult_ -func (p *FrontendServiceGetMasterTokenResult) GetSuccess() (v *TGetMasterTokenResult_) { +func (p *FrontendServiceShowUserResult) GetSuccess() (v *TShowUserResult_) { if !p.IsSetSuccess() { - return FrontendServiceGetMasterTokenResult_Success_DEFAULT + return FrontendServiceShowUserResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetMasterTokenResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetMasterTokenResult_) +func (p *FrontendServiceShowUserResult) SetSuccess(x interface{}) { + p.Success = x.(*TShowUserResult_) } -var fieldIDToName_FrontendServiceGetMasterTokenResult = map[int16]string{ +var fieldIDToName_FrontendServiceShowUserResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetMasterTokenResult) IsSetSuccess() bool { +func (p *FrontendServiceShowUserResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetMasterTokenResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70189,17 +99436,14 @@ func (p *FrontendServiceGetMasterTokenResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70214,7 +99458,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowUserResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70224,17 +99468,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetMasterTokenResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceShowUserResult) ReadField0(iprot 
thrift.TProtocol) error { + _field := NewTShowUserResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetMasterTokenResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getMasterToken_result"); err != nil { + if err = oprot.WriteStructBegin("showUser_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70242,7 +99487,6 @@ func (p *FrontendServiceGetMasterTokenResult) Write(oprot thrift.TProtocol) (err fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70261,7 +99505,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceShowUserResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -70280,14 +99524,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenResult) String() string { +func (p *FrontendServiceShowUserResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetMasterTokenResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceShowUserResult(%+v)", *p) + } -func (p *FrontendServiceGetMasterTokenResult) DeepEqual(ano *FrontendServiceGetMasterTokenResult) bool { +func (p *FrontendServiceShowUserResult) DeepEqual(ano *FrontendServiceShowUserResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70299,7 +99544,7 @@ func (p *FrontendServiceGetMasterTokenResult) DeepEqual(ano *FrontendServiceGetM return true } -func (p *FrontendServiceGetMasterTokenResult) Field0DeepEqual(src *TGetMasterTokenResult_) bool { +func (p *FrontendServiceShowUserResult) Field0DeepEqual(src *TShowUserResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -70307,39 +99552,38 @@ func (p *FrontendServiceGetMasterTokenResult) Field0DeepEqual(src *TGetMasterTok return true } -type FrontendServiceGetBinlogLagArgs struct { - Request *TGetBinlogLagRequest `thrift:"request,1" frugal:"1,default,TGetBinlogRequest" json:"request"` +type FrontendServiceSyncQueryColumnsArgs struct { + Request *TSyncQueryColumns `thrift:"request,1" frugal:"1,default,TSyncQueryColumns" json:"request"` } -func NewFrontendServiceGetBinlogLagArgs() *FrontendServiceGetBinlogLagArgs { - return &FrontendServiceGetBinlogLagArgs{} +func NewFrontendServiceSyncQueryColumnsArgs() *FrontendServiceSyncQueryColumnsArgs { + return &FrontendServiceSyncQueryColumnsArgs{} } -func (p *FrontendServiceGetBinlogLagArgs) InitDefault() { - *p = FrontendServiceGetBinlogLagArgs{} +func (p *FrontendServiceSyncQueryColumnsArgs) InitDefault() { } -var FrontendServiceGetBinlogLagArgs_Request_DEFAULT *TGetBinlogLagRequest +var FrontendServiceSyncQueryColumnsArgs_Request_DEFAULT *TSyncQueryColumns -func (p *FrontendServiceGetBinlogLagArgs) GetRequest() (v *TGetBinlogLagRequest) { +func (p *FrontendServiceSyncQueryColumnsArgs) GetRequest() (v *TSyncQueryColumns) { if !p.IsSetRequest() { - return FrontendServiceGetBinlogLagArgs_Request_DEFAULT + return FrontendServiceSyncQueryColumnsArgs_Request_DEFAULT } return p.Request } -func (p 
*FrontendServiceGetBinlogLagArgs) SetRequest(val *TGetBinlogLagRequest) { +func (p *FrontendServiceSyncQueryColumnsArgs) SetRequest(val *TSyncQueryColumns) { p.Request = val } -var fieldIDToName_FrontendServiceGetBinlogLagArgs = map[int16]string{ +var fieldIDToName_FrontendServiceSyncQueryColumnsArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetBinlogLagArgs) IsSetRequest() bool { +func (p *FrontendServiceSyncQueryColumnsArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetBinlogLagArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70363,17 +99607,14 @@ func (p *FrontendServiceGetBinlogLagArgs) Read(iprot thrift.TProtocol) (err erro if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70388,7 +99629,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSyncQueryColumnsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70398,17 +99639,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTGetBinlogLagRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceSyncQueryColumnsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTSyncQueryColumns() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetBinlogLagArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getBinlogLag_args"); err != nil { + if err = oprot.WriteStructBegin("syncQueryColumns_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70416,7 +99658,6 @@ func (p *FrontendServiceGetBinlogLagArgs) Write(oprot thrift.TProtocol) (err err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70435,7 +99676,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -70452,14 +99693,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagArgs) String() string { +func (p 
*FrontendServiceSyncQueryColumnsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetBinlogLagArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceSyncQueryColumnsArgs(%+v)", *p) + } -func (p *FrontendServiceGetBinlogLagArgs) DeepEqual(ano *FrontendServiceGetBinlogLagArgs) bool { +func (p *FrontendServiceSyncQueryColumnsArgs) DeepEqual(ano *FrontendServiceSyncQueryColumnsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70471,7 +99713,7 @@ func (p *FrontendServiceGetBinlogLagArgs) DeepEqual(ano *FrontendServiceGetBinlo return true } -func (p *FrontendServiceGetBinlogLagArgs) Field1DeepEqual(src *TGetBinlogLagRequest) bool { +func (p *FrontendServiceSyncQueryColumnsArgs) Field1DeepEqual(src *TSyncQueryColumns) bool { if !p.Request.DeepEqual(src) { return false @@ -70479,39 +99721,38 @@ func (p *FrontendServiceGetBinlogLagArgs) Field1DeepEqual(src *TGetBinlogLagRequ return true } -type FrontendServiceGetBinlogLagResult struct { - Success *TGetBinlogLagResult_ `thrift:"success,0,optional" frugal:"0,optional,TGetBinlogLagResult_" json:"success,omitempty"` +type FrontendServiceSyncQueryColumnsResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceGetBinlogLagResult() *FrontendServiceGetBinlogLagResult { - return &FrontendServiceGetBinlogLagResult{} +func NewFrontendServiceSyncQueryColumnsResult() *FrontendServiceSyncQueryColumnsResult { + return &FrontendServiceSyncQueryColumnsResult{} } -func (p *FrontendServiceGetBinlogLagResult) InitDefault() { - *p = FrontendServiceGetBinlogLagResult{} +func (p *FrontendServiceSyncQueryColumnsResult) InitDefault() { } -var FrontendServiceGetBinlogLagResult_Success_DEFAULT *TGetBinlogLagResult_ +var FrontendServiceSyncQueryColumnsResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceGetBinlogLagResult) GetSuccess() (v *TGetBinlogLagResult_) { +func (p *FrontendServiceSyncQueryColumnsResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceGetBinlogLagResult_Success_DEFAULT + return FrontendServiceSyncQueryColumnsResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetBinlogLagResult) SetSuccess(x interface{}) { - p.Success = x.(*TGetBinlogLagResult_) +func (p *FrontendServiceSyncQueryColumnsResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceGetBinlogLagResult = map[int16]string{ +var fieldIDToName_FrontendServiceSyncQueryColumnsResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetBinlogLagResult) IsSetSuccess() bool { +func (p *FrontendServiceSyncQueryColumnsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetBinlogLagResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70535,17 +99776,14 @@ func (p *FrontendServiceGetBinlogLagResult) Read(iprot thrift.TProtocol) (err er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70560,7 +99798,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSyncQueryColumnsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70570,17 +99808,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTGetBinlogLagResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceSyncQueryColumnsResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetBinlogLagResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getBinlogLag_result"); err != nil { + if err = oprot.WriteStructBegin("syncQueryColumns_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70588,7 +99827,6 @@ func (p *FrontendServiceGetBinlogLagResult) Write(oprot thrift.TProtocol) (err e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70607,7 +99845,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceSyncQueryColumnsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -70626,14 +99864,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagResult) String() string { +func (p *FrontendServiceSyncQueryColumnsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetBinlogLagResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceSyncQueryColumnsResult(%+v)", *p) + } -func (p *FrontendServiceGetBinlogLagResult) DeepEqual(ano *FrontendServiceGetBinlogLagResult) bool { +func (p *FrontendServiceSyncQueryColumnsResult) DeepEqual(ano *FrontendServiceSyncQueryColumnsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70645,7 +99884,7 @@ func (p *FrontendServiceGetBinlogLagResult) DeepEqual(ano *FrontendServiceGetBin return true } -func (p *FrontendServiceGetBinlogLagResult) Field0DeepEqual(src *TGetBinlogLagResult_) bool { +func (p *FrontendServiceSyncQueryColumnsResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -70653,39 +99892,38 @@ func (p *FrontendServiceGetBinlogLagResult) Field0DeepEqual(src *TGetBinlogLagRe return true } -type FrontendServiceUpdateStatsCacheArgs struct { - Request *TUpdateFollowerStatsCacheRequest `thrift:"request,1" frugal:"1,default,TUpdateFollowerStatsCacheRequest" json:"request"` +type FrontendServiceFetchSplitBatchArgs struct { + Request 
*TFetchSplitBatchRequest `thrift:"request,1" frugal:"1,default,TFetchSplitBatchRequest" json:"request"` } -func NewFrontendServiceUpdateStatsCacheArgs() *FrontendServiceUpdateStatsCacheArgs { - return &FrontendServiceUpdateStatsCacheArgs{} +func NewFrontendServiceFetchSplitBatchArgs() *FrontendServiceFetchSplitBatchArgs { + return &FrontendServiceFetchSplitBatchArgs{} } -func (p *FrontendServiceUpdateStatsCacheArgs) InitDefault() { - *p = FrontendServiceUpdateStatsCacheArgs{} +func (p *FrontendServiceFetchSplitBatchArgs) InitDefault() { } -var FrontendServiceUpdateStatsCacheArgs_Request_DEFAULT *TUpdateFollowerStatsCacheRequest +var FrontendServiceFetchSplitBatchArgs_Request_DEFAULT *TFetchSplitBatchRequest -func (p *FrontendServiceUpdateStatsCacheArgs) GetRequest() (v *TUpdateFollowerStatsCacheRequest) { +func (p *FrontendServiceFetchSplitBatchArgs) GetRequest() (v *TFetchSplitBatchRequest) { if !p.IsSetRequest() { - return FrontendServiceUpdateStatsCacheArgs_Request_DEFAULT + return FrontendServiceFetchSplitBatchArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceUpdateStatsCacheArgs) SetRequest(val *TUpdateFollowerStatsCacheRequest) { +func (p *FrontendServiceFetchSplitBatchArgs) SetRequest(val *TFetchSplitBatchRequest) { p.Request = val } -var fieldIDToName_FrontendServiceUpdateStatsCacheArgs = map[int16]string{ +var fieldIDToName_FrontendServiceFetchSplitBatchArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceUpdateStatsCacheArgs) IsSetRequest() bool { +func (p *FrontendServiceFetchSplitBatchArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceUpdateStatsCacheArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSplitBatchArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70709,17 +99947,14 @@ func (p *FrontendServiceUpdateStatsCacheArgs) Read(iprot thrift.TProtocol) (err if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70734,7 +99969,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSplitBatchArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70744,17 +99979,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTUpdateFollowerStatsCacheRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceFetchSplitBatchArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTFetchSplitBatchRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceUpdateStatsCacheArgs) Write(oprot thrift.TProtocol) (err error) { +func 
(p *FrontendServiceFetchSplitBatchArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("updateStatsCache_args"); err != nil { + if err = oprot.WriteStructBegin("fetchSplitBatch_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70762,7 +99998,6 @@ func (p *FrontendServiceUpdateStatsCacheArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70781,7 +100016,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSplitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -70798,14 +100033,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheArgs) String() string { +func (p *FrontendServiceFetchSplitBatchArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceUpdateStatsCacheArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchSplitBatchArgs(%+v)", *p) + } -func (p *FrontendServiceUpdateStatsCacheArgs) DeepEqual(ano *FrontendServiceUpdateStatsCacheArgs) bool { +func (p *FrontendServiceFetchSplitBatchArgs) DeepEqual(ano *FrontendServiceFetchSplitBatchArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70817,7 +100053,7 @@ func (p *FrontendServiceUpdateStatsCacheArgs) DeepEqual(ano *FrontendServiceUpda return true } -func (p *FrontendServiceUpdateStatsCacheArgs) Field1DeepEqual(src *TUpdateFollowerStatsCacheRequest) bool { +func (p *FrontendServiceFetchSplitBatchArgs) Field1DeepEqual(src *TFetchSplitBatchRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -70825,39 +100061,38 @@ func (p *FrontendServiceUpdateStatsCacheArgs) Field1DeepEqual(src *TUpdateFollow return true } -type FrontendServiceUpdateStatsCacheResult struct { - Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` +type FrontendServiceFetchSplitBatchResult struct { + Success *TFetchSplitBatchResult_ `thrift:"success,0,optional" frugal:"0,optional,TFetchSplitBatchResult_" json:"success,omitempty"` } -func NewFrontendServiceUpdateStatsCacheResult() *FrontendServiceUpdateStatsCacheResult { - return &FrontendServiceUpdateStatsCacheResult{} +func NewFrontendServiceFetchSplitBatchResult() *FrontendServiceFetchSplitBatchResult { + return &FrontendServiceFetchSplitBatchResult{} } -func (p *FrontendServiceUpdateStatsCacheResult) InitDefault() { - *p = FrontendServiceUpdateStatsCacheResult{} +func (p *FrontendServiceFetchSplitBatchResult) InitDefault() { } -var FrontendServiceUpdateStatsCacheResult_Success_DEFAULT *status.TStatus +var FrontendServiceFetchSplitBatchResult_Success_DEFAULT *TFetchSplitBatchResult_ -func (p *FrontendServiceUpdateStatsCacheResult) GetSuccess() (v *status.TStatus) { +func (p *FrontendServiceFetchSplitBatchResult) GetSuccess() (v *TFetchSplitBatchResult_) { if !p.IsSetSuccess() { - return FrontendServiceUpdateStatsCacheResult_Success_DEFAULT + return FrontendServiceFetchSplitBatchResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceUpdateStatsCacheResult) SetSuccess(x interface{}) { - p.Success = x.(*status.TStatus) +func (p 
*FrontendServiceFetchSplitBatchResult) SetSuccess(x interface{}) { + p.Success = x.(*TFetchSplitBatchResult_) } -var fieldIDToName_FrontendServiceUpdateStatsCacheResult = map[int16]string{ +var fieldIDToName_FrontendServiceFetchSplitBatchResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceUpdateStatsCacheResult) IsSetSuccess() bool { +func (p *FrontendServiceFetchSplitBatchResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceUpdateStatsCacheResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSplitBatchResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -70881,17 +100116,14 @@ func (p *FrontendServiceUpdateStatsCacheResult) Read(iprot thrift.TProtocol) (er if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -70906,7 +100138,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSplitBatchResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -70916,17 +100148,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = status.NewTStatus() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceFetchSplitBatchResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTFetchSplitBatchResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceUpdateStatsCacheResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSplitBatchResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("updateStatsCache_result"); err != nil { + if err = oprot.WriteStructBegin("fetchSplitBatch_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -70934,7 +100167,6 @@ func (p *FrontendServiceUpdateStatsCacheResult) Write(oprot thrift.TProtocol) (e fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70953,7 +100185,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchSplitBatchResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -70972,14 +100204,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheResult) String() string { 
+func (p *FrontendServiceFetchSplitBatchResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceUpdateStatsCacheResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchSplitBatchResult(%+v)", *p) + } -func (p *FrontendServiceUpdateStatsCacheResult) DeepEqual(ano *FrontendServiceUpdateStatsCacheResult) bool { +func (p *FrontendServiceFetchSplitBatchResult) DeepEqual(ano *FrontendServiceFetchSplitBatchResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -70991,7 +100224,7 @@ func (p *FrontendServiceUpdateStatsCacheResult) DeepEqual(ano *FrontendServiceUp return true } -func (p *FrontendServiceUpdateStatsCacheResult) Field0DeepEqual(src *status.TStatus) bool { +func (p *FrontendServiceFetchSplitBatchResult) Field0DeepEqual(src *TFetchSplitBatchResult_) bool { if !p.Success.DeepEqual(src) { return false @@ -70999,39 +100232,38 @@ func (p *FrontendServiceUpdateStatsCacheResult) Field0DeepEqual(src *status.TSta return true } -type FrontendServiceGetAutoIncrementRangeArgs struct { - Request *TAutoIncrementRangeRequest `thrift:"request,1" frugal:"1,default,TAutoIncrementRangeRequest" json:"request"` +type FrontendServiceUpdatePartitionStatsCacheArgs struct { + Request *TUpdateFollowerPartitionStatsCacheRequest `thrift:"request,1" frugal:"1,default,TUpdateFollowerPartitionStatsCacheRequest" json:"request"` } -func NewFrontendServiceGetAutoIncrementRangeArgs() *FrontendServiceGetAutoIncrementRangeArgs { - return &FrontendServiceGetAutoIncrementRangeArgs{} +func NewFrontendServiceUpdatePartitionStatsCacheArgs() *FrontendServiceUpdatePartitionStatsCacheArgs { + return &FrontendServiceUpdatePartitionStatsCacheArgs{} } -func (p *FrontendServiceGetAutoIncrementRangeArgs) InitDefault() { - *p = FrontendServiceGetAutoIncrementRangeArgs{} +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) InitDefault() { } -var FrontendServiceGetAutoIncrementRangeArgs_Request_DEFAULT *TAutoIncrementRangeRequest +var FrontendServiceUpdatePartitionStatsCacheArgs_Request_DEFAULT *TUpdateFollowerPartitionStatsCacheRequest -func (p *FrontendServiceGetAutoIncrementRangeArgs) GetRequest() (v *TAutoIncrementRangeRequest) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) GetRequest() (v *TUpdateFollowerPartitionStatsCacheRequest) { if !p.IsSetRequest() { - return FrontendServiceGetAutoIncrementRangeArgs_Request_DEFAULT + return FrontendServiceUpdatePartitionStatsCacheArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceGetAutoIncrementRangeArgs) SetRequest(val *TAutoIncrementRangeRequest) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) SetRequest(val *TUpdateFollowerPartitionStatsCacheRequest) { p.Request = val } -var fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs = map[int16]string{ +var fieldIDToName_FrontendServiceUpdatePartitionStatsCacheArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceGetAutoIncrementRangeArgs) IsSetRequest() bool { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceGetAutoIncrementRangeArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -71055,17 +100287,14 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) Read(iprot thrift.TProtocol) if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -71080,7 +100309,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdatePartitionStatsCacheArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -71090,17 +100319,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTAutoIncrementRangeRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTUpdateFollowerPartitionStatsCacheRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceGetAutoIncrementRangeArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getAutoIncrementRange_args"); err != nil { + if err = oprot.WriteStructBegin("updatePartitionStatsCache_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -71108,7 +100338,6 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) Write(oprot thrift.TProtocol) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -71127,7 +100356,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -71144,14 +100373,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeArgs) String() string { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetAutoIncrementRangeArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceUpdatePartitionStatsCacheArgs(%+v)", *p) + } -func (p *FrontendServiceGetAutoIncrementRangeArgs) DeepEqual(ano *FrontendServiceGetAutoIncrementRangeArgs) bool { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) DeepEqual(ano *FrontendServiceUpdatePartitionStatsCacheArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -71163,7 +100393,7 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) DeepEqual(ano *FrontendServic return true } -func (p *FrontendServiceGetAutoIncrementRangeArgs) Field1DeepEqual(src *TAutoIncrementRangeRequest) bool { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) 
Field1DeepEqual(src *TUpdateFollowerPartitionStatsCacheRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -71171,39 +100401,38 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) Field1DeepEqual(src *TAutoInc return true } -type FrontendServiceGetAutoIncrementRangeResult struct { - Success *TAutoIncrementRangeResult_ `thrift:"success,0,optional" frugal:"0,optional,TAutoIncrementRangeResult_" json:"success,omitempty"` +type FrontendServiceUpdatePartitionStatsCacheResult struct { + Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"` } -func NewFrontendServiceGetAutoIncrementRangeResult() *FrontendServiceGetAutoIncrementRangeResult { - return &FrontendServiceGetAutoIncrementRangeResult{} +func NewFrontendServiceUpdatePartitionStatsCacheResult() *FrontendServiceUpdatePartitionStatsCacheResult { + return &FrontendServiceUpdatePartitionStatsCacheResult{} } -func (p *FrontendServiceGetAutoIncrementRangeResult) InitDefault() { - *p = FrontendServiceGetAutoIncrementRangeResult{} +func (p *FrontendServiceUpdatePartitionStatsCacheResult) InitDefault() { } -var FrontendServiceGetAutoIncrementRangeResult_Success_DEFAULT *TAutoIncrementRangeResult_ +var FrontendServiceUpdatePartitionStatsCacheResult_Success_DEFAULT *status.TStatus -func (p *FrontendServiceGetAutoIncrementRangeResult) GetSuccess() (v *TAutoIncrementRangeResult_) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) GetSuccess() (v *status.TStatus) { if !p.IsSetSuccess() { - return FrontendServiceGetAutoIncrementRangeResult_Success_DEFAULT + return FrontendServiceUpdatePartitionStatsCacheResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceGetAutoIncrementRangeResult) SetSuccess(x interface{}) { - p.Success = x.(*TAutoIncrementRangeResult_) +func (p *FrontendServiceUpdatePartitionStatsCacheResult) SetSuccess(x interface{}) { + p.Success = x.(*status.TStatus) } -var fieldIDToName_FrontendServiceGetAutoIncrementRangeResult = map[int16]string{ +var fieldIDToName_FrontendServiceUpdatePartitionStatsCacheResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceGetAutoIncrementRangeResult) IsSetSuccess() bool { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceGetAutoIncrementRangeResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -71227,17 +100456,14 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) Read(iprot thrift.TProtocol if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -71252,7 +100478,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdatePartitionStatsCacheResult[fieldId]), err) SkipFieldError: 
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -71262,17 +100488,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTAutoIncrementRangeResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) ReadField0(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceGetAutoIncrementRangeResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("getAutoIncrementRange_result"); err != nil { + if err = oprot.WriteStructBegin("updatePartitionStatsCache_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -71280,7 +100507,6 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) Write(oprot thrift.TProtoco fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -71299,7 +100525,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -71318,14 +100544,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeResult) String() string { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceGetAutoIncrementRangeResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceUpdatePartitionStatsCacheResult(%+v)", *p) + } -func (p *FrontendServiceGetAutoIncrementRangeResult) DeepEqual(ano *FrontendServiceGetAutoIncrementRangeResult) bool { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) DeepEqual(ano *FrontendServiceUpdatePartitionStatsCacheResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -71337,7 +100564,7 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) DeepEqual(ano *FrontendServ return true } -func (p *FrontendServiceGetAutoIncrementRangeResult) Field0DeepEqual(src *TAutoIncrementRangeResult_) bool { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) Field0DeepEqual(src *status.TStatus) bool { if !p.Success.DeepEqual(src) { return false @@ -71345,39 +100572,38 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) Field0DeepEqual(src *TAutoI return true } -type FrontendServiceCreatePartitionArgs struct { - Request *TCreatePartitionRequest `thrift:"request,1" frugal:"1,default,TCreatePartitionRequest" json:"request"` +type FrontendServiceFetchRunningQueriesArgs struct { + Request *TFetchRunningQueriesRequest `thrift:"request,1" frugal:"1,default,TFetchRunningQueriesRequest" json:"request"` } -func NewFrontendServiceCreatePartitionArgs() *FrontendServiceCreatePartitionArgs { - return &FrontendServiceCreatePartitionArgs{} +func NewFrontendServiceFetchRunningQueriesArgs() 
*FrontendServiceFetchRunningQueriesArgs { + return &FrontendServiceFetchRunningQueriesArgs{} } -func (p *FrontendServiceCreatePartitionArgs) InitDefault() { - *p = FrontendServiceCreatePartitionArgs{} +func (p *FrontendServiceFetchRunningQueriesArgs) InitDefault() { } -var FrontendServiceCreatePartitionArgs_Request_DEFAULT *TCreatePartitionRequest +var FrontendServiceFetchRunningQueriesArgs_Request_DEFAULT *TFetchRunningQueriesRequest -func (p *FrontendServiceCreatePartitionArgs) GetRequest() (v *TCreatePartitionRequest) { +func (p *FrontendServiceFetchRunningQueriesArgs) GetRequest() (v *TFetchRunningQueriesRequest) { if !p.IsSetRequest() { - return FrontendServiceCreatePartitionArgs_Request_DEFAULT + return FrontendServiceFetchRunningQueriesArgs_Request_DEFAULT } return p.Request } -func (p *FrontendServiceCreatePartitionArgs) SetRequest(val *TCreatePartitionRequest) { +func (p *FrontendServiceFetchRunningQueriesArgs) SetRequest(val *TFetchRunningQueriesRequest) { p.Request = val } -var fieldIDToName_FrontendServiceCreatePartitionArgs = map[int16]string{ +var fieldIDToName_FrontendServiceFetchRunningQueriesArgs = map[int16]string{ 1: "request", } -func (p *FrontendServiceCreatePartitionArgs) IsSetRequest() bool { +func (p *FrontendServiceFetchRunningQueriesArgs) IsSetRequest() bool { return p.Request != nil } -func (p *FrontendServiceCreatePartitionArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -71401,17 +100627,14 @@ func (p *FrontendServiceCreatePartitionArgs) Read(iprot thrift.TProtocol) (err e if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -71426,7 +100649,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchRunningQueriesArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -71436,17 +100659,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = NewTCreatePartitionRequest() - if err := p.Request.Read(iprot); err != nil { +func (p *FrontendServiceFetchRunningQueriesArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTFetchRunningQueriesRequest() + if err := _field.Read(iprot); err != nil { return err } + p.Request = _field return nil } -func (p *FrontendServiceCreatePartitionArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("createPartition_args"); err != nil { + if err = oprot.WriteStructBegin("fetchRunningQueries_args"); err != nil { goto 
WriteStructBeginError } if p != nil { @@ -71454,7 +100678,6 @@ func (p *FrontendServiceCreatePartitionArgs) Write(oprot thrift.TProtocol) (err fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -71473,7 +100696,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -71490,14 +100713,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *FrontendServiceCreatePartitionArgs) String() string { +func (p *FrontendServiceFetchRunningQueriesArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("FrontendServiceCreatePartitionArgs(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchRunningQueriesArgs(%+v)", *p) + } -func (p *FrontendServiceCreatePartitionArgs) DeepEqual(ano *FrontendServiceCreatePartitionArgs) bool { +func (p *FrontendServiceFetchRunningQueriesArgs) DeepEqual(ano *FrontendServiceFetchRunningQueriesArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -71509,7 +100733,7 @@ func (p *FrontendServiceCreatePartitionArgs) DeepEqual(ano *FrontendServiceCreat return true } -func (p *FrontendServiceCreatePartitionArgs) Field1DeepEqual(src *TCreatePartitionRequest) bool { +func (p *FrontendServiceFetchRunningQueriesArgs) Field1DeepEqual(src *TFetchRunningQueriesRequest) bool { if !p.Request.DeepEqual(src) { return false @@ -71517,39 +100741,38 @@ func (p *FrontendServiceCreatePartitionArgs) Field1DeepEqual(src *TCreatePartiti return true } -type FrontendServiceCreatePartitionResult struct { - Success *TCreatePartitionResult_ `thrift:"success,0,optional" frugal:"0,optional,TCreatePartitionResult_" json:"success,omitempty"` +type FrontendServiceFetchRunningQueriesResult struct { + Success *TFetchRunningQueriesResult_ `thrift:"success,0,optional" frugal:"0,optional,TFetchRunningQueriesResult_" json:"success,omitempty"` } -func NewFrontendServiceCreatePartitionResult() *FrontendServiceCreatePartitionResult { - return &FrontendServiceCreatePartitionResult{} +func NewFrontendServiceFetchRunningQueriesResult() *FrontendServiceFetchRunningQueriesResult { + return &FrontendServiceFetchRunningQueriesResult{} } -func (p *FrontendServiceCreatePartitionResult) InitDefault() { - *p = FrontendServiceCreatePartitionResult{} +func (p *FrontendServiceFetchRunningQueriesResult) InitDefault() { } -var FrontendServiceCreatePartitionResult_Success_DEFAULT *TCreatePartitionResult_ +var FrontendServiceFetchRunningQueriesResult_Success_DEFAULT *TFetchRunningQueriesResult_ -func (p *FrontendServiceCreatePartitionResult) GetSuccess() (v *TCreatePartitionResult_) { +func (p *FrontendServiceFetchRunningQueriesResult) GetSuccess() (v *TFetchRunningQueriesResult_) { if !p.IsSetSuccess() { - return FrontendServiceCreatePartitionResult_Success_DEFAULT + return FrontendServiceFetchRunningQueriesResult_Success_DEFAULT } return p.Success } -func (p *FrontendServiceCreatePartitionResult) SetSuccess(x interface{}) { - p.Success = x.(*TCreatePartitionResult_) +func (p *FrontendServiceFetchRunningQueriesResult) SetSuccess(x interface{}) { + p.Success = x.(*TFetchRunningQueriesResult_) } -var 
fieldIDToName_FrontendServiceCreatePartitionResult = map[int16]string{ +var fieldIDToName_FrontendServiceFetchRunningQueriesResult = map[int16]string{ 0: "success", } -func (p *FrontendServiceCreatePartitionResult) IsSetSuccess() bool { +func (p *FrontendServiceFetchRunningQueriesResult) IsSetSuccess() bool { return p.Success != nil } -func (p *FrontendServiceCreatePartitionResult) Read(iprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -71573,17 +100796,14 @@ func (p *FrontendServiceCreatePartitionResult) Read(iprot thrift.TProtocol) (err if err = p.ReadField0(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -71598,7 +100818,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchRunningQueriesResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -71608,17 +100828,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = NewTCreatePartitionResult_() - if err := p.Success.Read(iprot); err != nil { +func (p *FrontendServiceFetchRunningQueriesResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTFetchRunningQueriesResult_() + if err := _field.Read(iprot); err != nil { return err } + p.Success = _field return nil } -func (p *FrontendServiceCreatePartitionResult) Write(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("createPartition_result"); err != nil { + if err = oprot.WriteStructBegin("fetchRunningQueries_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -71626,7 +100847,6 @@ func (p *FrontendServiceCreatePartitionResult) Write(oprot thrift.TProtocol) (er fieldId = 0 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -71645,7 +100865,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *FrontendServiceFetchRunningQueriesResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -71664,14 +100884,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *FrontendServiceCreatePartitionResult) String() string { +func (p *FrontendServiceFetchRunningQueriesResult) String() string { if p == nil { 
return "" } - return fmt.Sprintf("FrontendServiceCreatePartitionResult(%+v)", *p) + return fmt.Sprintf("FrontendServiceFetchRunningQueriesResult(%+v)", *p) + } -func (p *FrontendServiceCreatePartitionResult) DeepEqual(ano *FrontendServiceCreatePartitionResult) bool { +func (p *FrontendServiceFetchRunningQueriesResult) DeepEqual(ano *FrontendServiceFetchRunningQueriesResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -71683,7 +100904,7 @@ func (p *FrontendServiceCreatePartitionResult) DeepEqual(ano *FrontendServiceCre return true } -func (p *FrontendServiceCreatePartitionResult) Field0DeepEqual(src *TCreatePartitionResult_) bool { +func (p *FrontendServiceFetchRunningQueriesResult) Field0DeepEqual(src *TFetchRunningQueriesResult_) bool { if !p.Success.DeepEqual(src) { return false diff --git a/pkg/rpc/kitex_gen/frontendservice/frontendservice/client.go b/pkg/rpc/kitex_gen/frontendservice/frontendservice/client.go index b738569b..92a89cb2 100644 --- a/pkg/rpc/kitex_gen/frontendservice/frontendservice/client.go +++ b/pkg/rpc/kitex_gen/frontendservice/frontendservice/client.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package frontendservice @@ -45,19 +45,35 @@ type Client interface { StreamLoadMultiTablePut(ctx context.Context, request *frontendservice.TStreamLoadPutRequest, callOptions ...callopt.Option) (r *frontendservice.TStreamLoadMultiTablePutResult_, err error) SnapshotLoaderReport(ctx context.Context, request *frontendservice.TSnapshotLoaderReportRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) Ping(ctx context.Context, request *frontendservice.TFrontendPingFrontendRequest, callOptions ...callopt.Option) (r *frontendservice.TFrontendPingFrontendResult_, err error) - AddColumns(ctx context.Context, request *frontendservice.TAddColumnsRequest, callOptions ...callopt.Option) (r *frontendservice.TAddColumnsResult_, err error) InitExternalCtlMeta(ctx context.Context, request *frontendservice.TInitExternalCtlMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TInitExternalCtlMetaResult_, err error) FetchSchemaTableData(ctx context.Context, request *frontendservice.TFetchSchemaTableDataRequest, callOptions ...callopt.Option) (r *frontendservice.TFetchSchemaTableDataResult_, err error) AcquireToken(ctx context.Context, callOptions ...callopt.Option) (r *frontendservice.TMySqlLoadAcquireTokenResult_, err error) + CheckToken(ctx context.Context, token string, callOptions ...callopt.Option) (r bool, err error) ConfirmUnusedRemoteFiles(ctx context.Context, request *frontendservice.TConfirmUnusedRemoteFilesRequest, callOptions ...callopt.Option) (r *frontendservice.TConfirmUnusedRemoteFilesResult_, err error) CheckAuth(ctx context.Context, request *frontendservice.TCheckAuthRequest, callOptions ...callopt.Option) (r *frontendservice.TCheckAuthResult_, err error) GetQueryStats(ctx context.Context, request *frontendservice.TGetQueryStatsRequest, callOptions ...callopt.Option) (r *frontendservice.TQueryStatsResult_, err error) GetTabletReplicaInfos(ctx context.Context, request *frontendservice.TGetTabletReplicaInfosRequest, callOptions ...callopt.Option) (r *frontendservice.TGetTabletReplicaInfosResult_, err error) + AddPlsqlStoredProcedure(ctx context.Context, request *frontendservice.TAddPlsqlStoredProcedureRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) + DropPlsqlStoredProcedure(ctx context.Context, request 
*frontendservice.TDropPlsqlStoredProcedureRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) + AddPlsqlPackage(ctx context.Context, request *frontendservice.TAddPlsqlPackageRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlPackageResult_, err error) + DropPlsqlPackage(ctx context.Context, request *frontendservice.TDropPlsqlPackageRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlPackageResult_, err error) GetMasterToken(ctx context.Context, request *frontendservice.TGetMasterTokenRequest, callOptions ...callopt.Option) (r *frontendservice.TGetMasterTokenResult_, err error) GetBinlogLag(ctx context.Context, request *frontendservice.TGetBinlogLagRequest, callOptions ...callopt.Option) (r *frontendservice.TGetBinlogLagResult_, err error) UpdateStatsCache(ctx context.Context, request *frontendservice.TUpdateFollowerStatsCacheRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) GetAutoIncrementRange(ctx context.Context, request *frontendservice.TAutoIncrementRangeRequest, callOptions ...callopt.Option) (r *frontendservice.TAutoIncrementRangeResult_, err error) CreatePartition(ctx context.Context, request *frontendservice.TCreatePartitionRequest, callOptions ...callopt.Option) (r *frontendservice.TCreatePartitionResult_, err error) + ReplacePartition(ctx context.Context, request *frontendservice.TReplacePartitionRequest, callOptions ...callopt.Option) (r *frontendservice.TReplacePartitionResult_, err error) + GetMeta(ctx context.Context, request *frontendservice.TGetMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TGetMetaResult_, err error) + GetBackendMeta(ctx context.Context, request *frontendservice.TGetBackendMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TGetBackendMetaResult_, err error) + GetColumnInfo(ctx context.Context, request *frontendservice.TGetColumnInfoRequest, callOptions ...callopt.Option) (r *frontendservice.TGetColumnInfoResult_, err error) + InvalidateStatsCache(ctx context.Context, request *frontendservice.TInvalidateFollowerStatsCacheRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) + ShowProcessList(ctx context.Context, request *frontendservice.TShowProcessListRequest, callOptions ...callopt.Option) (r *frontendservice.TShowProcessListResult_, err error) + ReportCommitTxnResult_(ctx context.Context, request *frontendservice.TReportCommitTxnResultRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) + ShowUser(ctx context.Context, request *frontendservice.TShowUserRequest, callOptions ...callopt.Option) (r *frontendservice.TShowUserResult_, err error) + SyncQueryColumns(ctx context.Context, request *frontendservice.TSyncQueryColumns, callOptions ...callopt.Option) (r *status.TStatus, err error) + FetchSplitBatch(ctx context.Context, request *frontendservice.TFetchSplitBatchRequest, callOptions ...callopt.Option) (r *frontendservice.TFetchSplitBatchResult_, err error) + UpdatePartitionStatsCache(ctx context.Context, request *frontendservice.TUpdateFollowerPartitionStatsCacheRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) + FetchRunningQueries(ctx context.Context, request *frontendservice.TFetchRunningQueriesRequest, callOptions ...callopt.Option) (r *frontendservice.TFetchRunningQueriesResult_, err error) } // NewClient creates a client for the service defined in IDL. 
@@ -249,11 +265,6 @@ func (p *kFrontendServiceClient) Ping(ctx context.Context, request *frontendserv return p.kClient.Ping(ctx, request) } -func (p *kFrontendServiceClient) AddColumns(ctx context.Context, request *frontendservice.TAddColumnsRequest, callOptions ...callopt.Option) (r *frontendservice.TAddColumnsResult_, err error) { - ctx = client.NewCtxWithCallOptions(ctx, callOptions) - return p.kClient.AddColumns(ctx, request) -} - func (p *kFrontendServiceClient) InitExternalCtlMeta(ctx context.Context, request *frontendservice.TInitExternalCtlMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TInitExternalCtlMetaResult_, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.InitExternalCtlMeta(ctx, request) @@ -269,6 +280,11 @@ func (p *kFrontendServiceClient) AcquireToken(ctx context.Context, callOptions . return p.kClient.AcquireToken(ctx) } +func (p *kFrontendServiceClient) CheckToken(ctx context.Context, token string, callOptions ...callopt.Option) (r bool, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CheckToken(ctx, token) +} + func (p *kFrontendServiceClient) ConfirmUnusedRemoteFiles(ctx context.Context, request *frontendservice.TConfirmUnusedRemoteFilesRequest, callOptions ...callopt.Option) (r *frontendservice.TConfirmUnusedRemoteFilesResult_, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.ConfirmUnusedRemoteFiles(ctx, request) @@ -289,6 +305,26 @@ func (p *kFrontendServiceClient) GetTabletReplicaInfos(ctx context.Context, requ return p.kClient.GetTabletReplicaInfos(ctx, request) } +func (p *kFrontendServiceClient) AddPlsqlStoredProcedure(ctx context.Context, request *frontendservice.TAddPlsqlStoredProcedureRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.AddPlsqlStoredProcedure(ctx, request) +} + +func (p *kFrontendServiceClient) DropPlsqlStoredProcedure(ctx context.Context, request *frontendservice.TDropPlsqlStoredProcedureRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.DropPlsqlStoredProcedure(ctx, request) +} + +func (p *kFrontendServiceClient) AddPlsqlPackage(ctx context.Context, request *frontendservice.TAddPlsqlPackageRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlPackageResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.AddPlsqlPackage(ctx, request) +} + +func (p *kFrontendServiceClient) DropPlsqlPackage(ctx context.Context, request *frontendservice.TDropPlsqlPackageRequest, callOptions ...callopt.Option) (r *frontendservice.TPlsqlPackageResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.DropPlsqlPackage(ctx, request) +} + func (p *kFrontendServiceClient) GetMasterToken(ctx context.Context, request *frontendservice.TGetMasterTokenRequest, callOptions ...callopt.Option) (r *frontendservice.TGetMasterTokenResult_, err error) { ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.GetMasterToken(ctx, request) @@ -313,3 +349,63 @@ func (p *kFrontendServiceClient) CreatePartition(ctx context.Context, request *f ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.CreatePartition(ctx, request) } + +func (p *kFrontendServiceClient) ReplacePartition(ctx 
context.Context, request *frontendservice.TReplacePartitionRequest, callOptions ...callopt.Option) (r *frontendservice.TReplacePartitionResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ReplacePartition(ctx, request) +} + +func (p *kFrontendServiceClient) GetMeta(ctx context.Context, request *frontendservice.TGetMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TGetMetaResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetMeta(ctx, request) +} + +func (p *kFrontendServiceClient) GetBackendMeta(ctx context.Context, request *frontendservice.TGetBackendMetaRequest, callOptions ...callopt.Option) (r *frontendservice.TGetBackendMetaResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetBackendMeta(ctx, request) +} + +func (p *kFrontendServiceClient) GetColumnInfo(ctx context.Context, request *frontendservice.TGetColumnInfoRequest, callOptions ...callopt.Option) (r *frontendservice.TGetColumnInfoResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetColumnInfo(ctx, request) +} + +func (p *kFrontendServiceClient) InvalidateStatsCache(ctx context.Context, request *frontendservice.TInvalidateFollowerStatsCacheRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.InvalidateStatsCache(ctx, request) +} + +func (p *kFrontendServiceClient) ShowProcessList(ctx context.Context, request *frontendservice.TShowProcessListRequest, callOptions ...callopt.Option) (r *frontendservice.TShowProcessListResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ShowProcessList(ctx, request) +} + +func (p *kFrontendServiceClient) ReportCommitTxnResult_(ctx context.Context, request *frontendservice.TReportCommitTxnResultRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ReportCommitTxnResult_(ctx, request) +} + +func (p *kFrontendServiceClient) ShowUser(ctx context.Context, request *frontendservice.TShowUserRequest, callOptions ...callopt.Option) (r *frontendservice.TShowUserResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ShowUser(ctx, request) +} + +func (p *kFrontendServiceClient) SyncQueryColumns(ctx context.Context, request *frontendservice.TSyncQueryColumns, callOptions ...callopt.Option) (r *status.TStatus, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.SyncQueryColumns(ctx, request) +} + +func (p *kFrontendServiceClient) FetchSplitBatch(ctx context.Context, request *frontendservice.TFetchSplitBatchRequest, callOptions ...callopt.Option) (r *frontendservice.TFetchSplitBatchResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.FetchSplitBatch(ctx, request) +} + +func (p *kFrontendServiceClient) UpdatePartitionStatsCache(ctx context.Context, request *frontendservice.TUpdateFollowerPartitionStatsCacheRequest, callOptions ...callopt.Option) (r *status.TStatus, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.UpdatePartitionStatsCache(ctx, request) +} + +func (p *kFrontendServiceClient) FetchRunningQueries(ctx context.Context, request *frontendservice.TFetchRunningQueriesRequest, callOptions ...callopt.Option) (r 
*frontendservice.TFetchRunningQueriesResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.FetchRunningQueries(ctx, request) +} diff --git a/pkg/rpc/kitex_gen/frontendservice/frontendservice/frontendservice.go b/pkg/rpc/kitex_gen/frontendservice/frontendservice/frontendservice.go index 96090774..e50e899d 100644 --- a/pkg/rpc/kitex_gen/frontendservice/frontendservice/frontendservice.go +++ b/pkg/rpc/kitex_gen/frontendservice/frontendservice/frontendservice.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package frontendservice @@ -53,29 +53,46 @@ func NewServiceInfo() *kitex.ServiceInfo { "streamLoadMultiTablePut": kitex.NewMethodInfo(streamLoadMultiTablePutHandler, newFrontendServiceStreamLoadMultiTablePutArgs, newFrontendServiceStreamLoadMultiTablePutResult, false), "snapshotLoaderReport": kitex.NewMethodInfo(snapshotLoaderReportHandler, newFrontendServiceSnapshotLoaderReportArgs, newFrontendServiceSnapshotLoaderReportResult, false), "ping": kitex.NewMethodInfo(pingHandler, newFrontendServicePingArgs, newFrontendServicePingResult, false), - "addColumns": kitex.NewMethodInfo(addColumnsHandler, newFrontendServiceAddColumnsArgs, newFrontendServiceAddColumnsResult, false), "initExternalCtlMeta": kitex.NewMethodInfo(initExternalCtlMetaHandler, newFrontendServiceInitExternalCtlMetaArgs, newFrontendServiceInitExternalCtlMetaResult, false), "fetchSchemaTableData": kitex.NewMethodInfo(fetchSchemaTableDataHandler, newFrontendServiceFetchSchemaTableDataArgs, newFrontendServiceFetchSchemaTableDataResult, false), "acquireToken": kitex.NewMethodInfo(acquireTokenHandler, newFrontendServiceAcquireTokenArgs, newFrontendServiceAcquireTokenResult, false), + "checkToken": kitex.NewMethodInfo(checkTokenHandler, newFrontendServiceCheckTokenArgs, newFrontendServiceCheckTokenResult, false), "confirmUnusedRemoteFiles": kitex.NewMethodInfo(confirmUnusedRemoteFilesHandler, newFrontendServiceConfirmUnusedRemoteFilesArgs, newFrontendServiceConfirmUnusedRemoteFilesResult, false), "checkAuth": kitex.NewMethodInfo(checkAuthHandler, newFrontendServiceCheckAuthArgs, newFrontendServiceCheckAuthResult, false), "getQueryStats": kitex.NewMethodInfo(getQueryStatsHandler, newFrontendServiceGetQueryStatsArgs, newFrontendServiceGetQueryStatsResult, false), "getTabletReplicaInfos": kitex.NewMethodInfo(getTabletReplicaInfosHandler, newFrontendServiceGetTabletReplicaInfosArgs, newFrontendServiceGetTabletReplicaInfosResult, false), + "addPlsqlStoredProcedure": kitex.NewMethodInfo(addPlsqlStoredProcedureHandler, newFrontendServiceAddPlsqlStoredProcedureArgs, newFrontendServiceAddPlsqlStoredProcedureResult, false), + "dropPlsqlStoredProcedure": kitex.NewMethodInfo(dropPlsqlStoredProcedureHandler, newFrontendServiceDropPlsqlStoredProcedureArgs, newFrontendServiceDropPlsqlStoredProcedureResult, false), + "addPlsqlPackage": kitex.NewMethodInfo(addPlsqlPackageHandler, newFrontendServiceAddPlsqlPackageArgs, newFrontendServiceAddPlsqlPackageResult, false), + "dropPlsqlPackage": kitex.NewMethodInfo(dropPlsqlPackageHandler, newFrontendServiceDropPlsqlPackageArgs, newFrontendServiceDropPlsqlPackageResult, false), "getMasterToken": kitex.NewMethodInfo(getMasterTokenHandler, newFrontendServiceGetMasterTokenArgs, newFrontendServiceGetMasterTokenResult, false), "getBinlogLag": kitex.NewMethodInfo(getBinlogLagHandler, newFrontendServiceGetBinlogLagArgs, newFrontendServiceGetBinlogLagResult, false), "updateStatsCache": 
kitex.NewMethodInfo(updateStatsCacheHandler, newFrontendServiceUpdateStatsCacheArgs, newFrontendServiceUpdateStatsCacheResult, false), "getAutoIncrementRange": kitex.NewMethodInfo(getAutoIncrementRangeHandler, newFrontendServiceGetAutoIncrementRangeArgs, newFrontendServiceGetAutoIncrementRangeResult, false), "createPartition": kitex.NewMethodInfo(createPartitionHandler, newFrontendServiceCreatePartitionArgs, newFrontendServiceCreatePartitionResult, false), + "replacePartition": kitex.NewMethodInfo(replacePartitionHandler, newFrontendServiceReplacePartitionArgs, newFrontendServiceReplacePartitionResult, false), + "getMeta": kitex.NewMethodInfo(getMetaHandler, newFrontendServiceGetMetaArgs, newFrontendServiceGetMetaResult, false), + "getBackendMeta": kitex.NewMethodInfo(getBackendMetaHandler, newFrontendServiceGetBackendMetaArgs, newFrontendServiceGetBackendMetaResult, false), + "getColumnInfo": kitex.NewMethodInfo(getColumnInfoHandler, newFrontendServiceGetColumnInfoArgs, newFrontendServiceGetColumnInfoResult, false), + "invalidateStatsCache": kitex.NewMethodInfo(invalidateStatsCacheHandler, newFrontendServiceInvalidateStatsCacheArgs, newFrontendServiceInvalidateStatsCacheResult, false), + "showProcessList": kitex.NewMethodInfo(showProcessListHandler, newFrontendServiceShowProcessListArgs, newFrontendServiceShowProcessListResult, false), + "reportCommitTxnResult": kitex.NewMethodInfo(reportCommitTxnResult_Handler, newFrontendServiceReportCommitTxnResultArgs, newFrontendServiceReportCommitTxnResultResult, false), + "showUser": kitex.NewMethodInfo(showUserHandler, newFrontendServiceShowUserArgs, newFrontendServiceShowUserResult, false), + "syncQueryColumns": kitex.NewMethodInfo(syncQueryColumnsHandler, newFrontendServiceSyncQueryColumnsArgs, newFrontendServiceSyncQueryColumnsResult, false), + "fetchSplitBatch": kitex.NewMethodInfo(fetchSplitBatchHandler, newFrontendServiceFetchSplitBatchArgs, newFrontendServiceFetchSplitBatchResult, false), + "updatePartitionStatsCache": kitex.NewMethodInfo(updatePartitionStatsCacheHandler, newFrontendServiceUpdatePartitionStatsCacheArgs, newFrontendServiceUpdatePartitionStatsCacheResult, false), + "fetchRunningQueries": kitex.NewMethodInfo(fetchRunningQueriesHandler, newFrontendServiceFetchRunningQueriesArgs, newFrontendServiceFetchRunningQueriesResult, false), } extra := map[string]interface{}{ - "PackageName": "frontendservice", + "PackageName": "frontendservice", + "ServiceFilePath": `thrift/FrontendService.thrift`, } svcInfo := &kitex.ServiceInfo{ ServiceName: serviceName, HandlerType: handlerType, Methods: methods, PayloadCodec: kitex.Thrift, - KiteXGenVersion: "v0.4.4", + KiteXGenVersion: "v0.8.0", Extra: extra, } return svcInfo @@ -657,24 +674,6 @@ func newFrontendServicePingResult() interface{} { return frontendservice.NewFrontendServicePingResult() } -func addColumnsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { - realArg := arg.(*frontendservice.FrontendServiceAddColumnsArgs) - realResult := result.(*frontendservice.FrontendServiceAddColumnsResult) - success, err := handler.(frontendservice.FrontendService).AddColumns(ctx, realArg.Request) - if err != nil { - return err - } - realResult.Success = success - return nil -} -func newFrontendServiceAddColumnsArgs() interface{} { - return frontendservice.NewFrontendServiceAddColumnsArgs() -} - -func newFrontendServiceAddColumnsResult() interface{} { - return frontendservice.NewFrontendServiceAddColumnsResult() -} - func initExternalCtlMetaHandler(ctx context.Context, 
handler interface{}, arg, result interface{}) error { realArg := arg.(*frontendservice.FrontendServiceInitExternalCtlMetaArgs) realResult := result.(*frontendservice.FrontendServiceInitExternalCtlMetaResult) @@ -729,6 +728,24 @@ func newFrontendServiceAcquireTokenResult() interface{} { return frontendservice.NewFrontendServiceAcquireTokenResult() } +func checkTokenHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceCheckTokenArgs) + realResult := result.(*frontendservice.FrontendServiceCheckTokenResult) + success, err := handler.(frontendservice.FrontendService).CheckToken(ctx, realArg.Token) + if err != nil { + return err + } + realResult.Success = &success + return nil +} +func newFrontendServiceCheckTokenArgs() interface{} { + return frontendservice.NewFrontendServiceCheckTokenArgs() +} + +func newFrontendServiceCheckTokenResult() interface{} { + return frontendservice.NewFrontendServiceCheckTokenResult() +} + func confirmUnusedRemoteFilesHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { realArg := arg.(*frontendservice.FrontendServiceConfirmUnusedRemoteFilesArgs) realResult := result.(*frontendservice.FrontendServiceConfirmUnusedRemoteFilesResult) @@ -801,6 +818,78 @@ func newFrontendServiceGetTabletReplicaInfosResult() interface{} { return frontendservice.NewFrontendServiceGetTabletReplicaInfosResult() } +func addPlsqlStoredProcedureHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceAddPlsqlStoredProcedureArgs) + realResult := result.(*frontendservice.FrontendServiceAddPlsqlStoredProcedureResult) + success, err := handler.(frontendservice.FrontendService).AddPlsqlStoredProcedure(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceAddPlsqlStoredProcedureArgs() interface{} { + return frontendservice.NewFrontendServiceAddPlsqlStoredProcedureArgs() +} + +func newFrontendServiceAddPlsqlStoredProcedureResult() interface{} { + return frontendservice.NewFrontendServiceAddPlsqlStoredProcedureResult() +} + +func dropPlsqlStoredProcedureHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceDropPlsqlStoredProcedureArgs) + realResult := result.(*frontendservice.FrontendServiceDropPlsqlStoredProcedureResult) + success, err := handler.(frontendservice.FrontendService).DropPlsqlStoredProcedure(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceDropPlsqlStoredProcedureArgs() interface{} { + return frontendservice.NewFrontendServiceDropPlsqlStoredProcedureArgs() +} + +func newFrontendServiceDropPlsqlStoredProcedureResult() interface{} { + return frontendservice.NewFrontendServiceDropPlsqlStoredProcedureResult() +} + +func addPlsqlPackageHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceAddPlsqlPackageArgs) + realResult := result.(*frontendservice.FrontendServiceAddPlsqlPackageResult) + success, err := handler.(frontendservice.FrontendService).AddPlsqlPackage(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceAddPlsqlPackageArgs() interface{} { + return frontendservice.NewFrontendServiceAddPlsqlPackageArgs() +} + +func 
newFrontendServiceAddPlsqlPackageResult() interface{} { + return frontendservice.NewFrontendServiceAddPlsqlPackageResult() +} + +func dropPlsqlPackageHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceDropPlsqlPackageArgs) + realResult := result.(*frontendservice.FrontendServiceDropPlsqlPackageResult) + success, err := handler.(frontendservice.FrontendService).DropPlsqlPackage(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceDropPlsqlPackageArgs() interface{} { + return frontendservice.NewFrontendServiceDropPlsqlPackageArgs() +} + +func newFrontendServiceDropPlsqlPackageResult() interface{} { + return frontendservice.NewFrontendServiceDropPlsqlPackageResult() +} + func getMasterTokenHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { realArg := arg.(*frontendservice.FrontendServiceGetMasterTokenArgs) realResult := result.(*frontendservice.FrontendServiceGetMasterTokenResult) @@ -891,6 +980,222 @@ func newFrontendServiceCreatePartitionResult() interface{} { return frontendservice.NewFrontendServiceCreatePartitionResult() } +func replacePartitionHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceReplacePartitionArgs) + realResult := result.(*frontendservice.FrontendServiceReplacePartitionResult) + success, err := handler.(frontendservice.FrontendService).ReplacePartition(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceReplacePartitionArgs() interface{} { + return frontendservice.NewFrontendServiceReplacePartitionArgs() +} + +func newFrontendServiceReplacePartitionResult() interface{} { + return frontendservice.NewFrontendServiceReplacePartitionResult() +} + +func getMetaHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceGetMetaArgs) + realResult := result.(*frontendservice.FrontendServiceGetMetaResult) + success, err := handler.(frontendservice.FrontendService).GetMeta(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceGetMetaArgs() interface{} { + return frontendservice.NewFrontendServiceGetMetaArgs() +} + +func newFrontendServiceGetMetaResult() interface{} { + return frontendservice.NewFrontendServiceGetMetaResult() +} + +func getBackendMetaHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceGetBackendMetaArgs) + realResult := result.(*frontendservice.FrontendServiceGetBackendMetaResult) + success, err := handler.(frontendservice.FrontendService).GetBackendMeta(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceGetBackendMetaArgs() interface{} { + return frontendservice.NewFrontendServiceGetBackendMetaArgs() +} + +func newFrontendServiceGetBackendMetaResult() interface{} { + return frontendservice.NewFrontendServiceGetBackendMetaResult() +} + +func getColumnInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceGetColumnInfoArgs) + realResult := result.(*frontendservice.FrontendServiceGetColumnInfoResult) + success, err := 
handler.(frontendservice.FrontendService).GetColumnInfo(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceGetColumnInfoArgs() interface{} { + return frontendservice.NewFrontendServiceGetColumnInfoArgs() +} + +func newFrontendServiceGetColumnInfoResult() interface{} { + return frontendservice.NewFrontendServiceGetColumnInfoResult() +} + +func invalidateStatsCacheHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceInvalidateStatsCacheArgs) + realResult := result.(*frontendservice.FrontendServiceInvalidateStatsCacheResult) + success, err := handler.(frontendservice.FrontendService).InvalidateStatsCache(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceInvalidateStatsCacheArgs() interface{} { + return frontendservice.NewFrontendServiceInvalidateStatsCacheArgs() +} + +func newFrontendServiceInvalidateStatsCacheResult() interface{} { + return frontendservice.NewFrontendServiceInvalidateStatsCacheResult() +} + +func showProcessListHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceShowProcessListArgs) + realResult := result.(*frontendservice.FrontendServiceShowProcessListResult) + success, err := handler.(frontendservice.FrontendService).ShowProcessList(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceShowProcessListArgs() interface{} { + return frontendservice.NewFrontendServiceShowProcessListArgs() +} + +func newFrontendServiceShowProcessListResult() interface{} { + return frontendservice.NewFrontendServiceShowProcessListResult() +} + +func reportCommitTxnResult_Handler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceReportCommitTxnResultArgs) + realResult := result.(*frontendservice.FrontendServiceReportCommitTxnResultResult) + success, err := handler.(frontendservice.FrontendService).ReportCommitTxnResult_(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceReportCommitTxnResultArgs() interface{} { + return frontendservice.NewFrontendServiceReportCommitTxnResultArgs() +} + +func newFrontendServiceReportCommitTxnResultResult() interface{} { + return frontendservice.NewFrontendServiceReportCommitTxnResultResult() +} + +func showUserHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceShowUserArgs) + realResult := result.(*frontendservice.FrontendServiceShowUserResult) + success, err := handler.(frontendservice.FrontendService).ShowUser(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceShowUserArgs() interface{} { + return frontendservice.NewFrontendServiceShowUserArgs() +} + +func newFrontendServiceShowUserResult() interface{} { + return frontendservice.NewFrontendServiceShowUserResult() +} + +func syncQueryColumnsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceSyncQueryColumnsArgs) + realResult := result.(*frontendservice.FrontendServiceSyncQueryColumnsResult) + success, err := 
handler.(frontendservice.FrontendService).SyncQueryColumns(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceSyncQueryColumnsArgs() interface{} { + return frontendservice.NewFrontendServiceSyncQueryColumnsArgs() +} + +func newFrontendServiceSyncQueryColumnsResult() interface{} { + return frontendservice.NewFrontendServiceSyncQueryColumnsResult() +} + +func fetchSplitBatchHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceFetchSplitBatchArgs) + realResult := result.(*frontendservice.FrontendServiceFetchSplitBatchResult) + success, err := handler.(frontendservice.FrontendService).FetchSplitBatch(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceFetchSplitBatchArgs() interface{} { + return frontendservice.NewFrontendServiceFetchSplitBatchArgs() +} + +func newFrontendServiceFetchSplitBatchResult() interface{} { + return frontendservice.NewFrontendServiceFetchSplitBatchResult() +} + +func updatePartitionStatsCacheHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceUpdatePartitionStatsCacheArgs) + realResult := result.(*frontendservice.FrontendServiceUpdatePartitionStatsCacheResult) + success, err := handler.(frontendservice.FrontendService).UpdatePartitionStatsCache(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceUpdatePartitionStatsCacheArgs() interface{} { + return frontendservice.NewFrontendServiceUpdatePartitionStatsCacheArgs() +} + +func newFrontendServiceUpdatePartitionStatsCacheResult() interface{} { + return frontendservice.NewFrontendServiceUpdatePartitionStatsCacheResult() +} + +func fetchRunningQueriesHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*frontendservice.FrontendServiceFetchRunningQueriesArgs) + realResult := result.(*frontendservice.FrontendServiceFetchRunningQueriesResult) + success, err := handler.(frontendservice.FrontendService).FetchRunningQueries(ctx, realArg.Request) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newFrontendServiceFetchRunningQueriesArgs() interface{} { + return frontendservice.NewFrontendServiceFetchRunningQueriesArgs() +} + +func newFrontendServiceFetchRunningQueriesResult() interface{} { + return frontendservice.NewFrontendServiceFetchRunningQueriesResult() +} + type kClient struct { c client.Client } @@ -1220,16 +1525,6 @@ func (p *kClient) Ping(ctx context.Context, request *frontendservice.TFrontendPi return _result.GetSuccess(), nil } -func (p *kClient) AddColumns(ctx context.Context, request *frontendservice.TAddColumnsRequest) (r *frontendservice.TAddColumnsResult_, err error) { - var _args frontendservice.FrontendServiceAddColumnsArgs - _args.Request = request - var _result frontendservice.FrontendServiceAddColumnsResult - if err = p.c.Call(ctx, "addColumns", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil -} - func (p *kClient) InitExternalCtlMeta(ctx context.Context, request *frontendservice.TInitExternalCtlMetaRequest) (r *frontendservice.TInitExternalCtlMetaResult_, err error) { var _args frontendservice.FrontendServiceInitExternalCtlMetaArgs _args.Request = request @@ -1259,6 +1554,16 @@ func (p *kClient) 
AcquireToken(ctx context.Context) (r *frontendservice.TMySqlLo return _result.GetSuccess(), nil } +func (p *kClient) CheckToken(ctx context.Context, token string) (r bool, err error) { + var _args frontendservice.FrontendServiceCheckTokenArgs + _args.Token = token + var _result frontendservice.FrontendServiceCheckTokenResult + if err = p.c.Call(ctx, "checkToken", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + func (p *kClient) ConfirmUnusedRemoteFiles(ctx context.Context, request *frontendservice.TConfirmUnusedRemoteFilesRequest) (r *frontendservice.TConfirmUnusedRemoteFilesResult_, err error) { var _args frontendservice.FrontendServiceConfirmUnusedRemoteFilesArgs _args.Request = request @@ -1299,6 +1604,46 @@ func (p *kClient) GetTabletReplicaInfos(ctx context.Context, request *frontendse return _result.GetSuccess(), nil } +func (p *kClient) AddPlsqlStoredProcedure(ctx context.Context, request *frontendservice.TAddPlsqlStoredProcedureRequest) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) { + var _args frontendservice.FrontendServiceAddPlsqlStoredProcedureArgs + _args.Request = request + var _result frontendservice.FrontendServiceAddPlsqlStoredProcedureResult + if err = p.c.Call(ctx, "addPlsqlStoredProcedure", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) DropPlsqlStoredProcedure(ctx context.Context, request *frontendservice.TDropPlsqlStoredProcedureRequest) (r *frontendservice.TPlsqlStoredProcedureResult_, err error) { + var _args frontendservice.FrontendServiceDropPlsqlStoredProcedureArgs + _args.Request = request + var _result frontendservice.FrontendServiceDropPlsqlStoredProcedureResult + if err = p.c.Call(ctx, "dropPlsqlStoredProcedure", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) AddPlsqlPackage(ctx context.Context, request *frontendservice.TAddPlsqlPackageRequest) (r *frontendservice.TPlsqlPackageResult_, err error) { + var _args frontendservice.FrontendServiceAddPlsqlPackageArgs + _args.Request = request + var _result frontendservice.FrontendServiceAddPlsqlPackageResult + if err = p.c.Call(ctx, "addPlsqlPackage", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) DropPlsqlPackage(ctx context.Context, request *frontendservice.TDropPlsqlPackageRequest) (r *frontendservice.TPlsqlPackageResult_, err error) { + var _args frontendservice.FrontendServiceDropPlsqlPackageArgs + _args.Request = request + var _result frontendservice.FrontendServiceDropPlsqlPackageResult + if err = p.c.Call(ctx, "dropPlsqlPackage", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + func (p *kClient) GetMasterToken(ctx context.Context, request *frontendservice.TGetMasterTokenRequest) (r *frontendservice.TGetMasterTokenResult_, err error) { var _args frontendservice.FrontendServiceGetMasterTokenArgs _args.Request = request @@ -1348,3 +1693,123 @@ func (p *kClient) CreatePartition(ctx context.Context, request *frontendservice. 
} return _result.GetSuccess(), nil } + +func (p *kClient) ReplacePartition(ctx context.Context, request *frontendservice.TReplacePartitionRequest) (r *frontendservice.TReplacePartitionResult_, err error) { + var _args frontendservice.FrontendServiceReplacePartitionArgs + _args.Request = request + var _result frontendservice.FrontendServiceReplacePartitionResult + if err = p.c.Call(ctx, "replacePartition", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetMeta(ctx context.Context, request *frontendservice.TGetMetaRequest) (r *frontendservice.TGetMetaResult_, err error) { + var _args frontendservice.FrontendServiceGetMetaArgs + _args.Request = request + var _result frontendservice.FrontendServiceGetMetaResult + if err = p.c.Call(ctx, "getMeta", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetBackendMeta(ctx context.Context, request *frontendservice.TGetBackendMetaRequest) (r *frontendservice.TGetBackendMetaResult_, err error) { + var _args frontendservice.FrontendServiceGetBackendMetaArgs + _args.Request = request + var _result frontendservice.FrontendServiceGetBackendMetaResult + if err = p.c.Call(ctx, "getBackendMeta", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetColumnInfo(ctx context.Context, request *frontendservice.TGetColumnInfoRequest) (r *frontendservice.TGetColumnInfoResult_, err error) { + var _args frontendservice.FrontendServiceGetColumnInfoArgs + _args.Request = request + var _result frontendservice.FrontendServiceGetColumnInfoResult + if err = p.c.Call(ctx, "getColumnInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) InvalidateStatsCache(ctx context.Context, request *frontendservice.TInvalidateFollowerStatsCacheRequest) (r *status.TStatus, err error) { + var _args frontendservice.FrontendServiceInvalidateStatsCacheArgs + _args.Request = request + var _result frontendservice.FrontendServiceInvalidateStatsCacheResult + if err = p.c.Call(ctx, "invalidateStatsCache", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ShowProcessList(ctx context.Context, request *frontendservice.TShowProcessListRequest) (r *frontendservice.TShowProcessListResult_, err error) { + var _args frontendservice.FrontendServiceShowProcessListArgs + _args.Request = request + var _result frontendservice.FrontendServiceShowProcessListResult + if err = p.c.Call(ctx, "showProcessList", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ReportCommitTxnResult_(ctx context.Context, request *frontendservice.TReportCommitTxnResultRequest) (r *status.TStatus, err error) { + var _args frontendservice.FrontendServiceReportCommitTxnResultArgs + _args.Request = request + var _result frontendservice.FrontendServiceReportCommitTxnResultResult + if err = p.c.Call(ctx, "reportCommitTxnResult", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ShowUser(ctx context.Context, request *frontendservice.TShowUserRequest) (r *frontendservice.TShowUserResult_, err error) { + var _args frontendservice.FrontendServiceShowUserArgs + _args.Request = request + var _result frontendservice.FrontendServiceShowUserResult + if err = p.c.Call(ctx, "showUser", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p 
*kClient) SyncQueryColumns(ctx context.Context, request *frontendservice.TSyncQueryColumns) (r *status.TStatus, err error) { + var _args frontendservice.FrontendServiceSyncQueryColumnsArgs + _args.Request = request + var _result frontendservice.FrontendServiceSyncQueryColumnsResult + if err = p.c.Call(ctx, "syncQueryColumns", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) FetchSplitBatch(ctx context.Context, request *frontendservice.TFetchSplitBatchRequest) (r *frontendservice.TFetchSplitBatchResult_, err error) { + var _args frontendservice.FrontendServiceFetchSplitBatchArgs + _args.Request = request + var _result frontendservice.FrontendServiceFetchSplitBatchResult + if err = p.c.Call(ctx, "fetchSplitBatch", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) UpdatePartitionStatsCache(ctx context.Context, request *frontendservice.TUpdateFollowerPartitionStatsCacheRequest) (r *status.TStatus, err error) { + var _args frontendservice.FrontendServiceUpdatePartitionStatsCacheArgs + _args.Request = request + var _result frontendservice.FrontendServiceUpdatePartitionStatsCacheResult + if err = p.c.Call(ctx, "updatePartitionStatsCache", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) FetchRunningQueries(ctx context.Context, request *frontendservice.TFetchRunningQueriesRequest) (r *frontendservice.TFetchRunningQueriesResult_, err error) { + var _args frontendservice.FrontendServiceFetchRunningQueriesArgs + _args.Request = request + var _result frontendservice.FrontendServiceFetchRunningQueriesResult + if err = p.c.Call(ctx, "fetchRunningQueries", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/pkg/rpc/kitex_gen/frontendservice/frontendservice/invoker.go b/pkg/rpc/kitex_gen/frontendservice/frontendservice/invoker.go index 6e39978e..70d3086b 100644 --- a/pkg/rpc/kitex_gen/frontendservice/frontendservice/invoker.go +++ b/pkg/rpc/kitex_gen/frontendservice/frontendservice/invoker.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package frontendservice diff --git a/pkg/rpc/kitex_gen/frontendservice/frontendservice/server.go b/pkg/rpc/kitex_gen/frontendservice/frontendservice/server.go index f1600a2c..c2cd6f90 100644 --- a/pkg/rpc/kitex_gen/frontendservice/frontendservice/server.go +++ b/pkg/rpc/kitex_gen/frontendservice/frontendservice/server.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package frontendservice import ( diff --git a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go index 7dd45b4c..aa1acef3 100644 --- a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package frontendservice @@ -11,10 +11,13 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/data" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/datasinks" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/masterservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/planner" @@ -34,8 +37,10 @@ var ( _ = bthrift.BinaryWriter(nil) _ = agentservice.KitexUnusedProtection _ = data.KitexUnusedProtection + _ = datasinks.KitexUnusedProtection _ = descriptors.KitexUnusedProtection _ = exprs.KitexUnusedProtection + _ = heartbeatservice.KitexUnusedProtection _ = masterservice.KitexUnusedProtection _ = palointernalservice.KitexUnusedProtection _ = planner.KitexUnusedProtection @@ -2619,7 +2624,7 @@ func (p *TShowVariableResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -2677,36 +2682,41 @@ RequiredFieldNotSetError: func (p *TShowVariableResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.Variables = make(map[string]string, size) + p.Variables = make([][]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem1 string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - _key = v + _elem1 = v - } + } - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - _val = v - } - p.Variables[_key] = _val + p.Variables = append(p.Variables, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -2743,477 +2753,483 @@ func (p *TShowVariableResult_) BLength() int { func (p *TShowVariableResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variables", thrift.MAP, 1) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variables", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) var length int - for k, v := range p.Variables { + for _, v := range p.Variables { length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range v { + length++ + offset 
+= bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } func (p *TShowVariableResult_) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("variables", thrift.MAP, 1) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Variables)) - for k, v := range p.Variables { + l += bthrift.Binary.FieldBeginLength("variables", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.Variables)) + for _, v := range p.Variables { + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) + for _, v := range v { + l += bthrift.Binary.StringLengthNocopy(v) - l += bthrift.Binary.StringLengthNocopy(k) + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} - l += bthrift.Binary.StringLengthNocopy(v) +func (p *TTableRowFormat) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - l += bthrift.Binary.MapEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", 
p, fieldId, fieldIDToName_TTableRowFormat[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableRowFormat) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FieldTerminator = &v + + } + return offset, nil +} + +func (p *TTableRowFormat) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LineTerminator = &v + + } + return offset, nil +} + +func (p *TTableRowFormat) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EscapedBy = &v + + } + return offset, nil +} + +// for compatibility +func (p *TTableRowFormat) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTableRowFormat) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableRowFormat") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTableRowFormat) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTableRowFormat") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTableRowFormat) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFieldTerminator() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "field_terminator", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FieldTerminator) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableRowFormat) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLineTerminator() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "line_terminator", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LineTerminator) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableRowFormat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEscapedBy() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "escaped_by", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.EscapedBy) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableRowFormat) field1Length() int { + l := 0 + if p.IsSetFieldTerminator() { + l += bthrift.Binary.FieldBeginLength("field_terminator", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.FieldTerminator) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableRowFormat) field2Length() int { + l := 0 + if p.IsSetLineTerminator() { + l += bthrift.Binary.FieldBeginLength("line_terminator", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.LineTerminator) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableRowFormat) field3Length() int { + l := 0 + if p.IsSetEscapedBy() { + l += bthrift.Binary.FieldBeginLength("escaped_by", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.EscapedBy) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPartitionKeyValue) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetName bool = false + var issetValue bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetName = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetValue = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetName { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetValue { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionKeyValue[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionKeyValue[fieldId])) +} + +func (p *TPartitionKeyValue) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Name = v + + } + return offset, nil +} + +func (p *TPartitionKeyValue) FastReadField2(buf []byte) (int, error) { + offset 
:= 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Value = v + + } + return offset, nil +} + +// for compatibility +func (p *TPartitionKeyValue) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPartitionKeyValue) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionKeyValue") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPartitionKeyValue) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPartitionKeyValue") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPartitionKeyValue) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPartitionKeyValue) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Value) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPartitionKeyValue) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.Name) + l += bthrift.Binary.FieldEndLength() return l } -func (p *TTableRowFormat) FastRead(buf []byte) (int, error) { +func (p *TPartitionKeyValue) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.Value) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSessionState) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } 
- default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableRowFormat[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TTableRowFormat) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FieldTerminator = &v - - } - return offset, nil -} - -func (p *TTableRowFormat) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LineTerminator = &v - - } - return offset, nil -} - -func (p *TTableRowFormat) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.EscapedBy = &v - - } - return offset, nil -} - -// for compatibility -func (p *TTableRowFormat) FastWrite(buf []byte) int { - return 0 -} - -func (p *TTableRowFormat) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableRowFormat") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TTableRowFormat) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TTableRowFormat") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TTableRowFormat) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFieldTerminator() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "field_terminator", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FieldTerminator) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TTableRowFormat) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLineTerminator() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "line_terminator", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LineTerminator) - 
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TTableRowFormat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEscapedBy() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "escaped_by", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.EscapedBy) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TTableRowFormat) field1Length() int { - l := 0 - if p.IsSetFieldTerminator() { - l += bthrift.Binary.FieldBeginLength("field_terminator", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.FieldTerminator) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TTableRowFormat) field2Length() int { - l := 0 - if p.IsSetLineTerminator() { - l += bthrift.Binary.FieldBeginLength("line_terminator", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.LineTerminator) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TTableRowFormat) field3Length() int { - l := 0 - if p.IsSetEscapedBy() { - l += bthrift.Binary.FieldBeginLength("escaped_by", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.EscapedBy) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPartitionKeyValue) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetName bool = false - var issetValue bool = false - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetName = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetValue = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetName { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetValue { - fieldId = 2 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionKeyValue[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return 
offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionKeyValue[fieldId])) -} - -func (p *TPartitionKeyValue) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Name = v - - } - return offset, nil -} - -func (p *TPartitionKeyValue) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Value = v - - } - return offset, nil -} - -// for compatibility -func (p *TPartitionKeyValue) FastWrite(buf []byte) int { - return 0 -} - -func (p *TPartitionKeyValue) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionKeyValue") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TPartitionKeyValue) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TPartitionKeyValue") - if p != nil { - l += p.field1Length() - l += p.field2Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TPartitionKeyValue) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Name) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPartitionKeyValue) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Value) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPartitionKeyValue) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.Name) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPartitionKeyValue) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.Value) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TSessionState) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetDatabase bool = false - var issetUser bool = false - var issetConnectionId bool = false + var issetDatabase bool = false + var issetUser bool = false + var issetConnectionId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -4456,7 +4472,7 @@ func (p *TCommonDdlParams) FastRead(buf []byte) (int, error) { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l if err != nil { - goto SkipFieldTypeError + goto SkipFieldError } l, 
err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -4476,9 +4492,8 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: @@ -9787,6 +9802,20 @@ func (p *TDetailedReportParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9861,6 +9890,19 @@ func (p *TDetailedReportParams) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TDetailedReportParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsFragmentLevel = &v + + } + return offset, nil +} + // for compatibility func (p *TDetailedReportParams) FastWrite(buf []byte) int { return 0 @@ -9870,6 +9912,7 @@ func (p *TDetailedReportParams) FastWriteNocopy(buf []byte, binaryWriter bthrift offset := 0 offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDetailedReportParams") if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -9886,6 +9929,7 @@ func (p *TDetailedReportParams) BLength() int { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -9922,6 +9966,17 @@ func (p *TDetailedReportParams) fastWriteField3(buf []byte, binaryWriter bthrift return offset } +func (p *TDetailedReportParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsFragmentLevel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_fragment_level", thrift.BOOL, 4) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsFragmentLevel) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TDetailedReportParams) field1Length() int { l := 0 if p.IsSetFragmentInstanceId() { @@ -9952,13 +10007,23 @@ func (p *TDetailedReportParams) field3Length() int { return l } -func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { +func (p *TDetailedReportParams) field4Length() int { + l := 0 + if p.IsSetIsFragmentLevel() { + l += bthrift.Binary.FieldBeginLength("is_fragment_level", thrift.BOOL, 4) + l += bthrift.Binary.BoolLength(*p.IsFragmentLevel) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatistics) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false _, l, err = 
bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -9976,13 +10041,12 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetProtocolVersion = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9991,7 +10055,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -10005,7 +10069,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -10019,7 +10083,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -10033,7 +10097,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { @@ -10047,7 +10111,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { @@ -10061,7 +10125,7 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { } } case 7: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { @@ -10074,93 +10138,9 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 9: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField13(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField14(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } 
else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 15: + case 8: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField15(buf[offset:]) + l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -10172,9 +10152,9 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 16: + case 9: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField16(buf[offset:]) + l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -10186,79 +10166,9 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 17: + case 10: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField17(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField18(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField19(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField20(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField21(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField22(buf[offset:]) + l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -10270,9 +10180,9 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 23: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField23(buf[offset:]) + case 11: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField11(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -10304,1051 +10214,1142 @@ func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportExecStatusParams[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatistics[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: 
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReportExecStatusParams[fieldId])) } -func (p *TReportExecStatusParams) FastReadField1(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.ProtocolVersion = FrontendServiceVersion(v) + p.ScanRows = &v } return offset, nil } -func (p *TReportExecStatusParams) FastReadField2(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ScanBytes = &v + } - p.QueryId = tmp return offset, nil } -func (p *TReportExecStatusParams) FastReadField3(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BackendNum = &v + p.ReturnedRows = &v } return offset, nil } -func (p *TReportExecStatusParams) FastReadField4(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField4(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.CpuMs = &v + } - p.FragmentInstanceId = tmp return offset, nil } -func (p *TReportExecStatusParams) FastReadField5(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField5(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.MaxPeakMemoryBytes = &v + } - p.Status = tmp return offset, nil } -func (p *TReportExecStatusParams) FastReadField6(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Done = &v + p.CurrentUsedMemoryBytes = &v } return offset, nil } -func (p *TReportExecStatusParams) FastReadField7(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField7(buf []byte) (int, error) { offset := 0 - tmp := runtimeprofile.NewTRuntimeProfileTree() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.WorkloadGroupId = &v + } - p.Profile = tmp return offset, nil } -func (p *TReportExecStatusParams) FastReadField9(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField8(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - 
offset += l - if err != nil { - return offset, err - } - p.ErrorLog = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.ErrorLog = append(p.ErrorLog, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ShuffleSendBytes = &v + } return offset, nil } -func (p *TReportExecStatusParams) FastReadField10(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField9(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.DeltaUrls = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.DeltaUrls = append(p.DeltaUrls, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ShuffleSendRows = &v + } return offset, nil } -func (p *TReportExecStatusParams) FastReadField11(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField10(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.LoadCounters = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _val = v - - } - - p.LoadCounters[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ScanBytesFromLocalStorage = &v + } return offset, nil } -func (p *TReportExecStatusParams) FastReadField12(buf []byte) (int, error) { +func (p *TQueryStatistics) FastReadField11(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TrackingUrl = &v + p.ScanBytesFromRemoteStorage = &v } return offset, nil } -func (p *TReportExecStatusParams) FastReadField13(buf []byte) (int, error) { - offset := 0 +// for compatibility +func (p *TQueryStatistics) FastWrite(buf []byte) int { + return 0 +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err +func (p *TQueryStatistics) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryStatistics") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], 
binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) } - p.ExportFiles = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - p.ExportFiles = append(p.ExportFiles, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TQueryStatistics) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryStatistics") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TReportExecStatusParams) FastReadField14(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetScanRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_rows", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanRows) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.CommitInfos = append(p.CommitInfos, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField15(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetScanBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_bytes", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanBytes) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LoadedRows = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField16(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetReturnedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "returned_rows", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReturnedRows) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.BackendId = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p 
*TReportExecStatusParams) FastReadField17(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetCpuMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cpu_ms", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CpuMs) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LoadedBytes = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField18(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMaxPeakMemoryBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_peak_memory_bytes", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.MaxPeakMemoryBytes) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.ErrorTabletInfos = make([]*types.TErrorTabletInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTErrorTabletInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + return offset +} - p.ErrorTabletInfos = append(p.ErrorTabletInfos, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TQueryStatistics) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUsedMemoryBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_used_memory_bytes", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CurrentUsedMemoryBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField19(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetWorkloadGroupId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_group_id", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.WorkloadGroupId) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FragmentId = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField20(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetShuffleSendBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "shuffle_send_bytes", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ShuffleSendBytes) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := palointernalservice.TQueryType(v) - p.QueryType = &tmp - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField21(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetShuffleSendRows() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "shuffle_send_rows", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ShuffleSendRows) - tmp := runtimeprofile.NewTRuntimeProfileTree() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.LoadChannelProfile = tmp - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField22(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetScanBytesFromLocalStorage() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_bytes_from_local_storage", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanBytesFromLocalStorage) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FinishedScanRanges = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TReportExecStatusParams) FastReadField23(buf []byte) (int, error) { +func (p *TQueryStatistics) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetScanBytesFromRemoteStorage() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_bytes_from_remote_storage", thrift.I64, 11) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanBytesFromRemoteStorage) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.DetailedReport = make([]*TDetailedReportParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTDetailedReportParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.DetailedReport = append(p.DetailedReport, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -// for compatibility -func (p *TReportExecStatusParams) FastWrite(buf []byte) int { - return 0 -} +func (p *TQueryStatistics) field1Length() int { + l := 0 + if p.IsSetScanRows() { + l += bthrift.Binary.FieldBeginLength("scan_rows", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.ScanRows) -func (p *TReportExecStatusParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReportExecStatusParams") - if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += 
p.fastWriteField12(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField18(buf[offset:], binaryWriter) - offset += p.fastWriteField20(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += p.fastWriteField23(buf[offset:], binaryWriter) + l += bthrift.Binary.FieldEndLength() } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + return l } -func (p *TReportExecStatusParams) BLength() int { +func (p *TQueryStatistics) field2Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TReportExecStatusParams") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - l += p.field13Length() - l += p.field14Length() - l += p.field15Length() - l += p.field16Length() - l += p.field17Length() - l += p.field18Length() - l += p.field19Length() - l += p.field20Length() - l += p.field21Length() - l += p.field22Length() - l += p.field23Length() + if p.IsSetScanBytes() { + l += bthrift.Binary.FieldBeginLength("scan_bytes", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.ScanBytes) + + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() return l } -func (p *TReportExecStatusParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} +func (p *TQueryStatistics) field3Length() int { + l := 0 + if p.IsSetReturnedRows() { + l += bthrift.Binary.FieldBeginLength("returned_rows", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.ReturnedRows) -func (p *TReportExecStatusParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetQueryId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 2) - offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBackendNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_num", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.BackendNum) +func (p *TQueryStatistics) field4Length() int { + l := 0 + if p.IsSetCpuMs() { + l += bthrift.Binary.FieldBeginLength("cpu_ms", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.CpuMs) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFragmentInstanceId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 4) - offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} +func (p *TQueryStatistics) field5Length() int { + l := 0 + if p.IsSetMaxPeakMemoryBytes() { + l += bthrift.Binary.FieldBeginLength("max_peak_memory_bytes", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.MaxPeakMemoryBytes) -func (p *TReportExecStatusParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 5) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDone() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "done", thrift.BOOL, 6) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.Done) +func (p *TQueryStatistics) field6Length() int { + l := 0 + if p.IsSetCurrentUsedMemoryBytes() { + l += bthrift.Binary.FieldBeginLength("current_used_memory_bytes", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.CurrentUsedMemoryBytes) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetProfile() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "profile", thrift.STRUCT, 7) - offset += p.Profile.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TQueryStatistics) field7Length() int { + l := 0 + if p.IsSetWorkloadGroupId() { + l += bthrift.Binary.FieldBeginLength("workload_group_id", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.WorkloadGroupId) + + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetErrorLog() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "error_log", thrift.LIST, 9) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ErrorLog { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) +func (p *TQueryStatistics) field8Length() int { + l := 0 + if p.IsSetShuffleSendBytes() { + l += bthrift.Binary.FieldBeginLength("shuffle_send_bytes", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.ShuffleSendBytes) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDeltaUrls() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delta_urls", thrift.LIST, 10) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.DeltaUrls { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) +func (p *TQueryStatistics) field9Length() int { + l := 0 + if p.IsSetShuffleSendRows() { + l += bthrift.Binary.FieldBeginLength("shuffle_send_rows", thrift.I64, 9) + 
l += bthrift.Binary.I64Length(*p.ShuffleSendRows) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadCounters() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_counters", thrift.MAP, 11) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) - var length int - for k, v := range p.LoadCounters { - length++ - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) +func (p *TQueryStatistics) field10Length() int { + l := 0 + if p.IsSetScanBytesFromLocalStorage() { + l += bthrift.Binary.FieldBeginLength("scan_bytes_from_local_storage", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.ScanBytesFromLocalStorage) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTrackingUrl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tracking_url", thrift.STRING, 12) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrackingUrl) +func (p *TQueryStatistics) field11Length() int { + l := 0 + if p.IsSetScanBytesFromRemoteStorage() { + l += bthrift.Binary.FieldBeginLength("scan_bytes_from_remote_storage", thrift.I64, 11) + l += bthrift.Binary.I64Length(*p.ScanBytesFromRemoteStorage) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TReportExecStatusParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExportFiles() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "export_files", thrift.LIST, 13) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ExportFiles { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) +func (p *TReportWorkloadRuntimeStatusParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} -func (p *TReportExecStatusParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCommitInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commitInfos", thrift.LIST, 14) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.CommitInfos { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset -} - -func (p *TReportExecStatusParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadedRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_rows", thrift.I64, 15) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportWorkloadRuntimeStatusParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TReportExecStatusParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportWorkloadRuntimeStatusParams) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetBackendId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 16) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + } - return offset + return offset, nil } -func (p *TReportExecStatusParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportWorkloadRuntimeStatusParams) FastReadField2(buf []byte) (int, error) { offset := 0 - if p.IsSetLoadedBytes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_bytes", thrift.I64, 17) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedBytes) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _, _, size, l, err := 
bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return offset -} + p.QueryStatisticsMap = make(map[string]*TQueryStatistics, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v -func (p *TReportExecStatusParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetErrorTabletInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "errorTabletInfos", thrift.LIST, 18) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.ErrorTabletInfos { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _val := NewTQueryStatistics() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.QueryStatisticsMap[_key] = _val } - return offset + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TReportExecStatusParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFragmentId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_id", thrift.I32, 19) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +// for compatibility +func (p *TReportWorkloadRuntimeStatusParams) FastWrite(buf []byte) int { + return 0 } -func (p *TReportExecStatusParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportWorkloadRuntimeStatusParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_type", thrift.I32, 20) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.QueryType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReportWorkloadRuntimeStatusParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TReportExecStatusParams) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadChannelProfile() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadChannelProfile", thrift.STRUCT, 21) - offset += p.LoadChannelProfile.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TReportWorkloadRuntimeStatusParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TReportWorkloadRuntimeStatusParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() } - return offset + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TReportExecStatusParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportWorkloadRuntimeStatusParams) fastWriteField1(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFinishedScanRanges() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finished_scan_ranges", thrift.I32, 22) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.FinishedScanRanges) + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TReportExecStatusParams) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportWorkloadRuntimeStatusParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDetailedReport() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "detailed_report", thrift.LIST, 23) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + if p.IsSetQueryStatisticsMap() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_statistics_map", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRUCT, 0) var length int - for _, v := range p.DetailedReport { + for k, v := range p.QueryStatisticsMap { length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TReportExecStatusParams) field1Length() int { +func (p *TReportWorkloadRuntimeStatusParams) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) - - l += bthrift.Binary.FieldEndLength() - return l -} + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.BackendId) -func (p *TReportExecStatusParams) field2Length() int { - l := 0 - if p.IsSetQueryId() { - l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 2) - l += p.QueryId.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field3Length() int { +func (p *TReportWorkloadRuntimeStatusParams) field2Length() int { l := 0 - if p.IsSetBackendNum() { - l += bthrift.Binary.FieldBeginLength("backend_num", thrift.I32, 3) - l += bthrift.Binary.I32Length(*p.BackendNum) + if p.IsSetQueryStatisticsMap() { + l += bthrift.Binary.FieldBeginLength("query_statistics_map", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRUCT, len(p.QueryStatisticsMap)) + for k, v := range p.QueryStatisticsMap { + l += bthrift.Binary.StringLengthNocopy(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field4Length() int { - l := 0 - if p.IsSetFragmentInstanceId() { - l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 4) - l += p.FragmentInstanceId.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} +func (p *TQueryProfile) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId 
thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } -func (p *TReportExecStatusParams) field5Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 5) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryProfile[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TReportExecStatusParams) field6Length() int { - l := 0 - if p.IsSetDone() { - l += bthrift.Binary.FieldBeginLength("done", thrift.BOOL, 6) - l += bthrift.Binary.BoolLength(*p.Done) +func (p *TQueryProfile) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l 
+ p.QueryId = tmp + return offset, nil } -func (p *TReportExecStatusParams) field7Length() int { - l := 0 - if p.IsSetProfile() { - l += bthrift.Binary.FieldBeginLength("profile", thrift.STRUCT, 7) - l += p.Profile.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TQueryProfile) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return l -} + p.FragmentIdToProfile = make(map[int32][]*TDetailedReportParams, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TReportExecStatusParams) field9Length() int { - l := 0 - if p.IsSetErrorLog() { - l += bthrift.Binary.FieldBeginLength("error_log", thrift.LIST, 9) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ErrorLog)) - for _, v := range p.ErrorLog { - l += bthrift.Binary.StringLengthNocopy(v) + _key = v } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} -func (p *TReportExecStatusParams) field10Length() int { - l := 0 - if p.IsSetDeltaUrls() { - l += bthrift.Binary.FieldBeginLength("delta_urls", thrift.LIST, 10) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.DeltaUrls)) - for _, v := range p.DeltaUrls { - l += bthrift.Binary.StringLengthNocopy(v) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _val := make([]*TDetailedReportParams, 0, size) + for i := 0; i < size; i++ { + _elem := NewTDetailedReportParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + _val = append(_val, _elem) } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FragmentIdToProfile[_key] = _val } - return l + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TReportExecStatusParams) field11Length() int { - l := 0 - if p.IsSetLoadCounters() { - l += bthrift.Binary.FieldBeginLength("load_counters", thrift.MAP, 11) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.LoadCounters)) - for k, v := range p.LoadCounters { +func (p *TQueryProfile) FastReadField3(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.StringLengthNocopy(k) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FragmentInstanceIds = make([]*types.TUniqueId, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTUniqueId() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - l += bthrift.Binary.StringLengthNocopy(v) + p.FragmentInstanceIds = append(p.FragmentInstanceIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} +func (p *TQueryProfile) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.InstanceProfiles = make([]*runtimeprofile.TRuntimeProfileTree, 0, size) + for i := 0; i < size; i++ { + _elem := 
runtimeprofile.NewTRuntimeProfileTree() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() + + p.InstanceProfiles = append(p.InstanceProfiles, _elem) } - return l + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TReportExecStatusParams) field12Length() int { - l := 0 - if p.IsSetTrackingUrl() { - l += bthrift.Binary.FieldBeginLength("tracking_url", thrift.STRING, 12) - l += bthrift.Binary.StringLengthNocopy(*p.TrackingUrl) +func (p *TQueryProfile) FastReadField5(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return l + p.LoadChannelProfiles = make([]*runtimeprofile.TRuntimeProfileTree, 0, size) + for i := 0; i < size; i++ { + _elem := runtimeprofile.NewTRuntimeProfileTree() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.LoadChannelProfiles = append(p.LoadChannelProfiles, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TReportExecStatusParams) field13Length() int { - l := 0 - if p.IsSetExportFiles() { - l += bthrift.Binary.FieldBeginLength("export_files", thrift.LIST, 13) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ExportFiles)) - for _, v := range p.ExportFiles { - l += bthrift.Binary.StringLengthNocopy(v) +// for compatibility +func (p *TQueryProfile) FastWrite(buf []byte) int { + return 0 +} - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TQueryProfile) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryProfile") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TReportExecStatusParams) field14Length() int { +func (p *TQueryProfile) BLength() int { l := 0 - if p.IsSetCommitInfos() { - l += bthrift.Binary.FieldBeginLength("commitInfos", thrift.LIST, 14) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) - for _, v := range p.CommitInfos { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TQueryProfile") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TReportExecStatusParams) field15Length() int { - l := 0 - if p.IsSetLoadedRows() { - l += bthrift.Binary.FieldBeginLength("loaded_rows", thrift.I64, 15) - l += bthrift.Binary.I64Length(*p.LoadedRows) - - l += bthrift.Binary.FieldEndLength() +func (p *TQueryProfile) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int 
{ + offset := 0 + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 1) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TReportExecStatusParams) field16Length() int { - l := 0 - if p.IsSetBackendId() { - l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 16) - l += bthrift.Binary.I64Length(*p.BackendId) +func (p *TQueryProfile) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentIdToProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_id_to_profile", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, 0) + var length int + for k, v := range p.FragmentIdToProfile { + length++ - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.LIST, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TReportExecStatusParams) field17Length() int { - l := 0 - if p.IsSetLoadedBytes() { - l += bthrift.Binary.FieldBeginLength("loaded_bytes", thrift.I64, 17) - l += bthrift.Binary.I64Length(*p.LoadedBytes) +func (p *TQueryProfile) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentInstanceIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_ids", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FragmentInstanceIds { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - l += bthrift.Binary.FieldEndLength() +func (p *TQueryProfile) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInstanceProfiles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "instance_profiles", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.InstanceProfiles { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TReportExecStatusParams) field18Length() int { - l := 0 - if p.IsSetErrorTabletInfos() { - l += bthrift.Binary.FieldBeginLength("errorTabletInfos", thrift.LIST, 18) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ErrorTabletInfos)) - for _, v := range p.ErrorTabletInfos { - l += v.BLength() +func (p *TQueryProfile) fastWriteField5(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadChannelProfiles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_channel_profiles", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.LoadChannelProfiles { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TReportExecStatusParams) field19Length() int { +func (p *TQueryProfile) field1Length() int { l := 0 - if p.IsSetFragmentId() { - l += bthrift.Binary.FieldBeginLength("fragment_id", thrift.I32, 19) - l += bthrift.Binary.I32Length(*p.FragmentId) - + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 1) + l += p.QueryId.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field20Length() int { +func (p *TQueryProfile) field2Length() int { l := 0 - if p.IsSetQueryType() { - l += bthrift.Binary.FieldBeginLength("query_type", thrift.I32, 20) - l += bthrift.Binary.I32Length(int32(*p.QueryType)) + if p.IsSetFragmentIdToProfile() { + l += bthrift.Binary.FieldBeginLength("fragment_id_to_profile", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, len(p.FragmentIdToProfile)) + for k, v := range p.FragmentIdToProfile { + l += bthrift.Binary.I32Length(k) + + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field21Length() int { +func (p *TQueryProfile) field3Length() int { l := 0 - if p.IsSetLoadChannelProfile() { - l += bthrift.Binary.FieldBeginLength("loadChannelProfile", thrift.STRUCT, 21) - l += p.LoadChannelProfile.BLength() + if p.IsSetFragmentInstanceIds() { + l += bthrift.Binary.FieldBeginLength("fragment_instance_ids", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FragmentInstanceIds)) + for _, v := range p.FragmentInstanceIds { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field22Length() int { +func (p *TQueryProfile) field4Length() int { l := 0 - if p.IsSetFinishedScanRanges() { - l += bthrift.Binary.FieldBeginLength("finished_scan_ranges", thrift.I32, 22) - l += bthrift.Binary.I32Length(*p.FinishedScanRanges) - + if p.IsSetInstanceProfiles() { + l += bthrift.Binary.FieldBeginLength("instance_profiles", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.InstanceProfiles)) + for _, v := range p.InstanceProfiles { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TReportExecStatusParams) field23Length() int { +func (p *TQueryProfile) field5Length() int { l := 0 - if p.IsSetDetailedReport() { - l += bthrift.Binary.FieldBeginLength("detailed_report", thrift.LIST, 23) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DetailedReport)) - for _, v := range p.DetailedReport { + if p.IsSetLoadChannelProfiles() { + l += 
bthrift.Binary.FieldBeginLength("load_channel_profiles", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.LoadChannelProfiles)) + for _, v := range p.LoadChannelProfiles { l += v.BLength() } l += bthrift.Binary.ListEndLength() @@ -11357,14 +11358,12 @@ func (p *TReportExecStatusParams) field23Length() int { return l } -func (p *TFeResult_) FastRead(buf []byte) (int, error) { +func (p *TFragmentInstanceReport) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -11382,13 +11381,12 @@ func (p *TFeResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetProtocolVersion = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11397,13 +11395,40 @@ func (p *TFeResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11431,131 +11456,199 @@ func (p *TFeResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetStatus { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFeResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFragmentInstanceReport[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFeResult_[fieldId])) } -func (p *TFeResult_) FastReadField1(buf []byte) (int, error) { +func (p *TFragmentInstanceReport) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := 
types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l + } + p.FragmentInstanceId = tmp + return offset, nil +} - p.ProtocolVersion = FrontendServiceVersion(v) +func (p *TFragmentInstanceReport) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumFinishedRange = &v } return offset, nil } -func (p *TFeResult_) FastReadField2(buf []byte) (int, error) { +func (p *TFragmentInstanceReport) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.LoadedRows = &v + + } + return offset, nil +} + +func (p *TFragmentInstanceReport) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadedBytes = &v + } - p.Status = tmp return offset, nil } // for compatibility -func (p *TFeResult_) FastWrite(buf []byte) int { +func (p *TFragmentInstanceReport) FastWrite(buf []byte) int { return 0 } -func (p *TFeResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFragmentInstanceReport) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFeResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFragmentInstanceReport") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFeResult_) BLength() int { +func (p *TFragmentInstanceReport) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFeResult") + l += bthrift.Binary.StructBeginLength("TFragmentInstanceReport") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFeResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFragmentInstanceReport) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocolVersion", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) + if p.IsSetFragmentInstanceId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 1) + offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TFragmentInstanceReport) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumFinishedRange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_finished_range", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumFinishedRange) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFeResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFragmentInstanceReport) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 2) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetLoadedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_rows", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFeResult_) field1Length() int { +func (p *TFragmentInstanceReport) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadedBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_bytes", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFragmentInstanceReport) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("protocolVersion", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) + if p.IsSetFragmentInstanceId() { + l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 1) + l += p.FragmentInstanceId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.FieldEndLength() +func (p *TFragmentInstanceReport) field2Length() int { + l := 0 + if p.IsSetNumFinishedRange() { + l += bthrift.Binary.FieldBeginLength("num_finished_range", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.NumFinishedRange) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFeResult_) field2Length() int { +func (p *TFragmentInstanceReport) field3Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 2) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetLoadedRows() { + l += bthrift.Binary.FieldBeginLength("loaded_rows", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.LoadedRows) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { +func (p *TFragmentInstanceReport) field4Length() int { + l := 0 + if p.IsSetLoadedBytes() { + l += bthrift.Binary.FieldBeginLength("loaded_bytes", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.LoadedBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReportExecStatusParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetDb bool = false - var issetSql bool = false + var issetProtocolVersion bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -11573,13 +11666,13 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetUser = true + issetProtocolVersion = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11588,13 +11681,12 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + 
if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetDb = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11603,13 +11695,12 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetSql = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11632,7 +11723,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { @@ -11646,7 +11737,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { @@ -11660,7 +11751,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 7: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { @@ -11673,22 +11764,8 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } case 9: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { @@ -11702,7 +11779,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 10: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { @@ -11716,7 +11793,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 11: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { l, err = p.FastReadField11(buf[offset:]) offset += l if err != nil { @@ -11730,7 +11807,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 12: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField12(buf[offset:]) offset += l if err != nil { @@ -11744,7 +11821,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 13: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField13(buf[offset:]) offset += l if err != nil { @@ -11758,7 +11835,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 14: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField14(buf[offset:]) offset += l if err != nil { @@ -11772,7 +11849,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 15: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField15(buf[offset:]) offset += l if err != nil { @@ -11786,7 +11863,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 16: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField16(buf[offset:]) offset += l if err != nil { @@ -11800,7 +11877,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, 
error) { } } case 17: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField17(buf[offset:]) offset += l if err != nil { @@ -11814,7 +11891,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 18: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField18(buf[offset:]) offset += l if err != nil { @@ -11828,7 +11905,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 19: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField19(buf[offset:]) offset += l if err != nil { @@ -11842,7 +11919,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 20: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField20(buf[offset:]) offset += l if err != nil { @@ -11856,7 +11933,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 21: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField21(buf[offset:]) offset += l if err != nil { @@ -11870,7 +11947,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 22: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField22(buf[offset:]) offset += l if err != nil { @@ -11884,7 +11961,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 23: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField23(buf[offset:]) offset += l if err != nil { @@ -11898,7 +11975,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 24: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField24(buf[offset:]) offset += l if err != nil { @@ -11912,7 +11989,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 25: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField25(buf[offset:]) offset += l if err != nil { @@ -11926,7 +12003,7 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { } } case 26: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField26(buf[offset:]) offset += l if err != nil { @@ -11939,6 +12016,76 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 27: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField27(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 28: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField28(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 29: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField29(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 30: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField30(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 31: + if fieldTypeId == 
thrift.LIST { + l, err = p.FastReadField31(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11959,27 +12106,17 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } - - if !issetDb { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetSql { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportExecStatusParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -11987,279 +12124,330 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpRequest[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReportExecStatusParams[fieldId])) } -func (p *TMasterOpRequest) FastReadField1(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.User = v + p.ProtocolVersion = FrontendServiceVersion(v) } return offset, nil } -func (p *TMasterOpRequest) FastReadField2(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Db = v - } + p.QueryId = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField3(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Sql = v + p.BackendNum = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField4(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField4(buf []byte) (int, error) { offset := 0 - tmp := types.NewTResourceInfo() + tmp := types.NewTUniqueId() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.ResourceInfo = tmp + p.FragmentInstanceId = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField5(buf 
[]byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v - } + p.Status = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField6(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ExecMemLimit = &v + p.Done = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField7(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField7(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := runtimeprofile.NewTRuntimeProfileTree() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.QueryTimeout = &v - } + p.Profile = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField8(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField9(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - p.UserIp = &v - } - return offset, nil -} + p.ErrorLog = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TMasterOpRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 + _elem = v - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + } + + p.ErrorLog = append(p.ErrorLog, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TimeZone = &v - } return offset, nil } -func (p *TMasterOpRequest) FastReadField10(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField10(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - p.StmtId = &v - } - return offset, nil -} + p.DeltaUrls = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TMasterOpRequest) FastReadField11(buf []byte) (int, error) { - offset := 0 + _elem = v - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + } + + p.DeltaUrls = append(p.DeltaUrls, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SqlMode = &v - } return offset, nil } -func (p *TMasterOpRequest) FastReadField12(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField11(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + 
p.LoadCounters = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.LoadCounters[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadMemLimit = &v - } return offset, nil } -func (p *TMasterOpRequest) FastReadField13(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField12(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.EnableStrictMode = &v + p.TrackingUrl = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField14(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField13(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUserIdentity() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ExportFiles = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ExportFiles = append(p.ExportFiles, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.CurrentUserIdent = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField15(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField14(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTTabletCommitInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.CommitInfos = append(p.CommitInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.StmtIdx = &v - } return offset, nil } -func (p *TMasterOpRequest) FastReadField16(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField15(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTQueryOptions() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.LoadedRows = &v + } - p.QueryOptions = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField17(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField16(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.BackendId = &v + } - p.QueryId = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField18(buf []byte) (int, error) { +func (p 
*TReportExecStatusParams) FastReadField17(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.InsertVisibleTimeoutMs = &v + p.LoadedBytes = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField19(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField18(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.SessionVariables = make(map[string]string, size) + p.ErrorTabletInfos = make([]*types.TErrorTabletInfo, 0, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem := types.NewTErrorTabletInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _val = v - } - p.SessionVariables[_key] = _val + p.ErrorTabletInfos = append(p.ErrorTabletInfos, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -12267,52 +12455,133 @@ func (p *TMasterOpRequest) FastReadField19(buf []byte) (int, error) { return offset, nil } -func (p *TMasterOpRequest) FastReadField20(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField19(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FoldConstantByBe = &v + p.FragmentId = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField21(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField20(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := palointernalservice.TQueryType(v) + p.QueryType = &tmp + + } + return offset, nil +} + +func (p *TReportExecStatusParams) FastReadField21(buf []byte) (int, error) { + offset := 0 + + tmp := runtimeprofile.NewTRuntimeProfileTree() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadChannelProfile = tmp + return offset, nil +} + +func (p *TReportExecStatusParams) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FinishedScanRanges = &v + + } + return offset, nil +} + +func (p *TReportExecStatusParams) FastReadField23(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.TraceCarrier = make(map[string]string, size) + p.DetailedReport = make([]*TDetailedReportParams, 0, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem := NewTDetailedReportParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l + } - _key = v + 
p.DetailedReport = append(p.DetailedReport, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} - } +func (p *TReportExecStatusParams) FastReadField24(buf []byte) (int, error) { + offset := 0 - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTQueryStatistics() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryStatistics = tmp + return offset, nil +} + +func (p *TReportExecStatusParams) FastReadField25(buf []byte) (int, error) { + offset := 0 + + tmp := NewTReportWorkloadRuntimeStatusParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.ReportWorkloadRuntimeStatus = tmp + return offset, nil +} + +func (p *TReportExecStatusParams) FastReadField26(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HivePartitionUpdates = make([]*datasinks.THivePartitionUpdate, 0, size) + for i := 0; i < size; i++ { + _elem := datasinks.NewTHivePartitionUpdate() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _val = v - } - p.TraceCarrier[_key] = _val + p.HivePartitionUpdates = append(p.HivePartitionUpdates, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -12320,115 +12589,147 @@ func (p *TMasterOpRequest) FastReadField21(buf []byte) (int, error) { return offset, nil } -func (p *TMasterOpRequest) FastReadField22(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField27(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTQueryProfile() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ClientNodeHost = &v - } + p.QueryProfile = tmp return offset, nil } -func (p *TMasterOpRequest) FastReadField23(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField28(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.IcebergCommitDatas = make([]*datasinks.TIcebergCommitData, 0, size) + for i := 0; i < size; i++ { + _elem := datasinks.NewTIcebergCommitData() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.IcebergCommitDatas = append(p.IcebergCommitDatas, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ClientNodePort = &v - } return offset, nil } -func (p *TMasterOpRequest) FastReadField24(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField29(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SyncJournalOnly = &v + p.TxnId = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField25(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField30(buf 
[]byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DefaultCatalog = &v + p.Label = &v } return offset, nil } -func (p *TMasterOpRequest) FastReadField26(buf []byte) (int, error) { +func (p *TReportExecStatusParams) FastReadField31(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FragmentInstanceReports = make([]*TFragmentInstanceReport, 0, size) + for i := 0; i < size; i++ { + _elem := NewTFragmentInstanceReport() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FragmentInstanceReports = append(p.FragmentInstanceReports, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DefaultDatabase = &v - } return offset, nil } // for compatibility -func (p *TMasterOpRequest) FastWrite(buf []byte) int { +func (p *TReportExecStatusParams) FastWrite(buf []byte) int { return 0 } -func (p *TMasterOpRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMasterOpRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReportExecStatusParams") if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) offset += p.fastWriteField23(buf[offset:], binaryWriter) offset += p.fastWriteField24(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], 
binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) offset += p.fastWriteField25(buf[offset:], binaryWriter) offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField31(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMasterOpRequest) BLength() int { +func (p *TReportExecStatusParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMasterOpRequest") + l += bthrift.Binary.StructBeginLength("TReportExecStatusParams") if p != nil { l += p.field1Length() l += p.field2Length() @@ -12437,7 +12738,6 @@ func (p *TMasterOpRequest) BLength() int { l += p.field5Length() l += p.field6Length() l += p.field7Length() - l += p.field8Length() l += p.field9Length() l += p.field10Length() l += p.field11Length() @@ -12456,608 +12756,779 @@ func (p *TMasterOpRequest) BLength() int { l += p.field24Length() l += p.field25Length() l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field31Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMasterOpRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TMasterOpRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 2) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TMasterOpRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sql", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Sql) + if p.IsSetBackendNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_num", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BackendNum) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + 
} return offset } -func (p *TMasterOpRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetResourceInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resourceInfo", thrift.STRUCT, 4) - offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetFragmentInstanceId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 4) + offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 5) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetExecMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "execMemLimit", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExecMemLimit) + if p.IsSetDone() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "done", thrift.BOOL, 6) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Done) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryTimeout", thrift.I32, 7) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.QueryTimeout) - + if p.IsSetProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "profile", thrift.STRUCT, 7) + offset += p.Profile.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + if p.IsSetErrorLog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "error_log", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ErrorLog { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTimeZone() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_zone", thrift.STRING, 9) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TimeZone) + if p.IsSetDeltaUrls() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delta_urls", thrift.LIST, 10) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.DeltaUrls { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStmtId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "stmt_id", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.StmtId) + if p.IsSetLoadCounters() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_counters", thrift.MAP, 11) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.LoadCounters { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSqlMode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sqlMode", thrift.I64, 11) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.SqlMode) + if p.IsSetTrackingUrl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tracking_url", thrift.STRING, 12) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrackingUrl) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadMemLimit", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadMemLimit) + if p.IsSetExportFiles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "export_files", thrift.LIST, 13) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ExportFiles { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, 
length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnableStrictMode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enableStrictMode", thrift.BOOL, 13) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableStrictMode) - + if p.IsSetCommitInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commitInfos", thrift.LIST, 14) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.CommitInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCurrentUserIdent() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 14) - offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetLoadedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_rows", thrift.I64, 15) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStmtIdx() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "stmtIdx", thrift.I32, 15) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.StmtIdx) + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 16) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryOptions() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 16) - offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetLoadedBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_bytes", thrift.I64, 17) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedBytes) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 17) - offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetErrorTabletInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"errorTabletInfos", thrift.LIST, 18) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.ErrorTabletInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetInsertVisibleTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "insert_visible_timeout_ms", thrift.I64, 18) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.InsertVisibleTimeoutMs) + if p.IsSetFragmentId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_id", thrift.I32, 19) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSessionVariables() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "session_variables", thrift.MAP, 19) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) - var length int - for k, v := range p.SessionVariables { - length++ - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + if p.IsSetQueryType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_type", thrift.I32, 20) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.QueryType)) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) +func (p *TReportExecStatusParams) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadChannelProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadChannelProfile", thrift.STRUCT, 21) + offset += p.LoadChannelProfile.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFoldConstantByBe() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "foldConstantByBe", thrift.BOOL, 20) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.FoldConstantByBe) + if p.IsSetFinishedScanRanges() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finished_scan_ranges", thrift.I32, 22) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FinishedScanRanges) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTraceCarrier() { 
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trace_carrier", thrift.MAP, 21) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + if p.IsSetDetailedReport() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "detailed_report", thrift.LIST, 23) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for k, v := range p.TraceCarrier { + for _, v := range p.DetailedReport { length++ - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetClientNodeHost() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clientNodeHost", thrift.STRING, 22) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClientNodeHost) - + if p.IsSetQueryStatistics() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_statistics", thrift.STRUCT, 24) + offset += p.QueryStatistics.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetClientNodePort() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clientNodePort", thrift.I32, 23) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.ClientNodePort) - + if p.IsSetReportWorkloadRuntimeStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "report_workload_runtime_status", thrift.STRUCT, 25) + offset += p.ReportWorkloadRuntimeStatus.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSyncJournalOnly() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "syncJournalOnly", thrift.BOOL, 24) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.SyncJournalOnly) - + if p.IsSetHivePartitionUpdates() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_partition_updates", thrift.LIST, 26) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.HivePartitionUpdates { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField25(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultCatalog() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultCatalog", thrift.STRING, 25) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultCatalog) + if p.IsSetQueryProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_profile", thrift.STRUCT, 27) + offset += p.QueryProfile.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} +func (p *TReportExecStatusParams) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIcebergCommitDatas() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_commit_datas", thrift.LIST, 28) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.IcebergCommitDatas { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportExecStatusParams) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultDatabase() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultDatabase", thrift.STRING, 26) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultDatabase) + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 29) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpRequest) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.User) +func (p *TReportExecStatusParams) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 30) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TMasterOpRequest) field2Length() int { +func (p *TReportExecStatusParams) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentInstanceReports() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_reports", thrift.LIST, 31) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FragmentInstanceReports { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReportExecStatusParams) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.Db) + l += 
bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) l += bthrift.Binary.FieldEndLength() return l } -func (p *TMasterOpRequest) field3Length() int { +func (p *TReportExecStatusParams) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("sql", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Sql) - - l += bthrift.Binary.FieldEndLength() + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 2) + l += p.QueryId.BLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TMasterOpRequest) field4Length() int { +func (p *TReportExecStatusParams) field3Length() int { l := 0 - if p.IsSetResourceInfo() { - l += bthrift.Binary.FieldBeginLength("resourceInfo", thrift.STRUCT, 4) - l += p.ResourceInfo.BLength() + if p.IsSetBackendNum() { + l += bthrift.Binary.FieldBeginLength("backend_num", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.BackendNum) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field5Length() int { +func (p *TReportExecStatusParams) field4Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + if p.IsSetFragmentInstanceId() { + l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 4) + l += p.FragmentInstanceId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TReportExecStatusParams) field5Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 5) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field6Length() int { +func (p *TReportExecStatusParams) field6Length() int { l := 0 - if p.IsSetExecMemLimit() { - l += bthrift.Binary.FieldBeginLength("execMemLimit", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.ExecMemLimit) + if p.IsSetDone() { + l += bthrift.Binary.FieldBeginLength("done", thrift.BOOL, 6) + l += bthrift.Binary.BoolLength(*p.Done) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field7Length() int { +func (p *TReportExecStatusParams) field7Length() int { l := 0 - if p.IsSetQueryTimeout() { - l += bthrift.Binary.FieldBeginLength("queryTimeout", thrift.I32, 7) - l += bthrift.Binary.I32Length(*p.QueryTimeout) - + if p.IsSetProfile() { + l += bthrift.Binary.FieldBeginLength("profile", thrift.STRUCT, 7) + l += p.Profile.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field8Length() int { +func (p *TReportExecStatusParams) field9Length() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + if p.IsSetErrorLog() { + l += bthrift.Binary.FieldBeginLength("error_log", thrift.LIST, 9) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ErrorLog)) + for _, v := range p.ErrorLog { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field9Length() int { +func (p *TReportExecStatusParams) field10Length() int { l := 0 - if p.IsSetTimeZone() { - l += bthrift.Binary.FieldBeginLength("time_zone", thrift.STRING, 9) - l += bthrift.Binary.StringLengthNocopy(*p.TimeZone) + if p.IsSetDeltaUrls() { + l += bthrift.Binary.FieldBeginLength("delta_urls", 
thrift.LIST, 10) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.DeltaUrls)) + for _, v := range p.DeltaUrls { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field10Length() int { +func (p *TReportExecStatusParams) field11Length() int { l := 0 - if p.IsSetStmtId() { - l += bthrift.Binary.FieldBeginLength("stmt_id", thrift.I64, 10) - l += bthrift.Binary.I64Length(*p.StmtId) + if p.IsSetLoadCounters() { + l += bthrift.Binary.FieldBeginLength("load_counters", thrift.MAP, 11) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.LoadCounters)) + for k, v := range p.LoadCounters { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field11Length() int { +func (p *TReportExecStatusParams) field12Length() int { l := 0 - if p.IsSetSqlMode() { - l += bthrift.Binary.FieldBeginLength("sqlMode", thrift.I64, 11) - l += bthrift.Binary.I64Length(*p.SqlMode) + if p.IsSetTrackingUrl() { + l += bthrift.Binary.FieldBeginLength("tracking_url", thrift.STRING, 12) + l += bthrift.Binary.StringLengthNocopy(*p.TrackingUrl) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field12Length() int { +func (p *TReportExecStatusParams) field13Length() int { l := 0 - if p.IsSetLoadMemLimit() { - l += bthrift.Binary.FieldBeginLength("loadMemLimit", thrift.I64, 12) - l += bthrift.Binary.I64Length(*p.LoadMemLimit) + if p.IsSetExportFiles() { + l += bthrift.Binary.FieldBeginLength("export_files", thrift.LIST, 13) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ExportFiles)) + for _, v := range p.ExportFiles { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field13Length() int { +func (p *TReportExecStatusParams) field14Length() int { l := 0 - if p.IsSetEnableStrictMode() { - l += bthrift.Binary.FieldBeginLength("enableStrictMode", thrift.BOOL, 13) - l += bthrift.Binary.BoolLength(*p.EnableStrictMode) - + if p.IsSetCommitInfos() { + l += bthrift.Binary.FieldBeginLength("commitInfos", thrift.LIST, 14) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) + for _, v := range p.CommitInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field14Length() int { +func (p *TReportExecStatusParams) field15Length() int { l := 0 - if p.IsSetCurrentUserIdent() { - l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 14) - l += p.CurrentUserIdent.BLength() + if p.IsSetLoadedRows() { + l += bthrift.Binary.FieldBeginLength("loaded_rows", thrift.I64, 15) + l += bthrift.Binary.I64Length(*p.LoadedRows) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field15Length() int { +func (p *TReportExecStatusParams) field16Length() int { l := 0 - if p.IsSetStmtIdx() { - l += bthrift.Binary.FieldBeginLength("stmtIdx", thrift.I32, 15) - l += bthrift.Binary.I32Length(*p.StmtIdx) + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 16) + l += bthrift.Binary.I64Length(*p.BackendId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field16Length() int { +func (p 
*TReportExecStatusParams) field17Length() int { l := 0 - if p.IsSetQueryOptions() { - l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 16) - l += p.QueryOptions.BLength() + if p.IsSetLoadedBytes() { + l += bthrift.Binary.FieldBeginLength("loaded_bytes", thrift.I64, 17) + l += bthrift.Binary.I64Length(*p.LoadedBytes) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field17Length() int { +func (p *TReportExecStatusParams) field18Length() int { l := 0 - if p.IsSetQueryId() { - l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 17) - l += p.QueryId.BLength() + if p.IsSetErrorTabletInfos() { + l += bthrift.Binary.FieldBeginLength("errorTabletInfos", thrift.LIST, 18) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ErrorTabletInfos)) + for _, v := range p.ErrorTabletInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field18Length() int { +func (p *TReportExecStatusParams) field19Length() int { l := 0 - if p.IsSetInsertVisibleTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("insert_visible_timeout_ms", thrift.I64, 18) - l += bthrift.Binary.I64Length(*p.InsertVisibleTimeoutMs) + if p.IsSetFragmentId() { + l += bthrift.Binary.FieldBeginLength("fragment_id", thrift.I32, 19) + l += bthrift.Binary.I32Length(*p.FragmentId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field19Length() int { +func (p *TReportExecStatusParams) field20Length() int { l := 0 - if p.IsSetSessionVariables() { - l += bthrift.Binary.FieldBeginLength("session_variables", thrift.MAP, 19) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.SessionVariables)) - for k, v := range p.SessionVariables { - - l += bthrift.Binary.StringLengthNocopy(k) + if p.IsSetQueryType() { + l += bthrift.Binary.FieldBeginLength("query_type", thrift.I32, 20) + l += bthrift.Binary.I32Length(int32(*p.QueryType)) - l += bthrift.Binary.StringLengthNocopy(v) + l += bthrift.Binary.FieldEndLength() + } + return l +} - } - l += bthrift.Binary.MapEndLength() +func (p *TReportExecStatusParams) field21Length() int { + l := 0 + if p.IsSetLoadChannelProfile() { + l += bthrift.Binary.FieldBeginLength("loadChannelProfile", thrift.STRUCT, 21) + l += p.LoadChannelProfile.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field20Length() int { +func (p *TReportExecStatusParams) field22Length() int { l := 0 - if p.IsSetFoldConstantByBe() { - l += bthrift.Binary.FieldBeginLength("foldConstantByBe", thrift.BOOL, 20) - l += bthrift.Binary.BoolLength(*p.FoldConstantByBe) + if p.IsSetFinishedScanRanges() { + l += bthrift.Binary.FieldBeginLength("finished_scan_ranges", thrift.I32, 22) + l += bthrift.Binary.I32Length(*p.FinishedScanRanges) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field21Length() int { +func (p *TReportExecStatusParams) field23Length() int { l := 0 - if p.IsSetTraceCarrier() { - l += bthrift.Binary.FieldBeginLength("trace_carrier", thrift.MAP, 21) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.TraceCarrier)) - for k, v := range p.TraceCarrier { + if p.IsSetDetailedReport() { + l += bthrift.Binary.FieldBeginLength("detailed_report", thrift.LIST, 23) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DetailedReport)) + for _, v := range p.DetailedReport { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += 
bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.StringLengthNocopy(k) +func (p *TReportExecStatusParams) field24Length() int { + l := 0 + if p.IsSetQueryStatistics() { + l += bthrift.Binary.FieldBeginLength("query_statistics", thrift.STRUCT, 24) + l += p.QueryStatistics.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.StringLengthNocopy(v) +func (p *TReportExecStatusParams) field25Length() int { + l := 0 + if p.IsSetReportWorkloadRuntimeStatus() { + l += bthrift.Binary.FieldBeginLength("report_workload_runtime_status", thrift.STRUCT, 25) + l += p.ReportWorkloadRuntimeStatus.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TReportExecStatusParams) field26Length() int { + l := 0 + if p.IsSetHivePartitionUpdates() { + l += bthrift.Binary.FieldBeginLength("hive_partition_updates", thrift.LIST, 26) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HivePartitionUpdates)) + for _, v := range p.HivePartitionUpdates { + l += v.BLength() } - l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field22Length() int { +func (p *TReportExecStatusParams) field27Length() int { l := 0 - if p.IsSetClientNodeHost() { - l += bthrift.Binary.FieldBeginLength("clientNodeHost", thrift.STRING, 22) - l += bthrift.Binary.StringLengthNocopy(*p.ClientNodeHost) - + if p.IsSetQueryProfile() { + l += bthrift.Binary.FieldBeginLength("query_profile", thrift.STRUCT, 27) + l += p.QueryProfile.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field23Length() int { +func (p *TReportExecStatusParams) field28Length() int { l := 0 - if p.IsSetClientNodePort() { - l += bthrift.Binary.FieldBeginLength("clientNodePort", thrift.I32, 23) - l += bthrift.Binary.I32Length(*p.ClientNodePort) - + if p.IsSetIcebergCommitDatas() { + l += bthrift.Binary.FieldBeginLength("iceberg_commit_datas", thrift.LIST, 28) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.IcebergCommitDatas)) + for _, v := range p.IcebergCommitDatas { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field24Length() int { +func (p *TReportExecStatusParams) field29Length() int { l := 0 - if p.IsSetSyncJournalOnly() { - l += bthrift.Binary.FieldBeginLength("syncJournalOnly", thrift.BOOL, 24) - l += bthrift.Binary.BoolLength(*p.SyncJournalOnly) + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 29) + l += bthrift.Binary.I64Length(*p.TxnId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field25Length() int { +func (p *TReportExecStatusParams) field30Length() int { l := 0 - if p.IsSetDefaultCatalog() { - l += bthrift.Binary.FieldBeginLength("defaultCatalog", thrift.STRING, 25) - l += bthrift.Binary.StringLengthNocopy(*p.DefaultCatalog) + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 30) + l += bthrift.Binary.StringLengthNocopy(*p.Label) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpRequest) field26Length() int { +func (p *TReportExecStatusParams) field31Length() int { l := 0 - if p.IsSetDefaultDatabase() { - l += bthrift.Binary.FieldBeginLength("defaultDatabase", thrift.STRING, 26) - l += bthrift.Binary.StringLengthNocopy(*p.DefaultDatabase) - + if p.IsSetFragmentInstanceReports() { + l += 
bthrift.Binary.FieldBeginLength("fragment_instance_reports", thrift.LIST, 31) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FragmentInstanceReports)) + for _, v := range p.FragmentInstanceReports { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { +func (p *TFeResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetColumnName bool = false - var issetColumnType bool = false + var issetProtocolVersion bool = false + var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13075,13 +13546,13 @@ func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetColumnName = true + issetProtocolVersion = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13096,7 +13567,7 @@ func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetColumnType = true + issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13104,9 +13575,9 @@ func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 3: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField3(buf[offset:]) + case 1000: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1000(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -13118,9 +13589,9 @@ func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) + case 1001: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1001(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -13152,12 +13623,12 @@ func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetColumnName { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } - if !issetColumnType { + if !issetStatus { fieldId = 2 goto RequiredFieldNotSetError } @@ -13167,7 +13638,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDefinition[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFeResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -13175,182 +13646,179 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TColumnDefinition[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFeResult_[fieldId])) } -func (p *TColumnDefinition) FastReadField1(buf []byte) (int, 
error) { +func (p *TFeResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ColumnName = v + p.ProtocolVersion = FrontendServiceVersion(v) } return offset, nil } -func (p *TColumnDefinition) FastReadField2(buf []byte) (int, error) { +func (p *TFeResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := types.NewTColumnType() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.ColumnType = tmp + p.Status = tmp return offset, nil } -func (p *TColumnDefinition) FastReadField3(buf []byte) (int, error) { +func (p *TFeResult_) FastReadField1000(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := types.TAggregationType(v) - p.AggType = &tmp + p.CloudCluster = &v } return offset, nil } -func (p *TColumnDefinition) FastReadField4(buf []byte) (int, error) { +func (p *TFeResult_) FastReadField1001(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DefaultValue = &v + p.NoAuth = &v } return offset, nil } // for compatibility -func (p *TColumnDefinition) FastWrite(buf []byte) int { +func (p *TFeResult_) FastWrite(buf []byte) int { return 0 } -func (p *TColumnDefinition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFeResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TColumnDefinition") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFeResult") if p != nil { + offset += p.fastWriteField1001(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TColumnDefinition) BLength() int { +func (p *TFeResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TColumnDefinition") + l += bthrift.Binary.StructBeginLength("TFeResult") if p != nil { l += p.field1Length() l += p.field2Length() - l += p.field3Length() - l += p.field4Length() + l += p.field1000Length() + l += p.field1001Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TColumnDefinition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFeResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnName", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ColumnName) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocolVersion", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TColumnDefinition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFeResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnType", thrift.STRUCT, 2) - offset += p.ColumnType.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 2) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TColumnDefinition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFeResult_) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAggType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "aggType", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.AggType)) + if p.IsSetCloudCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_cluster", thrift.STRING, 1000) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudCluster) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TColumnDefinition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFeResult_) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultValue() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultValue", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultValue) + if p.IsSetNoAuth() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "noAuth", thrift.BOOL, 1001) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.NoAuth) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TColumnDefinition) field1Length() int { +func (p *TFeResult_) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("columnName", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.ColumnName) + l += bthrift.Binary.FieldBeginLength("protocolVersion", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) l += bthrift.Binary.FieldEndLength() return l } -func (p *TColumnDefinition) field2Length() int { +func (p *TFeResult_) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("columnType", thrift.STRUCT, 2) - l += p.ColumnType.BLength() + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 2) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TColumnDefinition) field3Length() int { +func (p *TFeResult_) field1000Length() int { l := 0 - if p.IsSetAggType() { - l += bthrift.Binary.FieldBeginLength("aggType", thrift.I32, 3) - l += bthrift.Binary.I32Length(int32(*p.AggType)) + if p.IsSetCloudCluster() { + l += bthrift.Binary.FieldBeginLength("cloud_cluster", thrift.STRING, 1000) + l += bthrift.Binary.StringLengthNocopy(*p.CloudCluster) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TColumnDefinition) field4Length() int { +func (p *TFeResult_) field1001Length() int { l := 0 - if p.IsSetDefaultValue() { - l += bthrift.Binary.FieldBeginLength("defaultValue", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.DefaultValue) + if p.IsSetNoAuth() { + l += bthrift.Binary.FieldBeginLength("noAuth", thrift.BOOL, 1001) + l += 
bthrift.Binary.BoolLength(*p.NoAuth) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TShowResultSetMetaData) FastRead(buf []byte) (int, error) { +func (p *TSubTxnInfo) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetColumns bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13368,13 +13836,54 @@ func (p *TShowResultSetMetaData) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetColumns = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13402,28 +13911,48 @@ func (p *TShowResultSetMetaData) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetColumns { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSetMetaData[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSubTxnInfo[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSetMetaData[fieldId])) } -func (p *TShowResultSetMetaData) FastReadField1(buf []byte) (int, error) { +func (p *TSubTxnInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SubTxnId = &v + + } + return offset, nil +} + +func (p *TSubTxnInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TSubTxnInfo) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := 
bthrift.Binary.ReadListBegin(buf[offset:]) @@ -13431,16 +13960,16 @@ func (p *TShowResultSetMetaData) FastReadField1(buf []byte) (int, error) { if err != nil { return offset, err } - p.Columns = make([]*TColumnDefinition, 0, size) + p.TabletCommitInfos = make([]*types.TTabletCommitInfo, 0, size) for i := 0; i < size; i++ { - _elem := NewTColumnDefinition() + _elem := types.NewTTabletCommitInfo() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Columns = append(p.Columns, _elem) + p.TabletCommitInfos = append(p.TabletCommitInfos, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -13450,69 +13979,158 @@ func (p *TShowResultSetMetaData) FastReadField1(buf []byte) (int, error) { return offset, nil } +func (p *TSubTxnInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TSubTxnType(v) + p.SubTxnType = &tmp + + } + return offset, nil +} + // for compatibility -func (p *TShowResultSetMetaData) FastWrite(buf []byte) int { +func (p *TSubTxnInfo) FastWrite(buf []byte) int { return 0 } -func (p *TShowResultSetMetaData) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSubTxnInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowResultSetMetaData") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSubTxnInfo") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TShowResultSetMetaData) BLength() int { +func (p *TSubTxnInfo) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TShowResultSetMetaData") + l += bthrift.Binary.StructBeginLength("TSubTxnInfo") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TShowResultSetMetaData) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSubTxnInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Columns { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSubTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.SubTxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TShowResultSetMetaData) field1Length() int { +func (p *TSubTxnInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"table_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSubTxnInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletCommitInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_commit_infos", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TabletCommitInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSubTxnInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_type", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.SubTxnType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSubTxnInfo) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) - for _, v := range p.Columns { - l += v.BLength() + if p.IsSetSubTxnId() { + l += bthrift.Binary.FieldBeginLength("sub_txn_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.SubTxnId) + + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TShowResultSet) FastRead(buf []byte) (int, error) { +func (p *TSubTxnInfo) field2Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSubTxnInfo) field3Length() int { + l := 0 + if p.IsSetTabletCommitInfos() { + l += bthrift.Binary.FieldBeginLength("tablet_commit_infos", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TabletCommitInfos)) + for _, v := range p.TabletCommitInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSubTxnInfo) field4Length() int { + l := 0 + if p.IsSetSubTxnType() { + l += bthrift.Binary.FieldBeginLength("sub_txn_type", thrift.I32, 4) + l += bthrift.Binary.I32Length(int32(*p.SubTxnType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTxnLoadInfo) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetMetaData bool = false - var issetResultRows bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13530,13 +14148,12 @@ func (p *TShowResultSet) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetMetaData = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13545,13 +14162,68 @@ func (p *TShowResultSet) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = 
p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetResultRows = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13579,81 +14251,104 @@ func (p *TShowResultSet) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetMetaData { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetResultRows { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSet[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnLoadInfo[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSet[fieldId])) } -func (p *TShowResultSet) FastReadField1(buf []byte) (int, error) { +func (p *TTxnLoadInfo) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTShowResultSetMetaData() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Label = &v + } - p.MetaData = tmp return offset, nil } -func (p *TShowResultSet) FastReadField2(buf []byte) (int, error) { +func (p *TTxnLoadInfo) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err + } else { + offset += l + p.DbId = &v + } - p.ResultRows = make([][]string, 0, size) - for i := 0; i < size; i++ { - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + return offset, nil +} + 
+func (p *TTxnLoadInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { offset += l - if err != nil { - return offset, err - } - _elem := make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem1 string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + p.TxnId = &v - _elem1 = v + } + return offset, nil +} - } +func (p *TTxnLoadInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 - _elem = append(_elem, _elem1) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TimeoutTimestamp = &v + + } + return offset, nil +} + +func (p *TTxnLoadInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AllSubTxnNum = &v + + } + return offset, nil +} + +func (p *TTxnLoadInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SubTxnInfos = make([]*TSubTxnInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTSubTxnInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.ResultRows = append(p.ResultRows, _elem) + p.SubTxnInfos = append(p.SubTxnInfos, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -13664,100 +14359,190 @@ func (p *TShowResultSet) FastReadField2(buf []byte) (int, error) { } // for compatibility -func (p *TShowResultSet) FastWrite(buf []byte) int { +func (p *TTxnLoadInfo) FastWrite(buf []byte) int { return 0 } -func (p *TShowResultSet) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTxnLoadInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowResultSet") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTxnLoadInfo") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TShowResultSet) BLength() int { +func (p *TTxnLoadInfo) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TShowResultSet") + l += bthrift.Binary.StructBeginLength("TTxnLoadInfo") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TShowResultSet) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTxnLoadInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metaData", 
thrift.STRUCT, 1) - offset += p.MetaData.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TShowResultSet) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTxnLoadInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resultRows", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) - var length int - for _, v := range p.ResultRows { - length++ + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTxnLoadInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTxnLoadInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeoutTimestamp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeoutTimestamp", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TimeoutTimestamp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTxnLoadInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAllSubTxnNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "allSubTxnNum", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AllSubTxnNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTxnLoadInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "subTxnInfos", thrift.LIST, 6) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range v { + for _, v := range p.SubTxnInfos { length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TShowResultSet) field1Length() int { +func (p *TTxnLoadInfo) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("metaData", thrift.STRUCT, 1) - l += p.MetaData.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetLabel() { + l += 
bthrift.Binary.FieldBeginLength("label", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TShowResultSet) field2Length() int { +func (p *TTxnLoadInfo) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("resultRows", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultRows)) - for _, v := range p.ResultRows { - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) - for _, v := range v { - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTxnLoadInfo) field3Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TTxnLoadInfo) field4Length() int { + l := 0 + if p.IsSetTimeoutTimestamp() { + l += bthrift.Binary.FieldBeginLength("timeoutTimestamp", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.TimeoutTimestamp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTxnLoadInfo) field5Length() int { + l := 0 + if p.IsSetAllSubTxnNum() { + l += bthrift.Binary.FieldBeginLength("allSubTxnNum", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.AllSubTxnNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTxnLoadInfo) field6Length() int { + l := 0 + if p.IsSetSubTxnInfos() { + l += bthrift.Binary.FieldBeginLength("subTxnInfos", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.SubTxnInfos)) + for _, v := range p.SubTxnInfos { + l += v.BLength() } l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetMaxJournalId bool = false - var issetPacket bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13775,13 +14560,12 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetMaxJournalId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13790,13 +14574,12 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetPacket = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -13805,7 +14588,7 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -13818,9 +14601,9 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField4(buf[offset:]) + case 5: + if fieldTypeId == thrift.BOOL { + 
l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -13832,9 +14615,23 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -13866,241 +14663,277 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetMaxJournalId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetPacket { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGroupCommitInfo[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpResult_[fieldId])) } -func (p *TMasterOpResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MaxJournalId = v + p.GetGroupCommitLoadBeId = &v } return offset, nil } -func (p *TMasterOpResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.GroupCommitLoadTableId = &v - p.Packet = []byte(v) + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v } return offset, nil } -func (p *TMasterOpResult_) FastReadField3(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastReadField5(buf []byte) (int, error) { offset := 0 - tmp := NewTShowResultSet() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.UpdateLoadData = &v + } - p.ResultSet = tmp return offset, nil } -func (p *TMasterOpResult_) 
FastReadField4(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastReadField6(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.TableId = &v + } - p.QueryId = tmp return offset, nil } -func (p *TMasterOpResult_) FastReadField5(buf []byte) (int, error) { +func (p *TGroupCommitInfo) FastReadField7(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Status = &v + p.ReceiveData = &v } return offset, nil } // for compatibility -func (p *TMasterOpResult_) FastWrite(buf []byte) int { +func (p *TGroupCommitInfo) FastWrite(buf []byte) int { return 0 } -func (p *TMasterOpResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMasterOpResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGroupCommitInfo") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMasterOpResult_) BLength() int { +func (p *TGroupCommitInfo) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMasterOpResult") + l += bthrift.Binary.StructBeginLength("TGroupCommitInfo") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMasterOpResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "maxJournalId", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxJournalId) + if p.IsSetGetGroupCommitLoadBeId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "getGroupCommitLoadBeId", thrift.BOOL, 1) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.GetGroupCommitLoadBeId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TMasterOpResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "packet", thrift.STRING, 2) - offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Packet)) + if p.IsSetGroupCommitLoadTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"groupCommitLoadTableId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitLoadTableId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TMasterOpResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetResultSet() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resultSet", thrift.STRUCT, 3) - offset += p.ResultSet.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryId", thrift.STRUCT, 4) - offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetUpdateLoadData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "updateLoadData", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UpdateLoadData) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGroupCommitInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Status) + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tableId", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMasterOpResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("maxJournalId", thrift.I64, 1) - l += bthrift.Binary.I64Length(p.MaxJournalId) +func (p *TGroupCommitInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReceiveData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receiveData", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceiveData) - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TMasterOpResult_) field2Length() int { +func (p *TGroupCommitInfo) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("packet", thrift.STRING, 2) - l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Packet)) + if p.IsSetGetGroupCommitLoadBeId() { + l += bthrift.Binary.FieldBeginLength("getGroupCommitLoadBeId", thrift.BOOL, 1) + l += bthrift.Binary.BoolLength(*p.GetGroupCommitLoadBeId) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TMasterOpResult_) field3Length() int { +func (p *TGroupCommitInfo) field2Length() int { l := 0 - if p.IsSetResultSet() { - l += bthrift.Binary.FieldBeginLength("resultSet", thrift.STRUCT, 3) - l += p.ResultSet.BLength() + if p.IsSetGroupCommitLoadTableId() 
{ + l += bthrift.Binary.FieldBeginLength("groupCommitLoadTableId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.GroupCommitLoadTableId) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpResult_) field4Length() int { +func (p *TGroupCommitInfo) field3Length() int { l := 0 - if p.IsSetQueryId() { - l += bthrift.Binary.FieldBeginLength("queryId", thrift.STRUCT, 4) - l += p.QueryId.BLength() + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMasterOpResult_) field5Length() int { +func (p *TGroupCommitInfo) field5Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.Status) + if p.IsSetUpdateLoadData() { + l += bthrift.Binary.FieldBeginLength("updateLoadData", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.UpdateLoadData) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { +func (p *TGroupCommitInfo) field6Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("tableId", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field7Length() int { + l := 0 + if p.IsSetReceiveData() { + l += bthrift.Binary.FieldBeginLength("receiveData", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.ReceiveData) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false - var issetTaskId bool = false - var issetTaskStatus bool = false + var issetUser bool = false + var issetDb bool = false + var issetSql bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -14118,13 +14951,13 @@ func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetProtocolVersion = true + issetUser = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14133,13 +14966,13 @@ func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTaskId = true + issetDb = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14148,13 +14981,13 @@ func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTaskStatus = true + issetSql = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14162,208 +14995,9 @@ func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = 
bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetTaskId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetTaskStatus { - fieldId = 3 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateExportTaskStatusRequest[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TUpdateExportTaskStatusRequest[fieldId])) -} - -func (p *TUpdateExportTaskStatusRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ProtocolVersion = FrontendServiceVersion(v) - - } - return offset, nil -} - -func (p *TUpdateExportTaskStatusRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.TaskId = tmp - return offset, nil -} - -func (p *TUpdateExportTaskStatusRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - tmp := palointernalservice.NewTExportStatusResult_() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.TaskStatus = tmp - return offset, nil -} - -// for compatibility -func (p *TUpdateExportTaskStatusRequest) FastWrite(buf []byte) int { - return 0 -} - -func (p *TUpdateExportTaskStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUpdateExportTaskStatusRequest") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TUpdateExportTaskStatusRequest) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TUpdateExportTaskStatusRequest") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TUpdateExportTaskStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocolVersion", thrift.I32, 1) - offset += 
bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TUpdateExportTaskStatusRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "taskId", thrift.STRUCT, 2) - offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TUpdateExportTaskStatusRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "taskStatus", thrift.STRUCT, 3) - offset += p.TaskStatus.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TUpdateExportTaskStatusRequest) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("protocolVersion", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TUpdateExportTaskStatusRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("taskId", thrift.STRUCT, 2) - l += p.TaskId.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TUpdateExportTaskStatusRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("taskStatus", thrift.STRUCT, 3) - l += p.TaskStatus.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetLabel bool = false - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14375,14 +15009,13 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: + case 5: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) + l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetUser = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14390,14 +15023,13 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetPasswd = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14405,14 +15037,13 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) 
offset += l if err != nil { goto ReadFieldError } - issetDb = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14420,14 +15051,13 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: + case 8: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) + l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTbl = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14435,9 +15065,9 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 6: + case 9: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) + l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14449,14 +15079,13 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetLabel = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14464,9 +15093,9 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 8: + case 11: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) + l, err = p.FastReadField11(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14478,9 +15107,9 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 9: + case 12: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) + l, err = p.FastReadField12(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14492,9 +15121,9 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 10: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField10(buf[offset:]) + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14506,9 +15135,9 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: + case 14: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField11(buf[offset:]) + l, err = p.FastReadField14(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14520,9 +15149,107 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 12: + case 15: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.I64 { + l, err = 
p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 21: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 22: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField12(buf[offset:]) + l, err = p.FastReadField22(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -14534,109 +15261,198 @@ func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + case 23: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetLabel { - fieldId = 7 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginRequest[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginRequest[fieldId])) -} - -func (p *TLoadTxnBeginRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - - } - return offset, nil -} - 
-func (p *TLoadTxnBeginRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.User = v - - } - return offset, nil -} - -func (p *TLoadTxnBeginRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Passwd = v - - } - return offset, nil + case 24: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField24(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 25: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField25(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 26: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField26(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 27: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField27(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 28: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField28(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 29: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField29(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 30: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField30(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1001: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSql { + fieldId = 3 
+ goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpRequest[fieldId])) } -func (p *TLoadTxnBeginRequest) FastReadField4(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -14644,13 +15460,13 @@ func (p *TLoadTxnBeginRequest) FastReadField4(buf []byte) (int, error) { } else { offset += l - p.Db = v + p.User = v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField5(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -14658,1357 +15474,1309 @@ func (p *TLoadTxnBeginRequest) FastReadField5(buf []byte) (int, error) { } else { offset += l - p.Tbl = v + p.Db = v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField6(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UserIp = &v + + p.Sql = v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField7(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := types.NewTResourceInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Label = v - } + p.ResourceInfo = tmp return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField8(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Timestamp = &v + p.Cluster = &v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField9(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField6(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AuthCode = &v + p.ExecMemLimit = &v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField10(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField7(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } 
else { offset += l - p.Timeout = &v + p.QueryTimeout = &v } return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField11(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField8(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.UserIp = &v + } - p.RequestId = tmp return offset, nil } -func (p *TLoadTxnBeginRequest) FastReadField12(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField9(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Token = &v + p.TimeZone = &v } return offset, nil } -// for compatibility -func (p *TLoadTxnBeginRequest) FastWrite(buf []byte) int { - return 0 -} - -func (p *TLoadTxnBeginRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField10(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnBeginRequest") - if p != nil { - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} -func (p *TLoadTxnBeginRequest) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnBeginRequest") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StmtId = &v + } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField11(buf []byte) (int, error) { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SqlMode = &v + } - return offset + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField12(buf []byte) (int, error) { offset := 0 - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} -func (p *TLoadTxnBeginRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadMemLimit = &v - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + } + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField13(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableStrictMode = &v + + } + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField14(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CurrentUserIdent = tmp + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField15(buf []byte) (int, error) { offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StmtIdx = &v + } - return offset + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField16(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Label) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + tmp := palointernalservice.NewTQueryOptions() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryOptions = tmp + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField17(buf []byte) (int, error) { offset := 0 - if p.IsSetTimestamp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timestamp", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timestamp) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.QueryId = tmp + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField18(buf []byte) (int, error) { offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.InsertVisibleTimeoutMs = &v + } - return offset + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField19(buf []byte) (int, error) { offset := 0 - if p.IsSetTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timeout) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return offset -} + p.SessionVariables = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TLoadTxnBeginRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRequestId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request_id", thrift.STRUCT, 11) - offset += p.RequestId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.SessionVariables[_key] = _val } - return offset + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TLoadTxnBeginRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastReadField20(buf []byte) (int, error) { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 12) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FoldConstantByBe = &v + } - return offset + return offset, nil } -func (p *TLoadTxnBeginRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) +func (p *TMasterOpRequest) FastReadField21(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return l -} + p.TraceCarrier = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := 
bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TLoadTxnBeginRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) + _key = v - l += bthrift.Binary.FieldEndLength() - return l -} + } -func (p *TLoadTxnBeginRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - l += bthrift.Binary.FieldEndLength() - return l -} + _val = v -func (p *TLoadTxnBeginRequest) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(p.Db) + } - l += bthrift.Binary.FieldEndLength() - return l + p.TraceCarrier[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TLoadTxnBeginRequest) field5Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(p.Tbl) - - l += bthrift.Binary.FieldEndLength() - return l -} +func (p *TMasterOpRequest) FastReadField22(buf []byte) (int, error) { + offset := 0 -func (p *TLoadTxnBeginRequest) field6Length() int { - l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ClientNodeHost = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) field7Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(p.Label) - - l += bthrift.Binary.FieldEndLength() - return l -} +func (p *TMasterOpRequest) FastReadField23(buf []byte) (int, error) { + offset := 0 -func (p *TLoadTxnBeginRequest) field8Length() int { - l := 0 - if p.IsSetTimestamp() { - l += bthrift.Binary.FieldBeginLength("timestamp", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.Timestamp) + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ClientNodePort = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) field9Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.AuthCode) +func (p *TMasterOpRequest) FastReadField24(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SyncJournalOnly = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) field10Length() int { - l := 0 - if p.IsSetTimeout() { - l += bthrift.Binary.FieldBeginLength("timeout", thrift.I64, 10) - l += bthrift.Binary.I64Length(*p.Timeout) +func (p *TMasterOpRequest) FastReadField25(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DefaultCatalog = &v - l += 
bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) field11Length() int { - l := 0 - if p.IsSetRequestId() { - l += bthrift.Binary.FieldBeginLength("request_id", thrift.STRUCT, 11) - l += p.RequestId.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TMasterOpRequest) FastReadField26(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DefaultDatabase = &v + } - return l + return offset, nil } -func (p *TLoadTxnBeginRequest) field12Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 12) - l += bthrift.Binary.StringLengthNocopy(*p.Token) +func (p *TMasterOpRequest) FastReadField27(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CancelQeury = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxnBeginResult_) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetStatus bool = false - _, l, err = bthrift.Binary.ReadStructBegin(buf) +func (p *TMasterOpRequest) FastReadField28(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) offset += l if err != nil { - goto ReadStructBeginError + return offset, err } + p.UserVariables = make(map[string]*exprs.TExprNode, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetStatus = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + _val := exprs.NewTExprNode() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { offset += l - if err != nil { - goto SkipFieldError - } } - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - 
if err != nil { - goto ReadStructEndError + p.UserVariables[_key] = _val } - - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginResult_[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginResult_[fieldId])) } -func (p *TLoadTxnBeginResult_) FastReadField1(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField29(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTTxnLoadInfo() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp + p.TxnLoadInfo = tmp return offset, nil } -func (p *TLoadTxnBeginResult_) FastReadField2(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField30(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTGroupCommitInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnId = &v - } + p.GroupCommitInfo = tmp return offset, nil } -func (p *TLoadTxnBeginResult_) FastReadField3(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField1000(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.JobStatus = &v + p.CloudCluster = &v } return offset, nil } -func (p *TLoadTxnBeginResult_) FastReadField4(buf []byte) (int, error) { +func (p *TMasterOpRequest) FastReadField1001(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v + p.NoAuth = &v } return offset, nil } // for compatibility -func (p *TLoadTxnBeginResult_) FastWrite(buf []byte) int { +func (p *TMasterOpRequest) FastWrite(buf []byte) int { return 0 } -func (p *TLoadTxnBeginResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnBeginResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMasterOpRequest") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + 
offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField1001(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField25(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxnBeginResult_) BLength() int { +func (p *TMasterOpRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnBeginResult") + l += bthrift.Binary.StructBeginLength("TMasterOpRequest") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() + l += p.field25Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field1000Length() + l += p.field1001Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxnBeginResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p 
*TLoadTxnBeginResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMasterOpRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sql", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Sql) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} +func (p *TMasterOpRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetResourceInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resourceInfo", thrift.STRUCT, 4) + offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnBeginResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetJobStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_status", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JobStatus) + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnBeginResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + if p.IsSetExecMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "execMemLimit", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExecMemLimit) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnBeginResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - return l +func (p *TMasterOpRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryTimeout", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.QueryTimeout) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TLoadTxnBeginResult_) field2Length() int { - l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TxnId) +func (p *TMasterOpRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TLoadTxnBeginResult_) field3Length() int { - l := 0 - if p.IsSetJobStatus() { - l += bthrift.Binary.FieldBeginLength("job_status", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.JobStatus) +func (p *TMasterOpRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeZone() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_zone", thrift.STRING, 9) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TimeZone) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TLoadTxnBeginResult_) field4Length() int { - l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.DbId) +func (p *TMasterOpRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStmtId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "stmt_id", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.StmtId) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TBeginTxnRequest) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError +func (p *TMasterOpRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSqlMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sqlMode", thrift.I64, 11) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.SqlMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } + return offset +} - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.LIST { - l, err = 
p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnRequest[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TBeginTxnRequest) FastReadField1(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetLoadMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadMemLimit", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadMemLimit) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField2(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableStrictMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enableStrictMode", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableStrictMode) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.User = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField3(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Passwd = &v - + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 14) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField4(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetStmtIdx() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "stmtIdx", thrift.I32, 15) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.StmtIdx) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField5(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.TableIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.TableIds = append(p.TableIds, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + if p.IsSetQueryOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 16) + offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField6(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v - + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 17) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField7(buf []byte) (int, error) { +func (p 
*TMasterOpRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInsertVisibleTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "insert_visible_timeout_ms", thrift.I64, 18) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.InsertVisibleTimeoutMs) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Label = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField8(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSessionVariables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "session_variables", thrift.MAP, 19) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.SessionVariables { + length++ - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.AuthCode = &v - - } - return offset, nil -} - -func (p *TBeginTxnRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Timeout = &v + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField10(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetFoldConstantByBe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "foldConstantByBe", thrift.BOOL, 20) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.FoldConstantByBe) - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.RequestId = tmp - return offset, nil + return offset } -func (p *TBeginTxnRequest) FastReadField11(buf []byte) (int, error) { +func (p *TMasterOpRequest) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetTraceCarrier() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trace_carrier", thrift.MAP, 21) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.TraceCarrier { + length++ - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v - - } - return offset, nil -} + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) -// for compatibility -func (p *TBeginTxnRequest) FastWrite(buf []byte) int { - return 0 -} + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) -func (p *TBeginTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBeginTxnRequest") - if p != nil { - offset += 
p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TBeginTxnRequest) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TBeginTxnRequest") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TBeginTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + if p.IsSetClientNodeHost() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clientNodeHost", thrift.STRING, 22) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClientNodeHost) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + if p.IsSetClientNodePort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clientNodePort", thrift.I32, 23) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ClientNodePort) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + if p.IsSetSyncJournalOnly() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "syncJournalOnly", thrift.BOOL, 24) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.SyncJournalOnly) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField4(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + if p.IsSetDefaultCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultCatalog", thrift.STRING, 25) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultCatalog) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableIds() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ids", thrift.LIST, 5) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) - var length int - for _, v := range p.TableIds { - length++ - offset += bthrift.Binary.WriteI64(buf[offset:], v) + if p.IsSetDefaultDatabase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultDatabase", thrift.STRING, 26) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultDatabase) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + if p.IsSetCancelQeury() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cancel_qeury", thrift.BOOL, 27) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.CancelQeury) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLabel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + if p.IsSetUserVariables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_variables", thrift.MAP, 28) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRUCT, 0) + var length int + for k, v := range p.UserVariables { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", 
thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - + if p.IsSetTxnLoadInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnLoadInfo", thrift.STRUCT, 29) + offset += p.TxnLoadInfo.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timeout) - + if p.IsSetGroupCommitInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommitInfo", thrift.STRUCT, 30) + offset += p.GroupCommitInfo.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRequestId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request_id", thrift.STRUCT, 10) - offset += p.RequestId.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCloudCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_cluster", thrift.STRING, 1000) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudCluster) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMasterOpRequest) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + if p.IsSetNoAuth() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "noAuth", thrift.BOOL, 1001) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.NoAuth) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnRequest) field1Length() int { +func (p *TMasterOpRequest) field1Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.User) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TBeginTxnRequest) field2Length() int { +func (p *TMasterOpRequest) field2Length() int { l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.Db) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TBeginTxnRequest) field3Length() int { +func (p *TMasterOpRequest) field3Length() int { l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + l += bthrift.Binary.FieldBeginLength("sql", thrift.STRING, 3) + l += 
bthrift.Binary.StringLengthNocopy(p.Sql) + + l += bthrift.Binary.FieldEndLength() + return l +} +func (p *TMasterOpRequest) field4Length() int { + l := 0 + if p.IsSetResourceInfo() { + l += bthrift.Binary.FieldBeginLength("resourceInfo", thrift.STRUCT, 4) + l += p.ResourceInfo.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field4Length() int { +func (p *TMasterOpRequest) field5Length() int { l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field5Length() int { +func (p *TMasterOpRequest) field6Length() int { l := 0 - if p.IsSetTableIds() { - l += bthrift.Binary.FieldBeginLength("table_ids", thrift.LIST, 5) - l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TableIds)) - var tmpV int64 - l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TableIds) - l += bthrift.Binary.ListEndLength() + if p.IsSetExecMemLimit() { + l += bthrift.Binary.FieldBeginLength("execMemLimit", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.ExecMemLimit) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field6Length() int { +func (p *TMasterOpRequest) field7Length() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + if p.IsSetQueryTimeout() { + l += bthrift.Binary.FieldBeginLength("queryTimeout", thrift.I32, 7) + l += bthrift.Binary.I32Length(*p.QueryTimeout) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field7Length() int { +func (p *TMasterOpRequest) field8Length() int { l := 0 - if p.IsSetLabel() { - l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.Label) + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field8Length() int { +func (p *TMasterOpRequest) field9Length() int { l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.AuthCode) + if p.IsSetTimeZone() { + l += bthrift.Binary.FieldBeginLength("time_zone", thrift.STRING, 9) + l += bthrift.Binary.StringLengthNocopy(*p.TimeZone) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field9Length() int { +func (p *TMasterOpRequest) field10Length() int { l := 0 - if p.IsSetTimeout() { - l += bthrift.Binary.FieldBeginLength("timeout", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.Timeout) + if p.IsSetStmtId() { + l += bthrift.Binary.FieldBeginLength("stmt_id", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.StmtId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) field10Length() int { +func (p *TMasterOpRequest) field11Length() int { l := 0 - if p.IsSetRequestId() { - l += bthrift.Binary.FieldBeginLength("request_id", thrift.STRUCT, 10) - l += p.RequestId.BLength() + if p.IsSetSqlMode() { + l += bthrift.Binary.FieldBeginLength("sqlMode", thrift.I64, 11) + l += bthrift.Binary.I64Length(*p.SqlMode) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnRequest) 
field11Length() int { +func (p *TMasterOpRequest) field12Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Token) + if p.IsSetLoadMemLimit() { + l += bthrift.Binary.FieldBeginLength("loadMemLimit", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.LoadMemLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { +func (p *TMasterOpRequest) field13Length() int { + l := 0 + if p.IsSetEnableStrictMode() { + l += bthrift.Binary.FieldBeginLength("enableStrictMode", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.EnableStrictMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field14Length() int { + l := 0 + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 14) + l += p.CurrentUserIdent.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field15Length() int { + l := 0 + if p.IsSetStmtIdx() { + l += bthrift.Binary.FieldBeginLength("stmtIdx", thrift.I32, 15) + l += bthrift.Binary.I32Length(*p.StmtIdx) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field16Length() int { + l := 0 + if p.IsSetQueryOptions() { + l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 16) + l += p.QueryOptions.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field17Length() int { + l := 0 + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 17) + l += p.QueryId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field18Length() int { + l := 0 + if p.IsSetInsertVisibleTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("insert_visible_timeout_ms", thrift.I64, 18) + l += bthrift.Binary.I64Length(*p.InsertVisibleTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field19Length() int { + l := 0 + if p.IsSetSessionVariables() { + l += bthrift.Binary.FieldBeginLength("session_variables", thrift.MAP, 19) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.SessionVariables)) + for k, v := range p.SessionVariables { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field20Length() int { + l := 0 + if p.IsSetFoldConstantByBe() { + l += bthrift.Binary.FieldBeginLength("foldConstantByBe", thrift.BOOL, 20) + l += bthrift.Binary.BoolLength(*p.FoldConstantByBe) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field21Length() int { + l := 0 + if p.IsSetTraceCarrier() { + l += bthrift.Binary.FieldBeginLength("trace_carrier", thrift.MAP, 21) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.TraceCarrier)) + for k, v := range p.TraceCarrier { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field22Length() int { + l := 0 + if p.IsSetClientNodeHost() { + l += bthrift.Binary.FieldBeginLength("clientNodeHost", thrift.STRING, 22) + l += 
bthrift.Binary.StringLengthNocopy(*p.ClientNodeHost) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field23Length() int { + l := 0 + if p.IsSetClientNodePort() { + l += bthrift.Binary.FieldBeginLength("clientNodePort", thrift.I32, 23) + l += bthrift.Binary.I32Length(*p.ClientNodePort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field24Length() int { + l := 0 + if p.IsSetSyncJournalOnly() { + l += bthrift.Binary.FieldBeginLength("syncJournalOnly", thrift.BOOL, 24) + l += bthrift.Binary.BoolLength(*p.SyncJournalOnly) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field25Length() int { + l := 0 + if p.IsSetDefaultCatalog() { + l += bthrift.Binary.FieldBeginLength("defaultCatalog", thrift.STRING, 25) + l += bthrift.Binary.StringLengthNocopy(*p.DefaultCatalog) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field26Length() int { + l := 0 + if p.IsSetDefaultDatabase() { + l += bthrift.Binary.FieldBeginLength("defaultDatabase", thrift.STRING, 26) + l += bthrift.Binary.StringLengthNocopy(*p.DefaultDatabase) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field27Length() int { + l := 0 + if p.IsSetCancelQeury() { + l += bthrift.Binary.FieldBeginLength("cancel_qeury", thrift.BOOL, 27) + l += bthrift.Binary.BoolLength(*p.CancelQeury) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field28Length() int { + l := 0 + if p.IsSetUserVariables() { + l += bthrift.Binary.FieldBeginLength("user_variables", thrift.MAP, 28) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRUCT, len(p.UserVariables)) + for k, v := range p.UserVariables { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field29Length() int { + l := 0 + if p.IsSetTxnLoadInfo() { + l += bthrift.Binary.FieldBeginLength("txnLoadInfo", thrift.STRUCT, 29) + l += p.TxnLoadInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field30Length() int { + l := 0 + if p.IsSetGroupCommitInfo() { + l += bthrift.Binary.FieldBeginLength("groupCommitInfo", thrift.STRUCT, 30) + l += p.GroupCommitInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field1000Length() int { + l := 0 + if p.IsSetCloudCluster() { + l += bthrift.Binary.FieldBeginLength("cloud_cluster", thrift.STRING, 1000) + l += bthrift.Binary.StringLengthNocopy(*p.CloudCluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterOpRequest) field1001Length() int { + l := 0 + if p.IsSetNoAuth() { + l += bthrift.Binary.FieldBeginLength("noAuth", thrift.BOOL, 1001) + l += bthrift.Binary.BoolLength(*p.NoAuth) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TColumnDefinition) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetColumnName bool = false + var issetColumnType bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -16026,12 +16794,13 @@ func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) 
offset += l if err != nil { goto ReadFieldError } + issetColumnName = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16040,12 +16809,13 @@ func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetColumnType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16054,7 +16824,7 @@ func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -16068,7 +16838,7 @@ func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -16101,95 +16871,109 @@ func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetColumnName { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetColumnType { + fieldId = 2 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDefinition[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TColumnDefinition[fieldId])) } -func (p *TBeginTxnResult_) FastReadField1(buf []byte) (int, error) { +func (p *TColumnDefinition) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.ColumnName = v + } - p.Status = tmp return offset, nil } -func (p *TBeginTxnResult_) FastReadField2(buf []byte) (int, error) { +func (p *TColumnDefinition) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := types.NewTColumnType() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnId = &v - } + p.ColumnType = tmp return offset, nil } -func (p *TBeginTxnResult_) FastReadField3(buf []byte) (int, error) { +func (p *TColumnDefinition) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.JobStatus = &v + + tmp := types.TAggregationType(v) + p.AggType 
= &tmp } return offset, nil } -func (p *TBeginTxnResult_) FastReadField4(buf []byte) (int, error) { +func (p *TColumnDefinition) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v + p.DefaultValue = &v } return offset, nil } // for compatibility -func (p *TBeginTxnResult_) FastWrite(buf []byte) int { +func (p *TColumnDefinition) FastWrite(buf []byte) int { return 0 } -func (p *TBeginTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnDefinition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBeginTxnResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TColumnDefinition") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TBeginTxnResult_) BLength() int { +func (p *TColumnDefinition) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TBeginTxnResult") + l += bthrift.Binary.StructBeginLength("TColumnDefinition") if p != nil { l += p.field1Length() l += p.field2Length() @@ -16201,106 +16985,91 @@ func (p *TBeginTxnResult_) BLength() int { return l } -func (p *TBeginTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnDefinition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnName", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ColumnName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TBeginTxnResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnDefinition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnType", thrift.STRUCT, 2) + offset += p.ColumnType.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TBeginTxnResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnDefinition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetJobStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_status", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JobStatus) + if p.IsSetAggType() { + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "aggType", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.AggType)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnDefinition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + if p.IsSetDefaultValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "defaultValue", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DefaultValue) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBeginTxnResult_) field1Length() int { +func (p *TColumnDefinition) field1Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("columnName", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.ColumnName) + + l += bthrift.Binary.FieldEndLength() return l } -func (p *TBeginTxnResult_) field2Length() int { +func (p *TColumnDefinition) field2Length() int { l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TxnId) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("columnType", thrift.STRUCT, 2) + l += p.ColumnType.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TBeginTxnResult_) field3Length() int { +func (p *TColumnDefinition) field3Length() int { l := 0 - if p.IsSetJobStatus() { - l += bthrift.Binary.FieldBeginLength("job_status", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.JobStatus) + if p.IsSetAggType() { + l += bthrift.Binary.FieldBeginLength("aggType", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.AggType)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBeginTxnResult_) field4Length() int { +func (p *TColumnDefinition) field4Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetDefaultValue() { + l += bthrift.Binary.FieldBeginLength("defaultValue", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.DefaultValue) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { +func (p *TShowResultSetMetaData) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetLoadId bool = false - var issetTxnId bool = false - var issetFileType bool = false - var issetFormatType bool = false + var issetColumns bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -16318,12 +17087,13 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetColumns = true } else { l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16331,118 +17101,161 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetUser = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPasswd = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetDb = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetTbl = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetLoadId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 8: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetTxnId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField9(buf[offset:]) + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetColumns { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSetMetaData[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return 
offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSetMetaData[fieldId])) +} + +func (p *TShowResultSetMetaData) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Columns = make([]*TColumnDefinition, 0, size) + for i := 0; i < size; i++ { + _elem := NewTColumnDefinition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Columns = append(p.Columns, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TShowResultSetMetaData) FastWrite(buf []byte) int { + return 0 +} + +func (p *TShowResultSetMetaData) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowResultSetMetaData") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TShowResultSetMetaData) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TShowResultSetMetaData") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TShowResultSetMetaData) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Columns { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TShowResultSetMetaData) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) + for _, v := range p.Columns { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TShowResultSet) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetMetaData bool = false + var issetResultRows bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetFileType = true + issetMetaData = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16450,14 +17263,14 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 10: - if fieldTypeId == thrift.I32 { - l, err = 
p.FastReadField10(buf[offset:]) + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetFormatType = true + issetResultRows = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16465,41 +17278,229 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 12: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetMetaData { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetResultRows { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowResultSet[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TShowResultSet[fieldId])) +} + +func (p *TShowResultSet) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTShowResultSetMetaData() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MetaData = tmp + return offset, nil +} + +func (p *TShowResultSet) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ResultRows = make([][]string, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem1 string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l - if err != nil { - goto SkipFieldError - } + + _elem1 = v + } - case 13: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField13(buf[offset:]) + + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + 
p.ResultRows = append(p.ResultRows, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TShowResultSet) FastWrite(buf []byte) int { + return 0 +} + +func (p *TShowResultSet) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowResultSet") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TShowResultSet) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TShowResultSet") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TShowResultSet) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metaData", thrift.STRUCT, 1) + offset += p.MetaData.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TShowResultSet) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resultRows", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.ResultRows { + length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range v { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TShowResultSet) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("metaData", thrift.STRUCT, 1) + l += p.MetaData.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TShowResultSet) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("resultRows", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultRows)) + for _, v := range p.ResultRows { + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) + for _, v := range v { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetMaxJournalId bool = false + var issetPacket bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if 
fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetMaxJournalId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16507,13 +17508,14 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 14: + case 2: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField14(buf[offset:]) + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetPacket = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16521,9 +17523,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 15: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField15(buf[offset:]) + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16535,9 +17537,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 16: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField16(buf[offset:]) + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16549,9 +17551,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 17: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField17(buf[offset:]) + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16563,23 +17565,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 18: + case 6: if fieldTypeId == thrift.I32 { - l, err = p.FastReadField18(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField19(buf[offset:]) + l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16591,37 +17579,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 20: + case 7: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField20(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField21(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField22(buf[offset:]) + l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16633,9 +17593,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 23: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField23(buf[offset:]) + case 8: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16647,9 +17607,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 24: - 
if fieldTypeId == thrift.STRING { - l, err = p.FastReadField24(buf[offset:]) + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16661,9 +17621,9 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 25: + case 10: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField25(buf[offset:]) + l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16675,403 +17635,11 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 26: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField26(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 27: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField27(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 28: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField28(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 29: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField29(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 30: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField30(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 31: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField31(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 32: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField32(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 33: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField33(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 34: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField34(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 35: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField35(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 36: - if fieldTypeId == thrift.DOUBLE { - l, err = p.FastReadField36(buf[offset:]) - offset += l - if err != nil { - goto 
ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 37: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField37(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 38: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField38(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 39: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField39(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 40: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField40(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 41: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField41(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 42: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField42(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 43: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField43(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 44: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField44(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 45: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField45(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 46: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField46(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 47: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField47(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 48: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField48(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 49: - if fieldTypeId == thrift.I32 { - l, err = 
p.FastReadField49(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 50: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField50(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 51: - if fieldTypeId == thrift.BYTE { - l, err = p.FastReadField51(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 52: - if fieldTypeId == thrift.BYTE { - l, err = p.FastReadField52(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 53: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField53(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } } @@ -17087,43 +17655,13 @@ func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetLoadId { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 8 - goto RequiredFieldNotSetError - } - - if !issetFileType { - fieldId = 9 + if !issetMaxJournalId { + fieldId = 1 goto RequiredFieldNotSetError } - if !issetFormatType { - fieldId = 10 + if !issetPacket { + fieldId = 2 goto RequiredFieldNotSetError } return offset, nil @@ -17132,7 +17670,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterOpResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -17140,760 +17678,1180 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutRequest[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterOpResult_[fieldId])) } -func (p *TStreamLoadPutRequest) FastReadField1(buf []byte) (int, error) { 
+func (p *TMasterOpResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v + + p.MaxJournalId = v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField2(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { return offset, err } else { offset += l - p.User = v + p.Packet = []byte(v) } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField3(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTShowResultSet() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Passwd = v - } + p.ResultSet = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField4(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Db = v - } + p.QueryId = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField5(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField5(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Tbl = v + p.Status = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField6(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UserIp = &v + p.StatusCode = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField7(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField7(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ErrMessage = &v + } - p.LoadId = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField8(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField8(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - - p.TxnId = v - } - return offset, nil -} - -func (p *TStreamLoadPutRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 + p.QueryResultBufList = make([][]byte, 0, size) + for i := 0; i < size; i++ { + var _elem []byte + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + _elem = []byte(v) - p.FileType = 
types.TFileType(v) + } + p.QueryResultBufList = append(p.QueryResultBufList, _elem) } - return offset, nil -} - -func (p *TStreamLoadPutRequest) FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.FormatType = plannodes.TFileFormatType(v) - } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField11(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField9(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTTxnLoadInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Path = &v - } + p.TxnLoadInfo = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField12(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastReadField10(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Columns = &v + p.GroupCommitLoadBeId = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField13(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Where = &v - - } - return offset, nil +// for compatibility +func (p *TMasterOpResult_) FastWrite(buf []byte) int { + return 0 } -func (p *TStreamLoadPutRequest) FastReadField14(buf []byte) (int, error) { +func (p *TMasterOpResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ColumnSeparator = &v - + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMasterOpResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } - return offset, nil + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) FastReadField15(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Partitions = &v - +func (p *TMasterOpResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMasterOpResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TStreamLoadPutRequest) FastReadField16(buf 
[]byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "maxJournalId", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxJournalId) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.AuthCode = &v - - } - return offset, nil + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) FastReadField17(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "packet", thrift.STRING, 2) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Packet)) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Negative = &v - - } - return offset, nil + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) FastReadField18(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Timeout = &v - + if p.IsSetResultSet() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resultSet", thrift.STRUCT, 3) + offset += p.ResultSet.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField19(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.StrictMode = &v - + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryId", thrift.STRUCT, 4) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField20(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Status) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Timezone = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField21(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetStatusCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "statusCode", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.StatusCode) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ExecMemLimit = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) 
FastReadField22(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetErrMessage() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "errMessage", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ErrMessage) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.IsTempPartition = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField23(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetQueryResultBufList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryResultBufList", thrift.LIST, 8) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.QueryResultBufList { + length++ + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(v)) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.StripOuterArray = &v - + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField24(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Jsonpaths = &v - + if p.IsSetTxnLoadInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnLoadInfo", thrift.STRUCT, 9) + offset += p.TxnLoadInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField25(buf []byte) (int, error) { +func (p *TMasterOpResult_) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetGroupCommitLoadBeId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommitLoadBeId", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitLoadBeId) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ThriftRpcTimeoutMs = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TStreamLoadPutRequest) FastReadField26(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.JsonRoot = &v +func (p *TMasterOpResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("maxJournalId", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.MaxJournalId) - } - return offset, nil + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) FastReadField27(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TMasterOpResult_) field2Length() int { + l := 0 + l += 
bthrift.Binary.FieldBeginLength("packet", thrift.STRING, 2) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Packet)) - tmp := types.TMergeType(v) - p.MergeType = &tmp + l += bthrift.Binary.FieldEndLength() + return l +} +func (p *TMasterOpResult_) field3Length() int { + l := 0 + if p.IsSetResultSet() { + l += bthrift.Binary.FieldBeginLength("resultSet", thrift.STRUCT, 3) + l += p.ResultSet.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField28(buf []byte) (int, error) { - offset := 0 +func (p *TMasterOpResult_) field4Length() int { + l := 0 + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("queryId", thrift.STRUCT, 4) + l += p.QueryId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DeleteCondition = &v +func (p *TMasterOpResult_) field5Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Status) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField29(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.SequenceCol = &v +func (p *TMasterOpResult_) field6Length() int { + l := 0 + if p.IsSetStatusCode() { + l += bthrift.Binary.FieldBeginLength("statusCode", thrift.I32, 6) + l += bthrift.Binary.I32Length(*p.StatusCode) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField30(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.NumAsString = &v +func (p *TMasterOpResult_) field7Length() int { + l := 0 + if p.IsSetErrMessage() { + l += bthrift.Binary.FieldBeginLength("errMessage", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.ErrMessage) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField31(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FuzzyParse = &v +func (p *TMasterOpResult_) field8Length() int { + l := 0 + if p.IsSetQueryResultBufList() { + l += bthrift.Binary.FieldBeginLength("queryResultBufList", thrift.LIST, 8) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.QueryResultBufList)) + for _, v := range p.QueryResultBufList { + l += bthrift.Binary.BinaryLengthNocopy([]byte(v)) + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField32(buf []byte) (int, error) { - offset := 0 +func (p *TMasterOpResult_) field9Length() int { + l := 0 + if p.IsSetTxnLoadInfo() { + l += bthrift.Binary.FieldBeginLength("txnLoadInfo", thrift.STRUCT, 9) + l += p.TxnLoadInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LineDelimiter = &v +func (p *TMasterOpResult_) field10Length() int { + l := 0 + if p.IsSetGroupCommitLoadBeId() { + l += 
bthrift.Binary.FieldBeginLength("groupCommitLoadBeId", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.GroupCommitLoadBeId) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TStreamLoadPutRequest) FastReadField33(buf []byte) (int, error) { - offset := 0 +func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetProtocolVersion bool = false + var issetTaskId bool = false + var issetTaskStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - p.ReadJsonByLine = &v + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetProtocolVersion = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset, nil -} -func (p *TStreamLoadPutRequest) FastReadField34(buf []byte) (int, error) { - offset := 0 + if !issetProtocolVersion { + fieldId = 1 + goto RequiredFieldNotSetError + } - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v + if !issetTaskId { + fieldId = 2 + goto RequiredFieldNotSetError + } + if !issetTaskStatus { + fieldId = 3 + goto RequiredFieldNotSetError } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateExportTaskStatusRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) 
+RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TUpdateExportTaskStatusRequest[fieldId])) } -func (p *TStreamLoadPutRequest) FastReadField35(buf []byte) (int, error) { +func (p *TUpdateExportTaskStatusRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SendBatchParallelism = &v + + p.ProtocolVersion = FrontendServiceVersion(v) } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField36(buf []byte) (int, error) { +func (p *TUpdateExportTaskStatusRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadDouble(buf[offset:]); err != nil { + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.MaxFilterRatio = &v - } + p.TaskId = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField37(buf []byte) (int, error) { +func (p *TUpdateExportTaskStatusRequest) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + tmp := palointernalservice.NewTExportStatusResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadToSingleTablet = &v - } + p.TaskStatus = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField38(buf []byte) (int, error) { - offset := 0 +// for compatibility +func (p *TUpdateExportTaskStatusRequest) FastWrite(buf []byte) int { + return 0 +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.HeaderType = &v +func (p *TUpdateExportTaskStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUpdateExportTaskStatusRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} +func (p *TUpdateExportTaskStatusRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TUpdateExportTaskStatusRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TStreamLoadPutRequest) FastReadField39(buf []byte) (int, error) { +func (p *TUpdateExportTaskStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocolVersion", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.HiddenColumns = &v - - } - return offset, nil + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) FastReadField40(buf []byte) (int, error) { +func (p *TUpdateExportTaskStatusRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := 
bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "taskId", thrift.STRUCT, 2) + offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TUpdateExportTaskStatusRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "taskStatus", thrift.STRUCT, 3) + offset += p.TaskStatus.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TUpdateExportTaskStatusRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("protocolVersion", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TUpdateExportTaskStatusRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("taskId", thrift.STRUCT, 2) + l += p.TaskId.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TUpdateExportTaskStatusRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("taskStatus", thrift.STRUCT, 3) + l += p.TaskStatus.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnBeginRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetLabel bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDb = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTbl = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = 
p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetLabel = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } - tmp := plannodes.TFileCompressType(v) - p.CompressType = &tmp + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + if !issetLabel { + fieldId = 7 + goto RequiredFieldNotSetError } return offset, nil +ReadStructBeginError: + return 
offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnBeginRequest[fieldId])) } -func (p *TStreamLoadPutRequest) FastReadField41(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FileSize = &v + p.Cluster = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField42(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TrimDoubleQuotes = &v + + p.User = v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField43(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SkipLines = &v + + p.Passwd = v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField44(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.EnableProfile = &v + + p.Db = v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField45(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PartialUpdate = &v + + p.Tbl = v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField46(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField6(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err + } else { + offset += l + p.UserIp = &v + } - p.TableNames = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset, nil +} - 
_elem = v +func (p *TLoadTxnBeginRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 - } + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Label = v - p.TableNames = append(p.TableNames, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TLoadTxnBeginRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Timestamp = &v + } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField47(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField9(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadSql = &v + p.AuthCode = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField48(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField10(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BackendId = &v + p.Timeout = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField49(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField11(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Version = &v - } + p.RequestId = tmp return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField50(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField12(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Label = &v + p.Token = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField51(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField13(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadByte(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Enclose = &v + p.AuthCodeUuid = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField52(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField14(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadByte(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Escape = &v + p.TableId = &v } return offset, nil } -func (p *TStreamLoadPutRequest) FastReadField53(buf []byte) (int, error) { +func (p *TLoadTxnBeginRequest) FastReadField15(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.MemtableOnSinkNode = &v + p.BackendId = &v } return offset, nil } // for compatibility -func (p *TStreamLoadPutRequest) FastWrite(buf []byte) int { +func (p *TLoadTxnBeginRequest) FastWrite(buf []byte) int { return 0 } -func (p *TStreamLoadPutRequest) FastWriteNocopy(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadPutRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnBeginRequest") if p != nil { offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField18(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) - offset += p.fastWriteField23(buf[offset:], binaryWriter) - offset += p.fastWriteField25(buf[offset:], binaryWriter) - offset += p.fastWriteField30(buf[offset:], binaryWriter) - offset += p.fastWriteField31(buf[offset:], binaryWriter) - offset += p.fastWriteField33(buf[offset:], binaryWriter) - offset += p.fastWriteField35(buf[offset:], binaryWriter) - offset += p.fastWriteField36(buf[offset:], binaryWriter) - offset += p.fastWriteField37(buf[offset:], binaryWriter) - offset += p.fastWriteField41(buf[offset:], binaryWriter) - offset += p.fastWriteField42(buf[offset:], binaryWriter) - offset += p.fastWriteField43(buf[offset:], binaryWriter) - offset += p.fastWriteField44(buf[offset:], binaryWriter) - offset += p.fastWriteField45(buf[offset:], binaryWriter) - offset += p.fastWriteField48(buf[offset:], binaryWriter) - offset += p.fastWriteField49(buf[offset:], binaryWriter) - offset += p.fastWriteField51(buf[offset:], binaryWriter) - offset += p.fastWriteField52(buf[offset:], binaryWriter) - offset += p.fastWriteField53(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -17901,36 +18859,18 @@ func (p *TStreamLoadPutRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) - offset += p.fastWriteField20(buf[offset:], binaryWriter) - offset += p.fastWriteField24(buf[offset:], binaryWriter) - offset += p.fastWriteField26(buf[offset:], binaryWriter) - offset += p.fastWriteField27(buf[offset:], binaryWriter) - offset += p.fastWriteField28(buf[offset:], binaryWriter) - offset += p.fastWriteField29(buf[offset:], binaryWriter) - offset += p.fastWriteField32(buf[offset:], binaryWriter) - offset += p.fastWriteField34(buf[offset:], binaryWriter) - offset += p.fastWriteField38(buf[offset:], binaryWriter) - offset += p.fastWriteField39(buf[offset:], binaryWriter) - offset += p.fastWriteField40(buf[offset:], binaryWriter) - offset += 
p.fastWriteField46(buf[offset:], binaryWriter) - offset += p.fastWriteField47(buf[offset:], binaryWriter) - offset += p.fastWriteField50(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TStreamLoadPutRequest) BLength() int { +func (p *TLoadTxnBeginRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TStreamLoadPutRequest") + l += bthrift.Binary.StructBeginLength("TLoadTxnBeginRequest") if p != nil { l += p.field1Length() l += p.field2Length() @@ -17947,51 +18887,13 @@ func (p *TStreamLoadPutRequest) BLength() int { l += p.field13Length() l += p.field14Length() l += p.field15Length() - l += p.field16Length() - l += p.field17Length() - l += p.field18Length() - l += p.field19Length() - l += p.field20Length() - l += p.field21Length() - l += p.field22Length() - l += p.field23Length() - l += p.field24Length() - l += p.field25Length() - l += p.field26Length() - l += p.field27Length() - l += p.field28Length() - l += p.field29Length() - l += p.field30Length() - l += p.field31Length() - l += p.field32Length() - l += p.field33Length() - l += p.field34Length() - l += p.field35Length() - l += p.field36Length() - l += p.field37Length() - l += p.field38Length() - l += p.field39Length() - l += p.field40Length() - l += p.field41Length() - l += p.field42Length() - l += p.field43Length() - l += p.field44Length() - l += p.field45Length() - l += p.field46Length() - l += p.field47Length() - l += p.field48Length() - l += p.field49Length() - l += p.field50Length() - l += p.field51Length() - l += p.field52Length() - l += p.field53Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadPutRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetCluster() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) @@ -18002,7 +18904,7 @@ func (p *TStreamLoadPutRequest) fastWriteField1(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) @@ -18011,7 +18913,7 @@ func (p *TStreamLoadPutRequest) fastWriteField2(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) @@ -18020,7 +18922,7 @@ func (p *TStreamLoadPutRequest) fastWriteField3(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], 
binaryWriter, p.Db) @@ -18029,7 +18931,7 @@ func (p *TStreamLoadPutRequest) fastWriteField4(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) @@ -18038,7 +18940,7 @@ func (p *TStreamLoadPutRequest) fastWriteField5(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetUserIp() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) @@ -18049,353 +18951,4011 @@ func (p *TStreamLoadPutRequest) fastWriteField6(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadId", thrift.STRUCT, 7) - offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TStreamLoadPutRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TStreamLoadPutRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fileType", thrift.I32, 9) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.FileType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TStreamLoadPutRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "formatType", thrift.I32, 10) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.FormatType)) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Label) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TStreamLoadPutRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "path", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Path) + if p.IsSetTimestamp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timestamp", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timestamp) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField9(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumns() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.STRING, 12) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Columns) + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetWhere() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "where", thrift.STRING, 13) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Where) + if p.IsSetTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timeout) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnSeparator() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnSeparator", thrift.STRING, 14) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColumnSeparator) - + if p.IsSetRequestId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request_id", thrift.STRUCT, 11) + offset += p.RequestId.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartitions() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.STRING, 15) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Partitions) + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 12) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 16) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + if p.IsSetAuthCodeUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code_uuid", thrift.STRING, 13) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AuthCodeUuid) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNegative() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "negative", thrift.BOOL, 17) - offset += 
bthrift.Binary.WriteBool(buf[offset:], *p.Negative) + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I32, 18) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.Timeout) + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 15) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStrictMode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strictMode", thrift.BOOL, 19) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.StrictMode) +func (p *TLoadTxnBeginRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTimezone() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timezone", thrift.STRING, 20) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Timezone) +func (p *TLoadTxnBeginRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExecMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "execMemLimit", thrift.I64, 21) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExecMemLimit) +func (p *TLoadTxnBeginRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetIsTempPartition() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isTempPartition", thrift.BOOL, 22) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsTempPartition) +func (p *TLoadTxnBeginRequest) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(p.Db) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStripOuterArray() { - 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strip_outer_array", thrift.BOOL, 23) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.StripOuterArray) +func (p *TLoadTxnBeginRequest) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(p.Tbl) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetJsonpaths() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jsonpaths", thrift.STRING, 24) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Jsonpaths) +func (p *TLoadTxnBeginRequest) field6Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetThriftRpcTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 25) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) +func (p *TLoadTxnBeginRequest) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(p.Label) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TStreamLoadPutRequest) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetJsonRoot() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "json_root", thrift.STRING, 26) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JsonRoot) +func (p *TLoadTxnBeginRequest) field8Length() int { + l := 0 + if p.IsSetTimestamp() { + l += bthrift.Binary.FieldBeginLength("timestamp", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.Timestamp) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetMergeType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "merge_type", thrift.I32, 27) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MergeType)) +func (p *TLoadTxnBeginRequest) field9Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.AuthCode) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDeleteCondition() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delete_condition", thrift.STRING, 28) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DeleteCondition) +func (p *TLoadTxnBeginRequest) field10Length() int { + l := 0 + if p.IsSetTimeout() { + l += bthrift.Binary.FieldBeginLength("timeout", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.Timeout) - offset 
+= bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSequenceCol() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sequence_col", thrift.STRING, 29) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SequenceCol) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TLoadTxnBeginRequest) field11Length() int { + l := 0 + if p.IsSetRequestId() { + l += bthrift.Binary.FieldBeginLength("request_id", thrift.STRUCT, 11) + l += p.RequestId.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetNumAsString() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_as_string", thrift.BOOL, 30) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.NumAsString) +func (p *TLoadTxnBeginRequest) field12Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 12) + l += bthrift.Binary.StringLengthNocopy(*p.Token) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFuzzyParse() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fuzzy_parse", thrift.BOOL, 31) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.FuzzyParse) +func (p *TLoadTxnBeginRequest) field13Length() int { + l := 0 + if p.IsSetAuthCodeUuid() { + l += bthrift.Binary.FieldBeginLength("auth_code_uuid", thrift.STRING, 13) + l += bthrift.Binary.StringLengthNocopy(*p.AuthCodeUuid) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLineDelimiter() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "line_delimiter", thrift.STRING, 32) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LineDelimiter) +func (p *TLoadTxnBeginRequest) field14Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.TableId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetReadJsonByLine() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_json_by_line", thrift.BOOL, 33) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.ReadJsonByLine) +func (p *TLoadTxnBeginRequest) field15Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 15) + l += bthrift.Binary.I64Length(*p.BackendId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TStreamLoadPutRequest) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", 
thrift.STRING, 34) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TLoadTxnBeginResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} -func (p *TStreamLoadPutRequest) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSendBatchParallelism() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "send_batch_parallelism", thrift.I32, 35) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.SendBatchParallelism) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return offset + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnBeginResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field 
%s is not set", fieldIDToName_TLoadTxnBeginResult_[fieldId])) } -func (p *TStreamLoadPutRequest) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetMaxFilterRatio() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_filter_ratio", thrift.DOUBLE, 36) - offset += bthrift.Binary.WriteDouble(buf[offset:], *p.MaxFilterRatio) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Status = tmp + return offset, nil } -func (p *TStreamLoadPutRequest) fastWriteField37(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if p.IsSetLoadToSingleTablet() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_to_single_tablet", thrift.BOOL, 37) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.LoadToSingleTablet) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + } - return offset + return offset, nil } -func (p *TStreamLoadPutRequest) fastWriteField38(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - if p.IsSetHeaderType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "header_type", thrift.STRING, 38) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.HeaderType) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.JobStatus = &v + } - return offset + return offset, nil } -func (p *TStreamLoadPutRequest) fastWriteField39(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TLoadTxnBeginResult_) FastReadField4(buf []byte) (int, error) { offset := 0 - if p.IsSetHiddenColumns() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hidden_columns", thrift.STRING, 39) + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TLoadTxnBeginResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxnBeginResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnBeginResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnBeginResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxnBeginResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxnBeginResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnBeginResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnBeginResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_status", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JobStatus) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnBeginResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnBeginResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnBeginResult_) field2Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnBeginResult_) field3Length() int { + l := 0 + if p.IsSetJobStatus() { + l += bthrift.Binary.FieldBeginLength("job_status", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.JobStatus) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnBeginResult_) field4Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBeginTxnRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TableIds = append(p.TableIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Timeout = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.RequestId = tmp + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + 
} else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + +func (p *TBeginTxnRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SubTxnNum = v + + } + return offset, nil +} + +// for compatibility +func (p *TBeginTxnRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TBeginTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBeginTxnRequest") + if p != nil { + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TBeginTxnRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBeginTxnRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TBeginTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField4(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ids", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TableIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timeout) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRequestId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request_id", thrift.STRUCT, 10) + offset += p.RequestId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + 
return offset +} + +func (p *TBeginTxnRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_num", thrift.I64, 13) + offset += bthrift.Binary.WriteI64(buf[offset:], p.SubTxnNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field2Length() int { + l := 0 + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field4Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field5Length() int { + l := 0 + if p.IsSetTableIds() { + l += bthrift.Binary.FieldBeginLength("table_ids", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TableIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TableIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field6Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field7Length() int { + l := 0 + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field8Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field9Length() int { + l := 0 + if p.IsSetTimeout() { + l += bthrift.Binary.FieldBeginLength("timeout", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.Timeout) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field10Length() int { + l := 0 + if p.IsSetRequestId() { + l += bthrift.Binary.FieldBeginLength("request_id", thrift.STRUCT, 10) + l += p.RequestId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field11Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field12Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.BackendId) + + l 
+= bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnRequest) field13Length() int { + l := 0 + if p.IsSetSubTxnNum() { + l += bthrift.Binary.FieldBeginLength("sub_txn_num", thrift.I64, 13) + l += bthrift.Binary.I64Length(p.SubTxnNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBeginTxnResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBeginTxnResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TBeginTxnResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TBeginTxnResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.JobStatus = &v + + } + return offset, nil +} + +func (p *TBeginTxnResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TBeginTxnResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + +func (p *TBeginTxnResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SubTxnIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.SubTxnIds = append(p.SubTxnIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TBeginTxnResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TBeginTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBeginTxnResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TBeginTxnResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBeginTxnResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TBeginTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) 
fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_status", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JobStatus) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 5) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_ids", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.SubTxnIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBeginTxnResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) field2Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) field3Length() int { + l := 0 + if p.IsSetJobStatus() { + l += bthrift.Binary.FieldBeginLength("job_status", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.JobStatus) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) field4Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) field5Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 5) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBeginTxnResult_) field6Length() int { + l := 0 + if p.IsSetSubTxnIds() { + l += bthrift.Binary.FieldBeginLength("sub_txn_ids", thrift.LIST, 6) + l += 
bthrift.Binary.ListBeginLength(thrift.I64, len(p.SubTxnIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.SubTxnIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetLoadId bool = false + var issetTxnId bool = false + var issetFileType bool = false + var issetFormatType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDb = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTbl = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetLoadId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTxnId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFileType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset 
+= l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFormatType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 21: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 22: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + 
} + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 23: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 24: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField24(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 25: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField25(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 26: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField26(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 27: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField27(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 28: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField28(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 29: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField29(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 30: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField30(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 31: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField31(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 32: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField32(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 33: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField33(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 34: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField34(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 35: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField35(buf[offset:]) 
+ offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 36: + if fieldTypeId == thrift.DOUBLE { + l, err = p.FastReadField36(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 37: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField37(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 38: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField38(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 39: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField39(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 40: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField40(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 41: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField41(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 42: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField42(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 43: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField43(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 44: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField44(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 45: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField45(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 46: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField46(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 47: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField47(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 48: + if fieldTypeId == 
thrift.I64 { + l, err = p.FastReadField48(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 49: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField49(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 50: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField50(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 51: + if fieldTypeId == thrift.BYTE { + l, err = p.FastReadField51(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 52: + if fieldTypeId == thrift.BYTE { + l, err = p.FastReadField52(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 53: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField53(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 54: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField54(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 55: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField55(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 56: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField56(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 57: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField57(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1001: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + 
l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetLoadId { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetFileType { + fieldId = 9 + goto RequiredFieldNotSetError + } + + if !issetFormatType { + fieldId = 10 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutRequest[fieldId])) +} + +func (p *TStreamLoadPutRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.User = v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Passwd = v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Db = v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Tbl = v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadId = tmp + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if 
v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TxnId = v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FileType = types.TFileType(v) + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FormatType = plannodes.TFileFormatType(v) + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Path = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Columns = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Where = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColumnSeparator = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Partitions = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Negative = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Timeout = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StrictMode = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Timezone = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ExecMemLimit = &v + + } + return 
offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsTempPartition = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField23(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StripOuterArray = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField24(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Jsonpaths = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField25(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ThriftRpcTimeoutMs = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField26(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.JsonRoot = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField27(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TMergeType(v) + p.MergeType = &tmp + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField28(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DeleteCondition = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField29(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SequenceCol = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField30(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumAsString = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField31(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FuzzyParse = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField32(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LineDelimiter = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField33(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReadJsonByLine = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField34(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField35(buf []byte) (int, error) { + offset := 0 + 
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SendBatchParallelism = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField36(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadDouble(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxFilterRatio = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField37(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadToSingleTablet = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField38(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.HeaderType = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField39(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.HiddenColumns = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField40(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := plannodes.TFileCompressType(v) + p.CompressType = &tmp + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField41(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FileSize = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField42(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TrimDoubleQuotes = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField43(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SkipLines = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField44(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableProfile = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField45(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PartialUpdate = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField46(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableNames = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TableNames = append(p.TableNames, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField47(buf []byte) (int, 
error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadSql = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField48(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField49(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Version = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField50(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField51(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadByte(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Enclose = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField52(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadByte(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Escape = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField53(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MemtableOnSinkNode = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField54(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommit = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField55(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StreamPerNode = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField56(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommitMode = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField57(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TUniqueKeyUpdateMode(v) + p.UniqueKeyUpdateMode = &tmp + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CloudCluster = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutRequest) FastReadField1001(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TStreamLoadPutRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TStreamLoadPutRequest) FastWriteNocopy(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadPutRequest") + if p != nil { + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) + offset += p.fastWriteField25(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField31(buf[offset:], binaryWriter) + offset += p.fastWriteField33(buf[offset:], binaryWriter) + offset += p.fastWriteField35(buf[offset:], binaryWriter) + offset += p.fastWriteField36(buf[offset:], binaryWriter) + offset += p.fastWriteField37(buf[offset:], binaryWriter) + offset += p.fastWriteField41(buf[offset:], binaryWriter) + offset += p.fastWriteField42(buf[offset:], binaryWriter) + offset += p.fastWriteField43(buf[offset:], binaryWriter) + offset += p.fastWriteField44(buf[offset:], binaryWriter) + offset += p.fastWriteField45(buf[offset:], binaryWriter) + offset += p.fastWriteField48(buf[offset:], binaryWriter) + offset += p.fastWriteField49(buf[offset:], binaryWriter) + offset += p.fastWriteField51(buf[offset:], binaryWriter) + offset += p.fastWriteField52(buf[offset:], binaryWriter) + offset += p.fastWriteField53(buf[offset:], binaryWriter) + offset += p.fastWriteField54(buf[offset:], binaryWriter) + offset += p.fastWriteField55(buf[offset:], binaryWriter) + offset += p.fastWriteField1001(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField32(buf[offset:], binaryWriter) + offset += p.fastWriteField34(buf[offset:], binaryWriter) + offset += p.fastWriteField38(buf[offset:], binaryWriter) + offset += p.fastWriteField39(buf[offset:], binaryWriter) + offset += p.fastWriteField40(buf[offset:], binaryWriter) + offset += p.fastWriteField46(buf[offset:], binaryWriter) + offset += p.fastWriteField47(buf[offset:], binaryWriter) + offset += p.fastWriteField50(buf[offset:], binaryWriter) + offset += p.fastWriteField56(buf[offset:], binaryWriter) + offset += p.fastWriteField57(buf[offset:], 
binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TStreamLoadPutRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() + l += p.field25Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field31Length() + l += p.field32Length() + l += p.field33Length() + l += p.field34Length() + l += p.field35Length() + l += p.field36Length() + l += p.field37Length() + l += p.field38Length() + l += p.field39Length() + l += p.field40Length() + l += p.field41Length() + l += p.field42Length() + l += p.field43Length() + l += p.field44Length() + l += p.field45Length() + l += p.field46Length() + l += p.field47Length() + l += p.field48Length() + l += p.field49Length() + l += p.field50Length() + l += p.field51Length() + l += p.field52Length() + l += p.field53Length() + l += p.field54Length() + l += p.field55Length() + l += p.field56Length() + l += p.field57Length() + l += p.field1000Length() + l += p.field1001Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TStreamLoadPutRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadId", thrift.STRUCT, 7) + offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fileType", thrift.I32, 9) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.FileType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "formatType", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.FormatType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "path", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Path) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.STRING, 12) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Columns) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWhere() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "where", thrift.STRING, 13) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Where) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnSeparator() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columnSeparator", thrift.STRING, 14) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColumnSeparator) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.STRING, 15) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Partitions) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 16) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNegative() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "negative", thrift.BOOL, 17) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Negative) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timeout", thrift.I32, 18) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.Timeout) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStrictMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strictMode", thrift.BOOL, 19) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.StrictMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTimezone() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timezone", thrift.STRING, 20) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Timezone) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExecMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "execMemLimit", thrift.I64, 21) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExecMemLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsTempPartition() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isTempPartition", thrift.BOOL, 22) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsTempPartition) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStripOuterArray() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strip_outer_array", thrift.BOOL, 23) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.StripOuterArray) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p 
*TStreamLoadPutRequest) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJsonpaths() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jsonpaths", thrift.STRING, 24) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Jsonpaths) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetThriftRpcTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 25) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJsonRoot() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "json_root", thrift.STRING, 26) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.JsonRoot) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMergeType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "merge_type", thrift.I32, 27) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MergeType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDeleteCondition() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delete_condition", thrift.STRING, 28) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DeleteCondition) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSequenceCol() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sequence_col", thrift.STRING, 29) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SequenceCol) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumAsString() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_as_string", thrift.BOOL, 30) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.NumAsString) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFuzzyParse() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fuzzy_parse", thrift.BOOL, 31) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.FuzzyParse) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLineDelimiter() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "line_delimiter", thrift.STRING, 32) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LineDelimiter) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReadJsonByLine() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_json_by_line", thrift.BOOL, 33) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ReadJsonByLine) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 34) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSendBatchParallelism() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "send_batch_parallelism", thrift.I32, 35) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SendBatchParallelism) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxFilterRatio() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_filter_ratio", thrift.DOUBLE, 36) + offset += bthrift.Binary.WriteDouble(buf[offset:], *p.MaxFilterRatio) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField37(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadToSingleTablet() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_to_single_tablet", thrift.BOOL, 37) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.LoadToSingleTablet) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField38(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHeaderType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "header_type", thrift.STRING, 38) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.HeaderType) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField39(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHiddenColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hidden_columns", thrift.STRING, 39) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.HiddenColumns) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) @@ -18403,745 +22963,18288 @@ func (p *TStreamLoadPutRequest) fastWriteField39(buf []byte, binaryWriter bthrif return offset } -func (p *TStreamLoadPutRequest) fastWriteField40(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TStreamLoadPutRequest) fastWriteField40(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 40) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField41(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 41) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FileSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField42(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrimDoubleQuotes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trim_double_quotes", thrift.BOOL, 42) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.TrimDoubleQuotes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField43(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSkipLines() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_lines", thrift.I32, 43) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SkipLines) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_profile", thrift.BOOL, 44) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableProfile) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField45(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartialUpdate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partial_update", thrift.BOOL, 45) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.PartialUpdate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField46(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_names", thrift.LIST, 46) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.TableNames { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField47(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadSql() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_sql", thrift.STRING, 47) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LoadSql) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField48(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 48) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField49(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I32, 49) + offset += 
bthrift.Binary.WriteI32(buf[offset:], *p.Version) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField50(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 50) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField51(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnclose() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enclose", thrift.BYTE, 51) + offset += bthrift.Binary.WriteByte(buf[offset:], *p.Enclose) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField52(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEscape() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "escape", thrift.BYTE, 52) + offset += bthrift.Binary.WriteByte(buf[offset:], *p.Escape) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField53(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemtableOnSinkNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memtable_on_sink_node", thrift.BOOL, 53) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.MemtableOnSinkNode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField54(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit", thrift.BOOL, 54) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.GroupCommit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField55(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStreamPerNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "stream_per_node", thrift.I32, 55) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.StreamPerNode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField56(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit_mode", thrift.STRING, 56) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.GroupCommitMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField57(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUniqueKeyUpdateMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unique_key_update_mode", thrift.I32, 57) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.UniqueKeyUpdateMode)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCloudCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_cluster", thrift.STRING, 1000) + offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudCluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 1001) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(p.Db) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(p.Tbl) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field6Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("loadId", thrift.STRUCT, 7) + l += p.LoadId.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field8Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 8) + l += bthrift.Binary.I64Length(p.TxnId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field9Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("fileType", thrift.I32, 9) + l += bthrift.Binary.I32Length(int32(p.FileType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field10Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("formatType", thrift.I32, 10) + l += bthrift.Binary.I32Length(int32(p.FormatType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutRequest) field11Length() int { + l := 0 + if p.IsSetPath() { + l += bthrift.Binary.FieldBeginLength("path", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Path) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field12Length() int { + l := 0 + if p.IsSetColumns() { + l += bthrift.Binary.FieldBeginLength("columns", thrift.STRING, 12) + l += bthrift.Binary.StringLengthNocopy(*p.Columns) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field13Length() int { + l := 0 + if p.IsSetWhere() { + l += 
bthrift.Binary.FieldBeginLength("where", thrift.STRING, 13) + l += bthrift.Binary.StringLengthNocopy(*p.Where) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field14Length() int { + l := 0 + if p.IsSetColumnSeparator() { + l += bthrift.Binary.FieldBeginLength("columnSeparator", thrift.STRING, 14) + l += bthrift.Binary.StringLengthNocopy(*p.ColumnSeparator) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field15Length() int { + l := 0 + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.STRING, 15) + l += bthrift.Binary.StringLengthNocopy(*p.Partitions) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field16Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 16) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field17Length() int { + l := 0 + if p.IsSetNegative() { + l += bthrift.Binary.FieldBeginLength("negative", thrift.BOOL, 17) + l += bthrift.Binary.BoolLength(*p.Negative) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field18Length() int { + l := 0 + if p.IsSetTimeout() { + l += bthrift.Binary.FieldBeginLength("timeout", thrift.I32, 18) + l += bthrift.Binary.I32Length(*p.Timeout) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field19Length() int { + l := 0 + if p.IsSetStrictMode() { + l += bthrift.Binary.FieldBeginLength("strictMode", thrift.BOOL, 19) + l += bthrift.Binary.BoolLength(*p.StrictMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field20Length() int { + l := 0 + if p.IsSetTimezone() { + l += bthrift.Binary.FieldBeginLength("timezone", thrift.STRING, 20) + l += bthrift.Binary.StringLengthNocopy(*p.Timezone) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field21Length() int { + l := 0 + if p.IsSetExecMemLimit() { + l += bthrift.Binary.FieldBeginLength("execMemLimit", thrift.I64, 21) + l += bthrift.Binary.I64Length(*p.ExecMemLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field22Length() int { + l := 0 + if p.IsSetIsTempPartition() { + l += bthrift.Binary.FieldBeginLength("isTempPartition", thrift.BOOL, 22) + l += bthrift.Binary.BoolLength(*p.IsTempPartition) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field23Length() int { + l := 0 + if p.IsSetStripOuterArray() { + l += bthrift.Binary.FieldBeginLength("strip_outer_array", thrift.BOOL, 23) + l += bthrift.Binary.BoolLength(*p.StripOuterArray) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field24Length() int { + l := 0 + if p.IsSetJsonpaths() { + l += bthrift.Binary.FieldBeginLength("jsonpaths", thrift.STRING, 24) + l += bthrift.Binary.StringLengthNocopy(*p.Jsonpaths) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field25Length() int { + l := 0 + if p.IsSetThriftRpcTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 25) + l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field26Length() int { + l 
:= 0 + if p.IsSetJsonRoot() { + l += bthrift.Binary.FieldBeginLength("json_root", thrift.STRING, 26) + l += bthrift.Binary.StringLengthNocopy(*p.JsonRoot) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field27Length() int { + l := 0 + if p.IsSetMergeType() { + l += bthrift.Binary.FieldBeginLength("merge_type", thrift.I32, 27) + l += bthrift.Binary.I32Length(int32(*p.MergeType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field28Length() int { + l := 0 + if p.IsSetDeleteCondition() { + l += bthrift.Binary.FieldBeginLength("delete_condition", thrift.STRING, 28) + l += bthrift.Binary.StringLengthNocopy(*p.DeleteCondition) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field29Length() int { + l := 0 + if p.IsSetSequenceCol() { + l += bthrift.Binary.FieldBeginLength("sequence_col", thrift.STRING, 29) + l += bthrift.Binary.StringLengthNocopy(*p.SequenceCol) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field30Length() int { + l := 0 + if p.IsSetNumAsString() { + l += bthrift.Binary.FieldBeginLength("num_as_string", thrift.BOOL, 30) + l += bthrift.Binary.BoolLength(*p.NumAsString) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field31Length() int { + l := 0 + if p.IsSetFuzzyParse() { + l += bthrift.Binary.FieldBeginLength("fuzzy_parse", thrift.BOOL, 31) + l += bthrift.Binary.BoolLength(*p.FuzzyParse) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field32Length() int { + l := 0 + if p.IsSetLineDelimiter() { + l += bthrift.Binary.FieldBeginLength("line_delimiter", thrift.STRING, 32) + l += bthrift.Binary.StringLengthNocopy(*p.LineDelimiter) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field33Length() int { + l := 0 + if p.IsSetReadJsonByLine() { + l += bthrift.Binary.FieldBeginLength("read_json_by_line", thrift.BOOL, 33) + l += bthrift.Binary.BoolLength(*p.ReadJsonByLine) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field34Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 34) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field35Length() int { + l := 0 + if p.IsSetSendBatchParallelism() { + l += bthrift.Binary.FieldBeginLength("send_batch_parallelism", thrift.I32, 35) + l += bthrift.Binary.I32Length(*p.SendBatchParallelism) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field36Length() int { + l := 0 + if p.IsSetMaxFilterRatio() { + l += bthrift.Binary.FieldBeginLength("max_filter_ratio", thrift.DOUBLE, 36) + l += bthrift.Binary.DoubleLength(*p.MaxFilterRatio) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field37Length() int { + l := 0 + if p.IsSetLoadToSingleTablet() { + l += bthrift.Binary.FieldBeginLength("load_to_single_tablet", thrift.BOOL, 37) + l += bthrift.Binary.BoolLength(*p.LoadToSingleTablet) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field38Length() int { + l := 0 + if p.IsSetHeaderType() { + l += bthrift.Binary.FieldBeginLength("header_type", thrift.STRING, 38) + l += 
bthrift.Binary.StringLengthNocopy(*p.HeaderType) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field39Length() int { + l := 0 + if p.IsSetHiddenColumns() { + l += bthrift.Binary.FieldBeginLength("hidden_columns", thrift.STRING, 39) + l += bthrift.Binary.StringLengthNocopy(*p.HiddenColumns) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field40Length() int { + l := 0 + if p.IsSetCompressType() { + l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 40) + l += bthrift.Binary.I32Length(int32(*p.CompressType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field41Length() int { + l := 0 + if p.IsSetFileSize() { + l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 41) + l += bthrift.Binary.I64Length(*p.FileSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field42Length() int { + l := 0 + if p.IsSetTrimDoubleQuotes() { + l += bthrift.Binary.FieldBeginLength("trim_double_quotes", thrift.BOOL, 42) + l += bthrift.Binary.BoolLength(*p.TrimDoubleQuotes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field43Length() int { + l := 0 + if p.IsSetSkipLines() { + l += bthrift.Binary.FieldBeginLength("skip_lines", thrift.I32, 43) + l += bthrift.Binary.I32Length(*p.SkipLines) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field44Length() int { + l := 0 + if p.IsSetEnableProfile() { + l += bthrift.Binary.FieldBeginLength("enable_profile", thrift.BOOL, 44) + l += bthrift.Binary.BoolLength(*p.EnableProfile) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field45Length() int { + l := 0 + if p.IsSetPartialUpdate() { + l += bthrift.Binary.FieldBeginLength("partial_update", thrift.BOOL, 45) + l += bthrift.Binary.BoolLength(*p.PartialUpdate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field46Length() int { + l := 0 + if p.IsSetTableNames() { + l += bthrift.Binary.FieldBeginLength("table_names", thrift.LIST, 46) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.TableNames)) + for _, v := range p.TableNames { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field47Length() int { + l := 0 + if p.IsSetLoadSql() { + l += bthrift.Binary.FieldBeginLength("load_sql", thrift.STRING, 47) + l += bthrift.Binary.StringLengthNocopy(*p.LoadSql) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field48Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 48) + l += bthrift.Binary.I64Length(*p.BackendId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field49Length() int { + l := 0 + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I32, 49) + l += bthrift.Binary.I32Length(*p.Version) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field50Length() int { + l := 0 + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 50) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p 
*TStreamLoadPutRequest) field51Length() int { + l := 0 + if p.IsSetEnclose() { + l += bthrift.Binary.FieldBeginLength("enclose", thrift.BYTE, 51) + l += bthrift.Binary.ByteLength(*p.Enclose) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field52Length() int { + l := 0 + if p.IsSetEscape() { + l += bthrift.Binary.FieldBeginLength("escape", thrift.BYTE, 52) + l += bthrift.Binary.ByteLength(*p.Escape) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field53Length() int { + l := 0 + if p.IsSetMemtableOnSinkNode() { + l += bthrift.Binary.FieldBeginLength("memtable_on_sink_node", thrift.BOOL, 53) + l += bthrift.Binary.BoolLength(*p.MemtableOnSinkNode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field54Length() int { + l := 0 + if p.IsSetGroupCommit() { + l += bthrift.Binary.FieldBeginLength("group_commit", thrift.BOOL, 54) + l += bthrift.Binary.BoolLength(*p.GroupCommit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field55Length() int { + l := 0 + if p.IsSetStreamPerNode() { + l += bthrift.Binary.FieldBeginLength("stream_per_node", thrift.I32, 55) + l += bthrift.Binary.I32Length(*p.StreamPerNode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field56Length() int { + l := 0 + if p.IsSetGroupCommitMode() { + l += bthrift.Binary.FieldBeginLength("group_commit_mode", thrift.STRING, 56) + l += bthrift.Binary.StringLengthNocopy(*p.GroupCommitMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field57Length() int { + l := 0 + if p.IsSetUniqueKeyUpdateMode() { + l += bthrift.Binary.FieldBeginLength("unique_key_update_mode", thrift.I32, 57) + l += bthrift.Binary.I32Length(int32(*p.UniqueKeyUpdateMode)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field1000Length() int { + l := 0 + if p.IsSetCloudCluster() { + l += bthrift.Binary.FieldBeginLength("cloud_cluster", thrift.STRING, 1000) + l += bthrift.Binary.StringLengthNocopy(*p.CloudCluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutRequest) field1001Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 1001) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutResult_[fieldId])) +} + +func (p *TStreamLoadPutResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := 
tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTExecPlanFragmentParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := palointernalservice.NewTPipelineFragmentParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PipelineParams = tmp + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BaseSchemaVersion = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.WaitInternalGroupCommitFinish = v + + } + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommitIntervalMs = &v + + } + return offset, nil +} + +func (p *TStreamLoadPutResult_) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommitDataBytes = &v + + } + return offset, nil +} + +// for compatibility +func (p *TStreamLoadPutResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TStreamLoadPutResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadPutResult") + if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TStreamLoadPutResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += 
p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TStreamLoadPutResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 2) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPipelineParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pipeline_params", thrift.STRUCT, 3) + offset += p.PipelineParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBaseSchemaVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_schema_version", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BaseSchemaVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWaitInternalGroupCommitFinish() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wait_internal_group_commit_finish", thrift.BOOL, 7) + offset += bthrift.Binary.WriteBool(buf[offset:], p.WaitInternalGroupCommitFinish) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitIntervalMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit_interval_ms", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitIntervalMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitDataBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit_data_bytes", thrift.I64, 9) + offset 
+= bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitDataBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadPutResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadPutResult_) field2Length() int { + l := 0 + if p.IsSetParams() { + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 2) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field3Length() int { + l := 0 + if p.IsSetPipelineParams() { + l += bthrift.Binary.FieldBeginLength("pipeline_params", thrift.STRUCT, 3) + l += p.PipelineParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field4Length() int { + l := 0 + if p.IsSetBaseSchemaVersion() { + l += bthrift.Binary.FieldBeginLength("base_schema_version", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.BaseSchemaVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field5Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field6Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field7Length() int { + l := 0 + if p.IsSetWaitInternalGroupCommitFinish() { + l += bthrift.Binary.FieldBeginLength("wait_internal_group_commit_finish", thrift.BOOL, 7) + l += bthrift.Binary.BoolLength(p.WaitInternalGroupCommitFinish) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field8Length() int { + l := 0 + if p.IsSetGroupCommitIntervalMs() { + l += bthrift.Binary.FieldBeginLength("group_commit_interval_ms", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.GroupCommitIntervalMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadPutResult_) field9Length() int { + l := 0 + if p.IsSetGroupCommitDataBytes() { + l += bthrift.Binary.FieldBeginLength("group_commit_data_bytes", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.GroupCommitDataBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId])) +} + +func (p *TStreamLoadMultiTablePutResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TStreamLoadMultiTablePutResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Params = make([]*palointernalservice.TExecPlanFragmentParams, 0, size) + for i := 0; i < size; i++ { + _elem := palointernalservice.NewTExecPlanFragmentParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Params = append(p.Params, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TStreamLoadMultiTablePutResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PipelineParams = make([]*palointernalservice.TPipelineFragmentParams, 0, size) + for i := 0; i < size; i++ { + _elem := palointernalservice.NewTPipelineFragmentParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PipelineParams = append(p.PipelineParams, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p 
*TStreamLoadMultiTablePutResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TStreamLoadMultiTablePutResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadMultiTablePutResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadMultiTablePutResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TStreamLoadMultiTablePutResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TStreamLoadMultiTablePutResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadMultiTablePutResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Params { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadMultiTablePutResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPipelineParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pipeline_params", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.PipelineParams { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadMultiTablePutResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TStreamLoadMultiTablePutResult_) field2Length() int { + l := 0 + if p.IsSetParams() { + l += bthrift.Binary.FieldBeginLength("params", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Params)) + for _, v := range p.Params { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadMultiTablePutResult_) field3Length() int { + l := 0 + if p.IsSetPipelineParams() { + l += bthrift.Binary.FieldBeginLength("pipeline_params", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PipelineParams)) + for _, v := range p.PipelineParams { + l += v.BLength() + } 
+ l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadWithLoadStatusResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := 
tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalRows = &v + + } + return offset, nil +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadedRows = &v + + } + return offset, nil +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FilteredRows = &v + + } + return offset, nil +} + +func (p *TStreamLoadWithLoadStatusResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UnselectedRows = &v + + } + return offset, nil +} + +// for compatibility +func (p *TStreamLoadWithLoadStatusResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TStreamLoadWithLoadStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadWithLoadStatusResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TStreamLoadWithLoadStatusResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField3(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTotalRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_rows", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_rows", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFilteredRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filtered_rows", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilteredRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUnselectedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unselected_rows", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.UnselectedRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TStreamLoadWithLoadStatusResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) field2Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) field3Length() int { + l := 0 + if p.IsSetTotalRows() { + l += bthrift.Binary.FieldBeginLength("total_rows", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TotalRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) field4Length() int { + l := 0 + if p.IsSetLoadedRows() { + l += bthrift.Binary.FieldBeginLength("loaded_rows", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.LoadedRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) field5Length() int { + l := 0 + if p.IsSetFilteredRows() { + l += bthrift.Binary.FieldBeginLength("filtered_rows", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.FilteredRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TStreamLoadWithLoadStatusResult_) field6Length() int { + l := 0 + if p.IsSetUnselectedRows() { + l += bthrift.Binary.FieldBeginLength("unselected_rows", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.UnselectedRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TKafkaRLTaskProgress) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionCmtOffset bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err 
!= nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionCmtOffset = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetPartitionCmtOffset { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TKafkaRLTaskProgress[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TKafkaRLTaskProgress[fieldId])) +} + +func (p *TKafkaRLTaskProgress) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionCmtOffset = make(map[int32]int64, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PartitionCmtOffset[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TKafkaRLTaskProgress) FastWrite(buf []byte) int { + return 0 +} + +func (p *TKafkaRLTaskProgress) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TKafkaRLTaskProgress") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TKafkaRLTaskProgress) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TKafkaRLTaskProgress") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TKafkaRLTaskProgress) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"partitionCmtOffset", thrift.MAP, 1) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I64, 0) + var length int + for k, v := range p.PartitionCmtOffset { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TKafkaRLTaskProgress) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("partitionCmtOffset", thrift.MAP, 1) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I64, len(p.PartitionCmtOffset)) + var tmpK int32 + var tmpV int64 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.PartitionCmtOffset) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetLoadSourceType bool = false + var issetId bool = false + var issetJobId bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetLoadSourceType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetJobId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil 
{ + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetLoadSourceType { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetJobId { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRLTaskTxnCommitAttachment[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRLTaskTxnCommitAttachment[fieldId])) +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.LoadSourceType = types.TLoadSourceType(v) + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Id = tmp + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.JobId = v + + } + return offset, nil +} + +func (p 
*TRLTaskTxnCommitAttachment) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadedRows = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FilteredRows = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UnselectedRows = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReceivedBytes = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadedBytes = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadCostMs = &v + + } + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := NewTKafkaRLTaskProgress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.KafkaRLTaskProgress = tmp + return offset, nil +} + +func (p *TRLTaskTxnCommitAttachment) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ErrorLogUrl = &v + + } + return offset, nil +} + +// for compatibility +func (p *TRLTaskTxnCommitAttachment) FastWrite(buf []byte) int { + return 0 +} + +func (p *TRLTaskTxnCommitAttachment) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRLTaskTxnCommitAttachment") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TRLTaskTxnCommitAttachment) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TRLTaskTxnCommitAttachment") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += 
p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadSourceType", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.LoadSourceType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.STRUCT, 2) + offset += p.Id.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jobId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadedRows", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFilteredRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filteredRows", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilteredRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUnselectedRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unselectedRows", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.UnselectedRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReceivedBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receivedBytes", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceivedBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadedBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadedBytes", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadCostMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadCostMs", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadCostMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + 
return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetKafkaRLTaskProgress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "kafkaRLTaskProgress", thrift.STRUCT, 10) + offset += p.KafkaRLTaskProgress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetErrorLogUrl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "errorLogUrl", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ErrorLogUrl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRLTaskTxnCommitAttachment) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("loadSourceType", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.LoadSourceType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRLTaskTxnCommitAttachment) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("id", thrift.STRUCT, 2) + l += p.Id.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRLTaskTxnCommitAttachment) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("jobId", thrift.I64, 3) + l += bthrift.Binary.I64Length(p.JobId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRLTaskTxnCommitAttachment) field4Length() int { + l := 0 + if p.IsSetLoadedRows() { + l += bthrift.Binary.FieldBeginLength("loadedRows", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.LoadedRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field5Length() int { + l := 0 + if p.IsSetFilteredRows() { + l += bthrift.Binary.FieldBeginLength("filteredRows", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.FilteredRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field6Length() int { + l := 0 + if p.IsSetUnselectedRows() { + l += bthrift.Binary.FieldBeginLength("unselectedRows", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.UnselectedRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field7Length() int { + l := 0 + if p.IsSetReceivedBytes() { + l += bthrift.Binary.FieldBeginLength("receivedBytes", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.ReceivedBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field8Length() int { + l := 0 + if p.IsSetLoadedBytes() { + l += bthrift.Binary.FieldBeginLength("loadedBytes", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.LoadedBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field9Length() int { + l := 0 + if p.IsSetLoadCostMs() { + l += bthrift.Binary.FieldBeginLength("loadCostMs", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.LoadCostMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field10Length() int { + l := 0 + if p.IsSetKafkaRLTaskProgress() { + l += bthrift.Binary.FieldBeginLength("kafkaRLTaskProgress", thrift.STRUCT, 10) + l += p.KafkaRLTaskProgress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRLTaskTxnCommitAttachment) field11Length() int { + l := 0 + if 
p.IsSetErrorLogUrl() { + l += bthrift.Binary.FieldBeginLength("errorLogUrl", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.ErrorLogUrl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetLoadType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetLoadType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetLoadType { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnCommitAttachment[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTxnCommitAttachment[fieldId])) +} + +func (p *TTxnCommitAttachment) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.LoadType = types.TLoadType(v) + + } + return offset, nil +} + +func (p *TTxnCommitAttachment) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTRLTaskTxnCommitAttachment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.RlTaskTxnCommitAttachment = tmp + return offset, nil +} + +// for compatibility +func (p *TTxnCommitAttachment) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTxnCommitAttachment) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TTxnCommitAttachment") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTxnCommitAttachment) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTxnCommitAttachment") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTxnCommitAttachment) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadType", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.LoadType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TTxnCommitAttachment) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRlTaskTxnCommitAttachment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rlTaskTxnCommitAttachment", thrift.STRUCT, 2) + offset += p.RlTaskTxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTxnCommitAttachment) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("loadType", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.LoadType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TTxnCommitAttachment) field2Length() int { + l := 0 + if p.IsSetRlTaskTxnCommitAttachment() { + l += bthrift.Binary.FieldBeginLength("rlTaskTxnCommitAttachment", thrift.STRUCT, 2) + l += p.RlTaskTxnCommitAttachment.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetTxnId bool = false + var issetSync bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) 
+ offset += l + if err != nil { + goto ReadFieldError + } + issetDb = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTbl = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTxnId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSync = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + 
goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetSync { + fieldId = 8 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitRequest[fieldId])) +} + +func (p *TLoadTxnCommitRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.User = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err 
!= nil { + return offset, err + } else { + offset += l + + p.Passwd = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Db = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Tbl = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TxnId = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Sync = v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTTabletCommitInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.CommitInfos = append(p.CommitInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTxnCommitAttachment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TxnCommitAttachment = tmp + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ThriftRpcTimeoutMs = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField15(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, 
err + } + p.Tbls = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.Tbls = append(p.Tbls, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField17(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCodeUuid = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommit = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReceiveBytes = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TLoadTxnCommitRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxnCommitRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnCommitRequest") + if p != nil { + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxnCommitRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += 
p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sync", thrift.BOOL, 8) + offset += bthrift.Binary.WriteBool(buf[offset:], p.Sync) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCommitInfos() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "commitInfos", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.CommitInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnCommitAttachment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnCommitAttachment", thrift.STRUCT, 11) + offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetThriftRpcTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 13) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTbls() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbls", thrift.LIST, 15) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.Tbls { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 16) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField17(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCodeUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code_uuid", thrift.STRING, 17) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AuthCodeUuid) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommit", thrift.BOOL, 18) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.GroupCommit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReceiveBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receiveBytes", thrift.I64, 19) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceiveBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backendId", thrift.I64, 20) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(p.Db) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(p.Tbl) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field6Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 7) + l += bthrift.Binary.I64Length(p.TxnId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field8Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("sync", thrift.BOOL, 8) + l += bthrift.Binary.BoolLength(p.Sync) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnCommitRequest) field9Length() int { + l := 0 + if p.IsSetCommitInfos() { + l += bthrift.Binary.FieldBeginLength("commitInfos", thrift.LIST, 
9) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) + for _, v := range p.CommitInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field10Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field11Length() int { + l := 0 + if p.IsSetTxnCommitAttachment() { + l += bthrift.Binary.FieldBeginLength("txnCommitAttachment", thrift.STRUCT, 11) + l += p.TxnCommitAttachment.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field12Length() int { + l := 0 + if p.IsSetThriftRpcTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field13Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 13) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field14Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field15Length() int { + l := 0 + if p.IsSetTbls() { + l += bthrift.Binary.FieldBeginLength("tbls", thrift.LIST, 15) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Tbls)) + for _, v := range p.Tbls { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field16Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 16) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field17Length() int { + l := 0 + if p.IsSetAuthCodeUuid() { + l += bthrift.Binary.FieldBeginLength("auth_code_uuid", thrift.STRING, 17) + l += bthrift.Binary.StringLengthNocopy(*p.AuthCodeUuid) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field18Length() int { + l := 0 + if p.IsSetGroupCommit() { + l += bthrift.Binary.FieldBeginLength("groupCommit", thrift.BOOL, 18) + l += bthrift.Binary.BoolLength(*p.GroupCommit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field19Length() int { + l := 0 + if p.IsSetReceiveBytes() { + l += bthrift.Binary.FieldBeginLength("receiveBytes", thrift.I64, 19) + l += bthrift.Binary.I64Length(*p.ReceiveBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field20Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backendId", thrift.I64, 20) + l += bthrift.Binary.I64Length(*p.BackendId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = 
bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitResult_[fieldId])) +} + +func (p *TLoadTxnCommitResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TLoadTxnCommitResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxnCommitResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnCommitResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxnCommitResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxnCommitResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnCommitResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += 
p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCommitTxnRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset 
+= l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCommitTxnRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p 
*TCommitTxnRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTTabletCommitInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.CommitInfos = append(p.CommitInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTxnCommitAttachment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TxnCommitAttachment = tmp + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ThriftRpcTimeoutMs = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnInsert = &v + + } + return offset, nil +} + +func (p *TCommitTxnRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SubTxnInfos = make([]*TSubTxnInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTSubTxnInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.SubTxnInfos = append(p.SubTxnInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TCommitTxnRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCommitTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCommitTxnRequest") + if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += 
p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCommitTxnRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCommitTxnRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCommitTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCommitInfos() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "commit_infos", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.CommitInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnCommitAttachment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_commit_attachment", thrift.STRUCT, 9) + offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetThriftRpcTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnInsert() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_insert", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.TxnInsert) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSubTxnInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_infos", thrift.LIST, 14) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.SubTxnInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += 
bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field2Length() int { + l := 0 + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field4Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field5Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field6Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field7Length() int { + l := 0 + if p.IsSetCommitInfos() { + l += bthrift.Binary.FieldBeginLength("commit_infos", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) + for _, v := range p.CommitInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field8Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field9Length() int { + l := 0 + if p.IsSetTxnCommitAttachment() { + l += bthrift.Binary.FieldBeginLength("txn_commit_attachment", thrift.STRUCT, 9) + l += p.TxnCommitAttachment.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field10Length() int { + l := 0 + if p.IsSetThriftRpcTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field11Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field12Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field13Length() int { + l := 0 + if p.IsSetTxnInsert() { + l += bthrift.Binary.FieldBeginLength("txn_insert", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.TxnInsert) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnRequest) field14Length() int { + l := 0 + if p.IsSetSubTxnInfos() { + l += 
bthrift.Binary.FieldBeginLength("sub_txn_infos", thrift.LIST, 14) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.SubTxnInfos)) + for _, v := range p.SubTxnInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TCommitTxnResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TCommitTxnResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + +// for compatibility +func (p *TCommitTxnResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCommitTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCommitTxnResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCommitTxnResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCommitTxnResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCommitTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 2) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCommitTxnResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCommitTxnResult_) field2Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 2) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCRequest[fieldId])) +} + +func (p *TLoadTxn2PCRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := 
bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.User = v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Passwd = v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Operation = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ThriftRpcTimeoutMs = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + +func (p *TLoadTxn2PCRequest) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCodeUuid = &v + + } + return offset, nil +} + +// for compatibility +func (p *TLoadTxn2PCRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxn2PCRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxn2PCRequest") + if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + 
offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxn2PCRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxn2PCRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field1000Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxn2PCRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOperation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "operation", thrift.STRING, 7) + offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Operation) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 9) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetThriftRpcTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCodeUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code_uuid", thrift.STRING, 1000) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AuthCodeUuid) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxn2PCRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxn2PCRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxn2PCRequest) field4Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field5Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field6Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field7Length() int { + l := 0 + if p.IsSetOperation() { + l += bthrift.Binary.FieldBeginLength("operation", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.Operation) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field8Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field9Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 9) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field10Length() int { + l := 0 + if p.IsSetThriftRpcTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field11Length() int { + l := 0 + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCRequest) field1000Length() int { + l := 0 + if p.IsSetAuthCodeUuid() { + l += bthrift.Binary.FieldBeginLength("auth_code_uuid", thrift.STRING, 1000) + l += bthrift.Binary.StringLengthNocopy(*p.AuthCodeUuid) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxn2PCResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCResult_[fieldId])) +} + +func (p *TLoadTxn2PCResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TLoadTxn2PCResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxn2PCResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxn2PCResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxn2PCResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxn2PCResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxn2PCResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxn2PCResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + 
if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRollbackTxnRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v + + } + return 
offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Reason = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTxnCommitAttachment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TxnCommitAttachment = tmp + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TRollbackTxnRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TRollbackTxnRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TRollbackTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRollbackTxnRequest") + if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TRollbackTxnRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TRollbackTxnRequest") + if p != nil { + 
l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TRollbackTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReason() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "reason", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Reason) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetTxnCommitAttachment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_commit_attachment", thrift.STRUCT, 10) + offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field2Length() int { + l := 0 + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field4Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field5Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field6Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field7Length() int { + l := 0 + if p.IsSetReason() { + l += bthrift.Binary.FieldBeginLength("reason", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.Reason) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field9Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field10Length() int { + l := 0 + if p.IsSetTxnCommitAttachment() { + l += bthrift.Binary.FieldBeginLength("txn_commit_attachment", thrift.STRUCT, 10) + l += p.TxnCommitAttachment.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field11Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) + l += 
bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnRequest) field12Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRollbackTxnResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TRollbackTxnResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + +// for compatibility +func (p *TRollbackTxnResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TRollbackTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRollbackTxnResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += 
bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TRollbackTxnResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TRollbackTxnResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TRollbackTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 2) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRollbackTxnResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRollbackTxnResult_) field2Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 2) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + var issetDb bool = false + var issetTbl bool = false + var issetTxnId bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDb = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + 
offset += l + if err != nil { + goto ReadFieldError + } + issetTbl = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTxnId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetDb { + fieldId = 4 + goto 
RequiredFieldNotSetError + } + + if !issetTbl { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 7 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackRequest[fieldId])) +} + +func (p *TLoadTxnRollbackRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.User = v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Passwd = v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Db = v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Tbl = v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TxnId = v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Reason = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCode = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + 
+ tmp := NewTTxnCommitAttachment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TxnCommitAttachment = tmp + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tbls = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.Tbls = append(p.Tbls, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthCodeUuid = &v + + } + return offset, nil +} + +func (p *TLoadTxnRollbackRequest) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + +// for compatibility +func (p *TLoadTxnRollbackRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxnRollbackRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnRollbackRequest") + if p != nil { + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxnRollbackRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l 
+= p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReason() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "reason", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Reason) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + 
return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnCommitAttachment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnCommitAttachment", thrift.STRUCT, 10) + offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTbls() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbls", thrift.LIST, 13) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.Tbls { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthCodeUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code_uuid", thrift.STRING, 14) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AuthCodeUuid) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 15) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnRollbackRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += 
bthrift.Binary.StringLengthNocopy(p.Db) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(p.Tbl) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) field6Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 7) + l += bthrift.Binary.I64Length(p.TxnId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TLoadTxnRollbackRequest) field8Length() int { + l := 0 + if p.IsSetReason() { + l += bthrift.Binary.FieldBeginLength("reason", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.Reason) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field9Length() int { + l := 0 + if p.IsSetAuthCode() { + l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.AuthCode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field10Length() int { + l := 0 + if p.IsSetTxnCommitAttachment() { + l += bthrift.Binary.FieldBeginLength("txnCommitAttachment", thrift.STRUCT, 10) + l += p.TxnCommitAttachment.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field11Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field12Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field13Length() int { + l := 0 + if p.IsSetTbls() { + l += bthrift.Binary.FieldBeginLength("tbls", thrift.LIST, 13) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Tbls)) + for _, v := range p.Tbls { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field14Length() int { + l := 0 + if p.IsSetAuthCodeUuid() { + l += bthrift.Binary.FieldBeginLength("auth_code_uuid", thrift.STRING, 14) + l += bthrift.Binary.StringLengthNocopy(*p.AuthCodeUuid) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackRequest) field15Length() int { + l := 0 + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 15) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnRollbackResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackResult_[fieldId])) +} + +func (p *TLoadTxnRollbackResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TLoadTxnRollbackResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLoadTxnRollbackResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnRollbackResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLoadTxnRollbackResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLoadTxnRollbackResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TLoadTxnRollbackResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType 
+ var fieldId int16 + var issetJobId bool = false + var issetTaskId bool = false + var issetTaskType bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetJobId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetJobId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTaskId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotLoaderReportRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotLoaderReportRequest[fieldId])) +} + +func (p *TSnapshotLoaderReportRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err 
:= bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.JobId = v + + } + return offset, nil +} + +func (p *TSnapshotLoaderReportRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TaskId = v + + } + return offset, nil +} + +func (p *TSnapshotLoaderReportRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TaskType = types.TTaskType(v) + + } + return offset, nil +} + +func (p *TSnapshotLoaderReportRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FinishedNum = &v + + } + return offset, nil +} + +func (p *TSnapshotLoaderReportRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalNum = &v + + } + return offset, nil +} + +// for compatibility +func (p *TSnapshotLoaderReportRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSnapshotLoaderReportRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSnapshotLoaderReportRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSnapshotLoaderReportRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSnapshotLoaderReportRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TSnapshotLoaderReportRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TSnapshotLoaderReportRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.TaskId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TSnapshotLoaderReportRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_type", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.TaskType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TSnapshotLoaderReportRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFinishedNum() { + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finished_num", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FinishedNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSnapshotLoaderReportRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTotalNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_num", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TotalNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSnapshotLoaderReportRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.JobId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSnapshotLoaderReportRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("task_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.TaskId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSnapshotLoaderReportRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("task_type", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(p.TaskType)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TSnapshotLoaderReportRequest) field4Length() int { + l := 0 + if p.IsSetFinishedNum() { + l += bthrift.Binary.FieldBeginLength("finished_num", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.FinishedNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSnapshotLoaderReportRequest) field5Length() int { + l := 0 + if p.IsSetTotalNum() { + l += bthrift.Binary.FieldBeginLength("total_num", thrift.I32, 5) + l += bthrift.Binary.I32Length(*p.TotalNum) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFrontendPingFrontendRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetClusterId bool = false + var issetToken bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetClusterId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetToken = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } 
+ } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetClusterId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetToken { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendRequest[fieldId])) +} + +func (p *TFrontendPingFrontendRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ClusterId = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Token = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DeployMode = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFrontendPingFrontendRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFrontendPingFrontendRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendPingFrontendRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFrontendPingFrontendRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFrontendPingFrontendRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clusterId", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ClusterId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", 
thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDeployMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "deployMode", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DeployMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendPingFrontendRequest) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("clusterId", thrift.I32, 1) + l += bthrift.Binary.I32Length(p.ClusterId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.Token) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendRequest) field3Length() int { + l := 0 + if p.IsSetDeployMode() { + l += bthrift.Binary.FieldBeginLength("deployMode", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.DeployMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TDiskInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetDirType bool = false + var issetDir bool = false + var issetFilesystem bool = false + var issetBlocks bool = false + var issetUsed bool = false + var issetAvailable bool = false + var issetUseRate bool = false + var issetMountedOn bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDirType = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDir = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFilesystem = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetBlocks = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUsed = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetAvailable = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUseRate = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetMountedOn = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetDirType { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetDir { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetFilesystem { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetBlocks { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetUsed { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetAvailable { + fieldId = 6 + goto RequiredFieldNotSetError + } + + if !issetUseRate { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetMountedOn { + fieldId = 8 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDiskInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TDiskInfo[fieldId])) +} + +func (p *TDiskInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DirType = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Dir = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Filesystem = v + + } + return offset, nil +} + +func 
(p *TDiskInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Blocks = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Used = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Available = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.UseRate = v + + } + return offset, nil +} + +func (p *TDiskInfo) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MountedOn = v + + } + return offset, nil +} + +// for compatibility +func (p *TDiskInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TDiskInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDiskInfo") + if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TDiskInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TDiskInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dirType", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.DirType) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dir", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Dir) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filesystem", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Filesystem) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} 
+ +func (p *TDiskInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "blocks", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Blocks) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "used", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Used) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "available", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Available) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "useRate", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], p.UseRate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mountedOn", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.MountedOn) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TDiskInfo) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("dirType", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.DirType) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("dir", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.Dir) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("filesystem", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Filesystem) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("blocks", thrift.I64, 4) + l += bthrift.Binary.I64Length(p.Blocks) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("used", thrift.I64, 5) + l += bthrift.Binary.I64Length(p.Used) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field6Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("available", thrift.I64, 6) + l += bthrift.Binary.I64Length(p.Available) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("useRate", thrift.I32, 7) + l += bthrift.Binary.I32Length(p.UseRate) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TDiskInfo) field8Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("mountedOn", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(p.MountedOn) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + 
var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + var issetMsg bool = false + var issetQueryPort bool = false + var issetRpcPort bool = false + var issetReplayedJournalId bool = false + var issetVersion bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetMsg = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetQueryPort = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetRpcPort = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetReplayedJournalId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetVersion = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + 
if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetMsg { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetQueryPort { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetRpcPort { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetReplayedJournalId { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetVersion { + fieldId = 6 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendResult_[fieldId])) +} + +func (p *TFrontendPingFrontendResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Status = TFrontendPingFrontendStatusCode(v) + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Msg = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.QueryPort = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RpcPort = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ReplayedJournalId = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Version = v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + 
p.LastStartupTime = &v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField8(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DiskInfos = make([]*TDiskInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTDiskInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.DiskInfos = append(p.DiskInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ProcessUUID = &v + + } + return offset, nil +} + +func (p *TFrontendPingFrontendResult_) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ArrowFlightSqlPort = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFrontendPingFrontendResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFrontendPingFrontendResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendPingFrontendResult") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFrontendPingFrontendResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Status)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "msg", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Msg) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField3(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryPort", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], p.QueryPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rpcPort", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RpcPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replayedJournalId", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], p.ReplayedJournalId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Version) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLastStartupTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lastStartupTime", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LastStartupTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDiskInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "diskInfos", thrift.LIST, 8) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.DiskInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProcessUUID() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "processUUID", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ProcessUUID) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendPingFrontendResult_) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetArrowFlightSqlPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "arrowFlightSqlPort", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ArrowFlightSqlPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendPingFrontendResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.Status)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field2Length() int { + l := 0 + l += 
bthrift.Binary.FieldBeginLength("msg", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.Msg) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("queryPort", thrift.I32, 3) + l += bthrift.Binary.I32Length(p.QueryPort) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("rpcPort", thrift.I32, 4) + l += bthrift.Binary.I32Length(p.RpcPort) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("replayedJournalId", thrift.I64, 5) + l += bthrift.Binary.I64Length(p.ReplayedJournalId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field6Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("version", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(p.Version) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFrontendPingFrontendResult_) field7Length() int { + l := 0 + if p.IsSetLastStartupTime() { + l += bthrift.Binary.FieldBeginLength("lastStartupTime", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.LastStartupTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFrontendPingFrontendResult_) field8Length() int { + l := 0 + if p.IsSetDiskInfos() { + l += bthrift.Binary.FieldBeginLength("diskInfos", thrift.LIST, 8) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DiskInfos)) + for _, v := range p.DiskInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFrontendPingFrontendResult_) field9Length() int { + l := 0 + if p.IsSetProcessUUID() { + l += bthrift.Binary.FieldBeginLength("processUUID", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.ProcessUUID) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFrontendPingFrontendResult_) field10Length() int { + l := 0 + if p.IsSetArrowFlightSqlPort() { + l += bthrift.Binary.FieldBeginLength("arrowFlightSqlPort", thrift.I32, 10) + l += bthrift.Binary.I32Length(*p.ArrowFlightSqlPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPropertyVal) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPropertyVal[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPropertyVal) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StrVal = &v + + } + return offset, nil +} + +func (p *TPropertyVal) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IntVal = &v + + } + return offset, nil +} + +func (p *TPropertyVal) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LongVal = &v + + } + return offset, nil +} + +func (p *TPropertyVal) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BoolVal = &v + + } + return offset, nil +} + +// for compatibility +func (p *TPropertyVal) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPropertyVal) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPropertyVal") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPropertyVal) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPropertyVal") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPropertyVal) 
fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStrVal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strVal", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StrVal) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPropertyVal) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIntVal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intVal", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.IntVal) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPropertyVal) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLongVal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "longVal", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LongVal) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPropertyVal) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBoolVal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "boolVal", thrift.BOOL, 4) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.BoolVal) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPropertyVal) field1Length() int { + l := 0 + if p.IsSetStrVal() { + l += bthrift.Binary.FieldBeginLength("strVal", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.StrVal) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPropertyVal) field2Length() int { + l := 0 + if p.IsSetIntVal() { + l += bthrift.Binary.FieldBeginLength("intVal", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.IntVal) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPropertyVal) field3Length() int { + l := 0 + if p.IsSetLongVal() { + l += bthrift.Binary.FieldBeginLength("longVal", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.LongVal) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPropertyVal) field4Length() int { + l := 0 + if p.IsSetBoolVal() { + l += bthrift.Binary.FieldBeginLength("boolVal", thrift.BOOL, 4) + l += bthrift.Binary.BoolLength(*p.BoolVal) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWaitingTxnStatusRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = 
p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TWaitingTxnStatusRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnId = &v + + } + return offset, nil +} + +func (p *TWaitingTxnStatusRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Label = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWaitingTxnStatusRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWaitingTxnStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWaitingTxnStatusRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWaitingTxnStatusRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWaitingTxnStatusRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWaitingTxnStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWaitingTxnStatusRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + 
offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWaitingTxnStatusRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWaitingTxnStatusRequest) field1Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWaitingTxnStatusRequest) field2Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWaitingTxnStatusRequest) field3Length() int { + l := 0 + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Label) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWaitingTxnStatusResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TWaitingTxnStatusResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TWaitingTxnStatusResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TxnStatusId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TWaitingTxnStatusResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TWaitingTxnStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWaitingTxnStatusResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TWaitingTxnStatusResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TWaitingTxnStatusResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TWaitingTxnStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWaitingTxnStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnStatusId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_status_id", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TxnStatusId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWaitingTxnStatusResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWaitingTxnStatusResult_) field2Length() int { + l := 0 + if p.IsSetTxnStatusId() { + l += bthrift.Binary.FieldBeginLength("txn_status_id", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.TxnStatusId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TInitExternalCtlMetaRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == 
thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CatalogId = &v + + } + return offset, nil +} + +func (p *TInitExternalCtlMetaRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TInitExternalCtlMetaRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TInitExternalCtlMetaRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TInitExternalCtlMetaRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TInitExternalCtlMetaRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TInitExternalCtlMetaRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TInitExternalCtlMetaRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TInitExternalCtlMetaRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TInitExternalCtlMetaRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TInitExternalCtlMetaRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tableId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TInitExternalCtlMetaRequest) field1Length() int { + l := 0 + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalogId", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.CatalogId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TInitExternalCtlMetaRequest) field2Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TInitExternalCtlMetaRequest) field3Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("tableId", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TInitExternalCtlMetaResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", 
p, fieldId, fieldIDToName_TInitExternalCtlMetaResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TInitExternalCtlMetaResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxJournalId = &v + + } + return offset, nil +} + +func (p *TInitExternalCtlMetaResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Status = &v + + } + return offset, nil +} + +// for compatibility +func (p *TInitExternalCtlMetaResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TInitExternalCtlMetaResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TInitExternalCtlMetaResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TInitExternalCtlMetaResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TInitExternalCtlMetaResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TInitExternalCtlMetaResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxJournalId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "maxJournalId", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.MaxJournalId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TInitExternalCtlMetaResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Status) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TInitExternalCtlMetaResult_) field1Length() int { + l := 0 + if p.IsSetMaxJournalId() { + l += bthrift.Binary.FieldBeginLength("maxJournalId", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.MaxJournalId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TInitExternalCtlMetaResult_) field2Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Status) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += 
l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err 
!= nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMetadataTableRequestParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TMetadataTableRequestParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TMetadataType(v) + p.MetadataType = &tmp + + } + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTIcebergMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.IcebergMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTBackendsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BackendsMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnsName = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnsName = append(p.ColumnsName, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTFrontendsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.FrontendsMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CurrentUserIdent = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField7(buf []byte) (int, 
error) { + offset := 0 + + tmp := plannodes.NewTQueriesMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueriesMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTMaterializedViewsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MaterializedViewsMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField9(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTJobsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.JobsMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTTasksMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TasksMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTPartitionsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PartitionsMetadataParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField12(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTMetaCacheStatsParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MetaCacheStatsParams = tmp + return offset, nil +} + +func (p *TMetadataTableRequestParams) FastReadField13(buf []byte) (int, error) { + offset := 0 + + tmp := plannodes.NewTPartitionValuesMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PartitionValuesMetadataParams = tmp + return offset, nil +} + +// for compatibility +func (p *TMetadataTableRequestParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *TMetadataTableRequestParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMetadataTableRequestParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TMetadataTableRequestParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMetadataTableRequestParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += 
p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TMetadataTableRequestParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetadataType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metadata_type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MetadataType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIcebergMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_metadata_params", thrift.STRUCT, 2) + offset += p.IcebergMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendsMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backends_metadata_params", thrift.STRUCT, 3) + offset += p.BackendsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnsName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_name", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ColumnsName { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFrontendsMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "frontends_metadata_params", thrift.STRUCT, 5) + offset += p.FrontendsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 6) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueriesMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queries_metadata_params", thrift.STRUCT, 7) + offset += p.QueriesMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func 
(p *TMetadataTableRequestParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaterializedViewsMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "materialized_views_metadata_params", thrift.STRUCT, 8) + offset += p.MaterializedViewsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobsMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jobs_metadata_params", thrift.STRUCT, 9) + offset += p.JobsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTasksMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks_metadata_params", thrift.STRUCT, 10) + offset += p.TasksMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionsMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions_metadata_params", thrift.STRUCT, 11) + offset += p.PartitionsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetaCacheStatsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta_cache_stats_params", thrift.STRUCT, 12) + offset += p.MetaCacheStatsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionValuesMetadataParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_values_metadata_params", thrift.STRUCT, 13) + offset += p.PartitionValuesMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetadataTableRequestParams) field1Length() int { + l := 0 + if p.IsSetMetadataType() { + l += bthrift.Binary.FieldBeginLength("metadata_type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.MetadataType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field2Length() int { + l := 0 + if p.IsSetIcebergMetadataParams() { + l += bthrift.Binary.FieldBeginLength("iceberg_metadata_params", thrift.STRUCT, 2) + l += p.IcebergMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field3Length() int { + l := 0 + if p.IsSetBackendsMetadataParams() { + l += bthrift.Binary.FieldBeginLength("backends_metadata_params", thrift.STRUCT, 3) + l += p.BackendsMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field4Length() int { + l := 0 + if p.IsSetColumnsName() { + l 
+= bthrift.Binary.FieldBeginLength("columns_name", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsName)) + for _, v := range p.ColumnsName { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field5Length() int { + l := 0 + if p.IsSetFrontendsMetadataParams() { + l += bthrift.Binary.FieldBeginLength("frontends_metadata_params", thrift.STRUCT, 5) + l += p.FrontendsMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field6Length() int { + l := 0 + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 6) + l += p.CurrentUserIdent.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field7Length() int { + l := 0 + if p.IsSetQueriesMetadataParams() { + l += bthrift.Binary.FieldBeginLength("queries_metadata_params", thrift.STRUCT, 7) + l += p.QueriesMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field8Length() int { + l := 0 + if p.IsSetMaterializedViewsMetadataParams() { + l += bthrift.Binary.FieldBeginLength("materialized_views_metadata_params", thrift.STRUCT, 8) + l += p.MaterializedViewsMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field9Length() int { + l := 0 + if p.IsSetJobsMetadataParams() { + l += bthrift.Binary.FieldBeginLength("jobs_metadata_params", thrift.STRUCT, 9) + l += p.JobsMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field10Length() int { + l := 0 + if p.IsSetTasksMetadataParams() { + l += bthrift.Binary.FieldBeginLength("tasks_metadata_params", thrift.STRUCT, 10) + l += p.TasksMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field11Length() int { + l := 0 + if p.IsSetPartitionsMetadataParams() { + l += bthrift.Binary.FieldBeginLength("partitions_metadata_params", thrift.STRUCT, 11) + l += p.PartitionsMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field12Length() int { + l := 0 + if p.IsSetMetaCacheStatsParams() { + l += bthrift.Binary.FieldBeginLength("meta_cache_stats_params", thrift.STRUCT, 12) + l += p.MetaCacheStatsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetadataTableRequestParams) field13Length() int { + l := 0 + if p.IsSetPartitionValuesMetadataParams() { + l += bthrift.Binary.FieldBeginLength("partition_values_metadata_params", thrift.STRUCT, 13) + l += p.PartitionValuesMetadataParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSchemaTableRequestParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = 
p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSchemaTableRequestParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSchemaTableRequestParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnsName = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnsName = append(p.ColumnsName, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TSchemaTableRequestParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CurrentUserIdent = tmp + return offset, nil +} + +func (p *TSchemaTableRequestParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); 
err != nil { + return offset, err + } else { + offset += l + p.ReplayToOtherFe = &v + + } + return offset, nil +} + +func (p *TSchemaTableRequestParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Catalog = &v + + } + return offset, nil +} + +func (p *TSchemaTableRequestParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +// for compatibility +func (p *TSchemaTableRequestParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSchemaTableRequestParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSchemaTableRequestParams") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TSchemaTableRequestParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSchemaTableRequestParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TSchemaTableRequestParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnsName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_name", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ColumnsName { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSchemaTableRequestParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 2) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSchemaTableRequestParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReplayToOtherFe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replay_to_other_fe", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ReplayToOtherFe) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSchemaTableRequestParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, 
*p.Catalog) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSchemaTableRequestParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSchemaTableRequestParams) field1Length() int { + l := 0 + if p.IsSetColumnsName() { + l += bthrift.Binary.FieldBeginLength("columns_name", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsName)) + for _, v := range p.ColumnsName { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSchemaTableRequestParams) field2Length() int { + l := 0 + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 2) + l += p.CurrentUserIdent.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSchemaTableRequestParams) field3Length() int { + l := 0 + if p.IsSetReplayToOtherFe() { + l += bthrift.Binary.FieldBeginLength("replay_to_other_fe", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.ReplayToOtherFe) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSchemaTableRequestParams) field4Length() int { + l := 0 + if p.IsSetCatalog() { + l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Catalog) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSchemaTableRequestParams) field5Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFetchSchemaTableDataRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ClusterName = &v + + } + return offset, nil +} + +func (p *TFetchSchemaTableDataRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TSchemaTableName(v) + p.SchemaTableName = &tmp + + } + return offset, nil +} + +func (p *TFetchSchemaTableDataRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := NewTMetadataTableRequestParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MetadaTableParams = tmp + return offset, nil +} + +func (p *TFetchSchemaTableDataRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := NewTSchemaTableRequestParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SchemaTableParams = tmp + return offset, nil +} + +// for compatibility +func (p *TFetchSchemaTableDataRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFetchSchemaTableDataRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSchemaTableDataRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFetchSchemaTableDataRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFetchSchemaTableDataRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFetchSchemaTableDataRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetClusterName() { + offset 
+= bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClusterName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSchemaTableDataRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSchemaTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_table_name", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.SchemaTableName)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSchemaTableDataRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetadaTableParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metada_table_params", thrift.STRUCT, 3) + offset += p.MetadaTableParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSchemaTableDataRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSchemaTableParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_table_params", thrift.STRUCT, 4) + offset += p.SchemaTableParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSchemaTableDataRequest) field1Length() int { + l := 0 + if p.IsSetClusterName() { + l += bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.ClusterName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSchemaTableDataRequest) field2Length() int { + l := 0 + if p.IsSetSchemaTableName() { + l += bthrift.Binary.FieldBeginLength("schema_table_name", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(*p.SchemaTableName)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSchemaTableDataRequest) field3Length() int { + l := 0 + if p.IsSetMetadaTableParams() { + l += bthrift.Binary.FieldBeginLength("metada_table_params", thrift.STRUCT, 3) + l += p.MetadaTableParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSchemaTableDataRequest) field4Length() int { + l := 0 + if p.IsSetSchemaTableParams() { + l += bthrift.Binary.FieldBeginLength("schema_table_params", thrift.STRUCT, 4) + l += p.SchemaTableParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSchemaTableDataResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + 
offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchSchemaTableDataResult_[fieldId])) +} + +func (p *TFetchSchemaTableDataResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TFetchSchemaTableDataResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DataBatch = make([]*data.TRow, 0, size) + for i := 0; i < size; i++ { + _elem := data.NewTRow() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.DataBatch = append(p.DataBatch, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TFetchSchemaTableDataResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFetchSchemaTableDataResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSchemaTableDataResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFetchSchemaTableDataResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFetchSchemaTableDataResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFetchSchemaTableDataResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TFetchSchemaTableDataResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDataBatch() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_batch", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.DataBatch { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSchemaTableDataResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TFetchSchemaTableDataResult_) field2Length() int { + l := 0 + if p.IsSetDataBatch() { + l += bthrift.Binary.FieldBeginLength("data_batch", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DataBatch)) + for _, v := range p.DataBatch { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMySqlLoadAcquireTokenResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMySqlLoadAcquireTokenResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read 
field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TMySqlLoadAcquireTokenResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TMySqlLoadAcquireTokenResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +// for compatibility +func (p *TMySqlLoadAcquireTokenResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TMySqlLoadAcquireTokenResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMySqlLoadAcquireTokenResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TMySqlLoadAcquireTokenResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMySqlLoadAcquireTokenResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TMySqlLoadAcquireTokenResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMySqlLoadAcquireTokenResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMySqlLoadAcquireTokenResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMySqlLoadAcquireTokenResult_) field2Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletCooldownInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletCooldownInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTabletCooldownInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TabletId = &v + + } + return offset, nil +} + +func (p *TTabletCooldownInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CooldownReplicaId = &v + + } + return offset, nil +} + +func (p *TTabletCooldownInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CooldownMetaId = tmp + return offset, nil +} + +// for compatibility +func (p *TTabletCooldownInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTabletCooldownInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTabletCooldownInfo") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTabletCooldownInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTabletCooldownInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTabletCooldownInfo) 
fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletCooldownInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCooldownReplicaId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cooldown_replica_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CooldownReplicaId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletCooldownInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCooldownMetaId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cooldown_meta_id", thrift.STRUCT, 3) + offset += p.CooldownMetaId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletCooldownInfo) field1Length() int { + l := 0 + if p.IsSetTabletId() { + l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.TabletId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletCooldownInfo) field2Length() int { + l := 0 + if p.IsSetCooldownReplicaId() { + l += bthrift.Binary.FieldBeginLength("cooldown_replica_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.CooldownReplicaId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletCooldownInfo) field3Length() int { + l := 0 + if p.IsSetCooldownMetaId() { + l += bthrift.Binary.FieldBeginLength("cooldown_meta_id", thrift.STRUCT, 3) + l += p.CooldownMetaId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TConfirmUnusedRemoteFilesRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesRequest[fieldId]), err) +SkipFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ConfirmList = make([]*TTabletCooldownInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTabletCooldownInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.ConfirmList = append(p.ConfirmList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TConfirmUnusedRemoteFilesRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TConfirmUnusedRemoteFilesRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TConfirmUnusedRemoteFilesRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TConfirmUnusedRemoteFilesRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TConfirmUnusedRemoteFilesRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TConfirmUnusedRemoteFilesRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConfirmList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "confirm_list", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.ConfirmList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TConfirmUnusedRemoteFilesRequest) field1Length() int { + l := 0 + if p.IsSetConfirmList() { + l += bthrift.Binary.FieldBeginLength("confirm_list", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ConfirmList)) + for _, v := range p.ConfirmList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TConfirmUnusedRemoteFilesResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TConfirmUnusedRemoteFilesResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ConfirmedTablets = make([]types.TTabletId, 0, size) + for i := 0; i < size; i++ { + var _elem types.TTabletId + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ConfirmedTablets = append(p.ConfirmedTablets, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TConfirmUnusedRemoteFilesResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TConfirmUnusedRemoteFilesResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TConfirmUnusedRemoteFilesResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TConfirmUnusedRemoteFilesResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TConfirmUnusedRemoteFilesResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TConfirmUnusedRemoteFilesResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConfirmedTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "confirmed_tablets", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.ConfirmedTablets { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TConfirmUnusedRemoteFilesResult_) field1Length() int { + l := 0 + if p.IsSetConfirmedTablets() { + l 
+= bthrift.Binary.FieldBeginLength("confirmed_tablets", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.ConfirmedTablets)) + var tmpV types.TTabletId + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.ConfirmedTablets) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPrivilegeCtrl) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetPrivHier bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPrivHier = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.SET { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetPrivHier { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPrivilegeCtrl[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + 
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPrivilegeCtrl[fieldId])) +} + +func (p *TPrivilegeCtrl) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PrivHier = TPrivilegeHier(v) + + } + return offset, nil +} + +func (p *TPrivilegeCtrl) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Ctl = &v + + } + return offset, nil +} + +func (p *TPrivilegeCtrl) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TPrivilegeCtrl) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Tbl = &v + + } + return offset, nil +} + +func (p *TPrivilegeCtrl) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadSetBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Cols = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.Cols = append(p.Cols, _elem) + } + if l, err := bthrift.Binary.ReadSetEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPrivilegeCtrl) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Res = &v + + } + return offset, nil +} + +// for compatibility +func (p *TPrivilegeCtrl) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPrivilegeCtrl) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPrivilegeCtrl") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPrivilegeCtrl) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPrivilegeCtrl") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPrivilegeCtrl) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_hier", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.PrivHier)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPrivilegeCtrl) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCtl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ctl", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Ctl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPrivilegeCtrl) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPrivilegeCtrl) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTbl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tbl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPrivilegeCtrl) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCols() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cols", thrift.SET, 5) + setBeginOffset := offset + offset += bthrift.Binary.SetBeginLength(thrift.STRING, 0) + + for i := 0; i < len(p.Cols); i++ { + for j := i + 1; j < len(p.Cols); j++ { + if func(tgt, src string) bool { + if strings.Compare(tgt, src) != 0 { + return false + } + return true + }(p.Cols[i], p.Cols[j]) { + panic(fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) + } + } + } + var length int + for _, v := range p.Cols { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteSetBegin(buf[setBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteSetEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPrivilegeCtrl) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "res", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Res) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPrivilegeCtrl) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("priv_hier", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.PrivHier)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPrivilegeCtrl) field2Length() int { + l := 0 + if p.IsSetCtl() { + l += bthrift.Binary.FieldBeginLength("ctl", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Ctl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPrivilegeCtrl) field3Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPrivilegeCtrl) field4Length() int { + l := 0 + if p.IsSetTbl() { + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 4) + l += 
bthrift.Binary.StringLengthNocopy(*p.Tbl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPrivilegeCtrl) field5Length() int { + l := 0 + if p.IsSetCols() { + l += bthrift.Binary.FieldBeginLength("cols", thrift.SET, 5) + l += bthrift.Binary.SetBeginLength(thrift.STRING, len(p.Cols)) + + for i := 0; i < len(p.Cols); i++ { + for j := i + 1; j < len(p.Cols); j++ { + if func(tgt, src string) bool { + if strings.Compare(tgt, src) != 0 { + return false + } + return true + }(p.Cols[i], p.Cols[j]) { + panic(fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) + } + } + } + for _, v := range p.Cols { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.SetEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPrivilegeCtrl) field6Length() int { + l := 0 + if p.IsSetRes() { + l += bthrift.Binary.FieldBeginLength("res", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.Res) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetUser bool = false + var issetPasswd bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetUser = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPasswd = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetUser { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPasswd { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthRequest[fieldId])) +} + +func (p *TCheckAuthRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.User = v + + } + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Passwd = v + + } + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTPrivilegeCtrl() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PrivCtrl = tmp + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TPrivilegeType(v) + p.PrivType = &tmp + + } + return offset, nil +} + +func (p *TCheckAuthRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ThriftRpcTimeoutMs = &v + + } + return offset, nil +} + +// for compatibility +func (p *TCheckAuthRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCheckAuthRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckAuthRequest") + if p != nil { + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCheckAuthRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCheckAuthRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCheckAuthRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckAuthRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCheckAuthRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCheckAuthRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckAuthRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPrivCtrl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_ctrl", thrift.STRUCT, 5) + offset += p.PrivCtrl.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckAuthRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPrivType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_type", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.PrivType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckAuthRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetThriftRpcTimeoutMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], 
*p.ThriftRpcTimeoutMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCheckAuthRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthRequest) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.User) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCheckAuthRequest) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(p.Passwd) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCheckAuthRequest) field4Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthRequest) field5Length() int { + l := 0 + if p.IsSetPrivCtrl() { + l += bthrift.Binary.FieldBeginLength("priv_ctrl", thrift.STRUCT, 5) + l += p.PrivCtrl.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthRequest) field6Length() int { + l := 0 + if p.IsSetPrivType() { + l += bthrift.Binary.FieldBeginLength("priv_type", thrift.I32, 6) + l += bthrift.Binary.I32Length(int32(*p.PrivType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthRequest) field7Length() int { + l := 0 + if p.IsSetThriftRpcTimeoutMs() { + l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCheckAuthResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TCheckAuthResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthResult_[fieldId])) +} + +func (p *TCheckAuthResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +// for compatibility +func (p *TCheckAuthResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCheckAuthResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckAuthResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCheckAuthResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCheckAuthResult") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCheckAuthResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCheckAuthResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TGetQueryStatsRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = 
p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetQueryStatsRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetQueryStatsRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TQueryStatsType(v) + p.Type = &tmp + + } + return offset, nil +} + +func (p *TGetQueryStatsRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Catalog = &v + + } + return offset, nil +} + +func (p *TGetQueryStatsRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TGetQueryStatsRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Tbl = &v + + } + return offset, nil +} + +func (p *TGetQueryStatsRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReplicaId = &v + + } + return offset, nil +} + +func (p *TGetQueryStatsRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ReplicaIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := 
bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ReplicaIds = append(p.ReplicaIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TGetQueryStatsRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetQueryStatsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetQueryStatsRequest") + if p != nil { + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetQueryStatsRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetQueryStatsRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetQueryStatsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Catalog) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTbl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tbl) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReplicaId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replica_id", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReplicaId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReplicaIds() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "replica_ids", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.ReplicaIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetQueryStatsRequest) field1Length() int { + l := 0 + if p.IsSetType() { + l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.Type)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetQueryStatsRequest) field2Length() int { + l := 0 + if p.IsSetCatalog() { + l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Catalog) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetQueryStatsRequest) field3Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetQueryStatsRequest) field4Length() int { + l := 0 + if p.IsSetTbl() { + l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Tbl) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetQueryStatsRequest) field5Length() int { + l := 0 + if p.IsSetReplicaId() { + l += bthrift.Binary.FieldBeginLength("replica_id", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.ReplicaId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetQueryStatsRequest) field6Length() int { + l := 0 + if p.IsSetReplicaIds() { + l += bthrift.Binary.FieldBeginLength("replica_ids", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.ReplicaIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.ReplicaIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableQueryStats) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableQueryStats[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableQueryStats) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Field = &v + + } + return offset, nil +} + +func (p *TTableQueryStats) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.QueryStats = &v + + } + return offset, nil +} + +func (p *TTableQueryStats) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FilterStats = &v + + } + return offset, nil +} + +// for compatibility +func (p *TTableQueryStats) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTableQueryStats) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableQueryStats") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTableQueryStats) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTableQueryStats") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTableQueryStats) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetField() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "field", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Field) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableQueryStats) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_stats", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryStats) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p 
*TTableQueryStats) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFilterStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filter_stats", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilterStats) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableQueryStats) field1Length() int { + l := 0 + if p.IsSetField() { + l += bthrift.Binary.FieldBeginLength("field", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Field) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableQueryStats) field2Length() int { + l := 0 + if p.IsSetQueryStats() { + l += bthrift.Binary.FieldBeginLength("query_stats", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.QueryStats) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableQueryStats) field3Length() int { + l := 0 + if p.IsSetFilterStats() { + l += bthrift.Binary.FieldBeginLength("filter_stats", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.FilterStats) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableIndexQueryStats) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableIndexQueryStats[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableIndexQueryStats) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + 
p.IndexName = &v + + } + return offset, nil +} + +func (p *TTableIndexQueryStats) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableStats = make([]*TTableQueryStats, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTableQueryStats() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TableStats = append(p.TableStats, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TTableIndexQueryStats) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTableIndexQueryStats) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableIndexQueryStats") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTableIndexQueryStats) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTableIndexQueryStats") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTableIndexQueryStats) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIndexName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "index_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.IndexName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableIndexQueryStats) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_stats", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TableStats { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableIndexQueryStats) field1Length() int { + l := 0 + if p.IsSetIndexName() { + l += bthrift.Binary.FieldBeginLength("index_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.IndexName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableIndexQueryStats) field2Length() int { + l := 0 + if p.IsSetTableStats() { + l += bthrift.Binary.FieldBeginLength("table_stats", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableStats)) + for _, v := range p.TableStats { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatsResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, 
fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatsResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryStatsResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TQueryStatsResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SimpleResult_ = make(map[string]int64, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + 
offset += l + + _val = v + + } + + p.SimpleResult_[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TQueryStatsResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableStats = make([]*TTableQueryStats, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTableQueryStats() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TableStats = append(p.TableStats, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TQueryStatsResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableVerbosStats = make([]*TTableIndexQueryStats, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTableIndexQueryStats() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TableVerbosStats = append(p.TableVerbosStats, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TQueryStatsResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletStats = make(map[int64]int64, size) + for i := 0; i < size; i++ { + var _key int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.TabletStats[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TQueryStatsResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TQueryStatsResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryStatsResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TQueryStatsResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryStatsResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TQueryStatsResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryStatsResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSimpleResult_() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "simple_result", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I64, 0) + var length int + for k, v := range p.SimpleResult_ { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryStatsResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_stats", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TableStats { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryStatsResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableVerbosStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_verbos_stats", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TableVerbosStats { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryStatsResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletStats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_stats", thrift.MAP, 5) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) + var length int + for k, v := range p.TabletStats { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryStatsResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatsResult_) field2Length() int { + l := 0 + if p.IsSetSimpleResult_() { + l += bthrift.Binary.FieldBeginLength("simple_result", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I64, len(p.SimpleResult_)) + for k, v := range p.SimpleResult_ { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += 
bthrift.Binary.I64Length(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatsResult_) field3Length() int { + l := 0 + if p.IsSetTableStats() { + l += bthrift.Binary.FieldBeginLength("table_stats", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableStats)) + for _, v := range p.TableStats { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatsResult_) field4Length() int { + l := 0 + if p.IsSetTableVerbosStats() { + l += bthrift.Binary.FieldBeginLength("table_verbos_stats", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableVerbosStats)) + for _, v := range p.TableVerbosStats { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryStatsResult_) field5Length() int { + l := 0 + if p.IsSetTabletStats() { + l += bthrift.Binary.FieldBeginLength("tablet_stats", thrift.MAP, 5) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.TabletStats)) + var tmpK int64 + var tmpV int64 + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.TabletStats) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + 
case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBinlogRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Table = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField7(buf []byte) (int, error) { + offset 
:= 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TGetBinlogRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PrevCommitSeq = &v + + } + return offset, nil +} + +// for compatibility +func (p *TGetBinlogRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogRequest") + if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetBinlogRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetBinlogRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPrevCommitSeq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "prev_commit_seq", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.PrevCommitSeq) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBinlogRequest) field1Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field2Length() int { + l := 0 + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field4Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field5Length() int { + l := 0 + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Table) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field6Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) 
field7Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field8Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogRequest) field9Length() int { + l := 0 + if p.IsSetPrevCommitSeq() { + l += bthrift.Binary.FieldBeginLength("prev_commit_seq", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.PrevCommitSeq) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err 
!= nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlog[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBinlog) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CommitSeq = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Timestamp = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TBinlogType(v) + p.Type = &tmp + + } + return offset, nil +} + +func (p *TBinlog) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField5(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TableIds = append(p.TableIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TBinlog) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Data = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Belong = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { 
+ offset += l + p.TableRef = &v + + } + return offset, nil +} + +func (p *TBinlog) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemoveEnableCache = &v + + } + return offset, nil +} + +// for compatibility +func (p *TBinlog) FastWrite(buf []byte) int { + return 0 +} + +func (p *TBinlog) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBinlog") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TBinlog) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBinlog") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TBinlog) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCommitSeq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commit_seq", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CommitSeq) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBinlog) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCompressType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 40) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) + if p.IsSetTimestamp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timestamp", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timestamp) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField41(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFileSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 41) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.FileSize) + if p.IsSetType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField42(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTrimDoubleQuotes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trim_double_quotes", thrift.BOOL, 42) - offset += 
bthrift.Binary.WriteBool(buf[offset:], *p.TrimDoubleQuotes) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField43(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSkipLines() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_lines", thrift.I32, 43) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.SkipLines) + if p.IsSetTableIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ids", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TableIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnableProfile() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_profile", thrift.BOOL, 44) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableProfile) + if p.IsSetData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Data) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField45(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartialUpdate() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partial_update", thrift.BOOL, 45) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.PartialUpdate) + if p.IsSetBelong() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "belong", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Belong) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField46(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableNames() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_names", thrift.LIST, 46) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.TableNames { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetTableRef() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ref", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableRef) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField47(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) fastWriteField9(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadSql() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_sql", thrift.STRING, 47) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LoadSql) + if p.IsSetRemoveEnableCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remove_enable_cache", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.RemoveEnableCache) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField48(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBinlog) field1Length() int { + l := 0 + if p.IsSetCommitSeq() { + l += bthrift.Binary.FieldBeginLength("commit_seq", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.CommitSeq) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field2Length() int { + l := 0 + if p.IsSetTimestamp() { + l += bthrift.Binary.FieldBeginLength("timestamp", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.Timestamp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field3Length() int { + l := 0 + if p.IsSetType() { + l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.Type)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field4Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field5Length() int { + l := 0 + if p.IsSetTableIds() { + l += bthrift.Binary.FieldBeginLength("table_ids", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TableIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TableIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field6Length() int { + l := 0 + if p.IsSetData() { + l += bthrift.Binary.FieldBeginLength("data", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.Data) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field7Length() int { + l := 0 + if p.IsSetBelong() { + l += bthrift.Binary.FieldBeginLength("belong", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.Belong) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field8Length() int { + l := 0 + if p.IsSetTableRef() { + l += bthrift.Binary.FieldBeginLength("table_ref", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.TableRef) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBinlog) field9Length() int { + l := 0 + if p.IsSetRemoveEnableCache() { + l += bthrift.Binary.FieldBeginLength("remove_enable_cache", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(*p.RemoveEnableCache) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = 
p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBinlogResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetBackendId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 48) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *TGetBinlogResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NextCommitSeq = &v + + } + return offset, nil +} + +func (p *TGetBinlogResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := 
bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Binlogs = make([]*TBinlog, 0, size) + for i := 0; i < size; i++ { + _elem := NewTBinlog() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Binlogs = append(p.Binlogs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TGetBinlogResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FeVersion = &v + + } + return offset, nil +} + +func (p *TGetBinlogResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FeMetaVersion = &v + + } + return offset, nil +} + +func (p *TGetBinlogResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + +// for compatibility +func (p *TGetBinlogResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetBinlogResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetBinlogResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField49(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I32, 49) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.Version) + if p.IsSetNextCommitSeq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "next_commit_seq", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.NextCommitSeq) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField50(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func 
(p *TGetBinlogResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLabel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 50) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) - + if p.IsSetBinlogs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlogs", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Binlogs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField51(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBinlogResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnclose() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enclose", thrift.BYTE, 51) - offset += bthrift.Binary.WriteByte(buf[offset:], *p.Enclose) + if p.IsSetFeVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_version", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FeVersion) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField52(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBinlogResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEscape() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "escape", thrift.BYTE, 52) - offset += bthrift.Binary.WriteByte(buf[offset:], *p.Escape) + if p.IsSetFeMetaVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_meta_version", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FeMetaVersion) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) fastWriteField53(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBinlogResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMemtableOnSinkNode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memtable_on_sink_node", thrift.BOOL, 53) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.MemtableOnSinkNode) - + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 6) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutRequest) field1Length() int { +func (p *TGetBinlogResult_) field1Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field2Length() int { +func (p *TGetBinlogResult_) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) + if p.IsSetNextCommitSeq() { + l += bthrift.Binary.FieldBeginLength("next_commit_seq", 
thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.NextCommitSeq) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TStreamLoadPutRequest) field3Length() int { +func (p *TGetBinlogResult_) field3Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) - - l += bthrift.Binary.FieldEndLength() + if p.IsSetBinlogs() { + l += bthrift.Binary.FieldBeginLength("binlogs", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Binlogs)) + for _, v := range p.Binlogs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TStreamLoadPutRequest) field4Length() int { +func (p *TGetBinlogResult_) field4Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(p.Db) + if p.IsSetFeVersion() { + l += bthrift.Binary.FieldBeginLength("fe_version", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.FeVersion) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TStreamLoadPutRequest) field5Length() int { +func (p *TGetBinlogResult_) field5Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(p.Tbl) + if p.IsSetFeMetaVersion() { + l += bthrift.Binary.FieldBeginLength("fe_meta_version", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.FeMetaVersion) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TStreamLoadPutRequest) field6Length() int { +func (p *TGetBinlogResult_) field6Length() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 6) + l += p.MasterAddress.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field7Length() int { +func (p *TGetTabletReplicaInfosRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTabletIds bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTabletIds = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetTabletIds { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read 
struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTabletReplicaInfosRequest[fieldId])) +} + +func (p *TGetTabletReplicaInfosRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TabletIds = append(p.TabletIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TGetTabletReplicaInfosRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetTabletReplicaInfosRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTabletReplicaInfosRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetTabletReplicaInfosRequest) BLength() int { l := 0 - l += bthrift.Binary.FieldBeginLength("loadId", thrift.STRUCT, 7) - l += p.LoadId.BLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TGetTabletReplicaInfosRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadPutRequest) field8Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 8) - l += bthrift.Binary.I64Length(p.TxnId) +func (p *TGetTabletReplicaInfosRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.TabletIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) - l += bthrift.Binary.FieldEndLength() - return l + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) field9Length() int { +func (p *TGetTabletReplicaInfosRequest) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("fileType", thrift.I32, 9) - l += bthrift.Binary.I32Length(int32(p.FileType)) - + l += 
bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TStreamLoadPutRequest) field10Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("formatType", thrift.I32, 10) - l += bthrift.Binary.I32Length(int32(p.FormatType)) +func (p *TGetTabletReplicaInfosResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.FieldEndLength() - return l + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TStreamLoadPutRequest) field11Length() int { - l := 0 - if p.IsSetPath() { - l += bthrift.Binary.FieldBeginLength("path", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Path) +func (p *TGetTabletReplicaInfosResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + p.Status = tmp + return offset, nil } -func (p 
*TStreamLoadPutRequest) field12Length() int { - l := 0 - if p.IsSetColumns() { - l += bthrift.Binary.FieldBeginLength("columns", thrift.STRING, 12) - l += bthrift.Binary.StringLengthNocopy(*p.Columns) +func (p *TGetTabletReplicaInfosResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return l + p.TabletReplicaInfos = make(map[int64][]*types.TReplicaInfo, size) + for i := 0; i < size; i++ { + var _key int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _val := make([]*types.TReplicaInfo, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTReplicaInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _val = append(_val, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TabletReplicaInfos[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TStreamLoadPutRequest) field13Length() int { - l := 0 - if p.IsSetWhere() { - l += bthrift.Binary.FieldBeginLength("where", thrift.STRING, 13) - l += bthrift.Binary.StringLengthNocopy(*p.Where) +func (p *TGetTabletReplicaInfosResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field14Length() int { - l := 0 - if p.IsSetColumnSeparator() { - l += bthrift.Binary.FieldBeginLength("columnSeparator", thrift.STRING, 14) - l += bthrift.Binary.StringLengthNocopy(*p.ColumnSeparator) +// for compatibility +func (p *TGetTabletReplicaInfosResult_) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *TGetTabletReplicaInfosResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTabletReplicaInfosResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) field15Length() int { +func (p *TGetTabletReplicaInfosResult_) BLength() int { l := 0 - if p.IsSetPartitions() { - l += bthrift.Binary.FieldBeginLength("partitions", thrift.STRING, 15) - l += bthrift.Binary.StringLengthNocopy(*p.Partitions) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TGetTabletReplicaInfosResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadPutRequest) field16Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", 
thrift.I64, 16) - l += bthrift.Binary.I64Length(*p.AuthCode) +func (p *TGetTabletReplicaInfosResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - l += bthrift.Binary.FieldEndLength() +func (p *TGetTabletReplicaInfosResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletReplicaInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_replica_infos", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.LIST, 0) + var length int + for k, v := range p.TabletReplicaInfos { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.LIST, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field17Length() int { - l := 0 - if p.IsSetNegative() { - l += bthrift.Binary.FieldBeginLength("negative", thrift.BOOL, 17) - l += bthrift.Binary.BoolLength(*p.Negative) +func (p *TGetTabletReplicaInfosResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field18Length() int { +func (p *TGetTabletReplicaInfosResult_) field1Length() int { l := 0 - if p.IsSetTimeout() { - l += bthrift.Binary.FieldBeginLength("timeout", thrift.I32, 18) - l += bthrift.Binary.I32Length(*p.Timeout) - + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field19Length() int { +func (p *TGetTabletReplicaInfosResult_) field2Length() int { l := 0 - if p.IsSetStrictMode() { - l += bthrift.Binary.FieldBeginLength("strictMode", thrift.BOOL, 19) - l += bthrift.Binary.BoolLength(*p.StrictMode) + if p.IsSetTabletReplicaInfos() { + l += bthrift.Binary.FieldBeginLength("tablet_replica_infos", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.LIST, len(p.TabletReplicaInfos)) + for k, v := range p.TabletReplicaInfos { + l += bthrift.Binary.I64Length(k) + + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field20Length() int { +func (p *TGetTabletReplicaInfosResult_) field3Length() int { l := 0 - if p.IsSetTimezone() { - l += 
bthrift.Binary.FieldBeginLength("timezone", thrift.STRING, 20) - l += bthrift.Binary.StringLengthNocopy(*p.Timezone) + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Token) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field21Length() int { - l := 0 - if p.IsSetExecMemLimit() { - l += bthrift.Binary.FieldBeginLength("execMemLimit", thrift.I64, 21) - l += bthrift.Binary.I64Length(*p.ExecMemLimit) +func (p *TGetSnapshotRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.FieldEndLength() + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + 
if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TStreamLoadPutRequest) field22Length() int { - l := 0 - if p.IsSetIsTempPartition() { - l += bthrift.Binary.FieldBeginLength("isTempPartition", thrift.BOOL, 22) - l += bthrift.Binary.BoolLength(*p.IsTempPartition) +func (p *TGetSnapshotRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field23Length() int { - l := 0 - if p.IsSetStripOuterArray() { - l += bthrift.Binary.FieldBeginLength("strip_outer_array", thrift.BOOL, 23) - l += bthrift.Binary.BoolLength(*p.StripOuterArray) +func (p *TGetSnapshotRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field24Length() int { - l := 0 - if p.IsSetJsonpaths() { - l += bthrift.Binary.FieldBeginLength("jsonpaths", thrift.STRING, 24) - l += bthrift.Binary.StringLengthNocopy(*p.Jsonpaths) +func (p *TGetSnapshotRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field25Length() int { - l := 0 - if p.IsSetThriftRpcTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 25) - l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) +func (p *TGetSnapshotRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Db = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field26Length() int { - l 
:= 0 - if p.IsSetJsonRoot() { - l += bthrift.Binary.FieldBeginLength("json_root", thrift.STRING, 26) - l += bthrift.Binary.StringLengthNocopy(*p.JsonRoot) +func (p *TGetSnapshotRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Table = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field27Length() int { - l := 0 - if p.IsSetMergeType() { - l += bthrift.Binary.FieldBeginLength("merge_type", thrift.I32, 27) - l += bthrift.Binary.I32Length(int32(*p.MergeType)) +func (p *TGetSnapshotRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field28Length() int { - l := 0 - if p.IsSetDeleteCondition() { - l += bthrift.Binary.FieldBeginLength("delete_condition", thrift.STRING, 28) - l += bthrift.Binary.StringLengthNocopy(*p.DeleteCondition) +func (p *TGetSnapshotRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LabelName = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field29Length() int { - l := 0 - if p.IsSetSequenceCol() { - l += bthrift.Binary.FieldBeginLength("sequence_col", thrift.STRING, 29) - l += bthrift.Binary.StringLengthNocopy(*p.SequenceCol) +func (p *TGetSnapshotRequest) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SnapshotName = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field30Length() int { - l := 0 - if p.IsSetNumAsString() { - l += bthrift.Binary.FieldBeginLength("num_as_string", thrift.BOOL, 30) - l += bthrift.Binary.BoolLength(*p.NumAsString) +func (p *TGetSnapshotRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TSnapshotType(v) + p.SnapshotType = &tmp - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field31Length() int { - l := 0 - if p.IsSetFuzzyParse() { - l += bthrift.Binary.FieldBeginLength("fuzzy_parse", thrift.BOOL, 31) - l += bthrift.Binary.BoolLength(*p.FuzzyParse) +func (p *TGetSnapshotRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableCompress = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadPutRequest) field32Length() int { - l := 0 - if p.IsSetLineDelimiter() { - l += bthrift.Binary.FieldBeginLength("line_delimiter", thrift.STRING, 32) - l += bthrift.Binary.StringLengthNocopy(*p.LineDelimiter) +// for compatibility +func (p *TGetSnapshotRequest) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *TGetSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + 
offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetSnapshotRequest") + if p != nil { + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TStreamLoadPutRequest) field33Length() int { +func (p *TGetSnapshotRequest) BLength() int { l := 0 - if p.IsSetReadJsonByLine() { - l += bthrift.Binary.FieldBeginLength("read_json_by_line", thrift.BOOL, 33) - l += bthrift.Binary.BoolLength(*p.ReadJsonByLine) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TGetSnapshotRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadPutRequest) field34Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 34) - l += bthrift.Binary.StringLengthNocopy(*p.Token) +func (p *TGetSnapshotRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field35Length() int { - l := 0 - if p.IsSetSendBatchParallelism() { - l += bthrift.Binary.FieldBeginLength("send_batch_parallelism", thrift.I32, 35) - l += bthrift.Binary.I32Length(*p.SendBatchParallelism) +func (p *TGetSnapshotRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field36Length() int { - l := 0 - if p.IsSetMaxFilterRatio() { - l += bthrift.Binary.FieldBeginLength("max_filter_ratio", thrift.DOUBLE, 36) - l += bthrift.Binary.DoubleLength(*p.MaxFilterRatio) +func (p *TGetSnapshotRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) 
field37Length() int { - l := 0 - if p.IsSetLoadToSingleTablet() { - l += bthrift.Binary.FieldBeginLength("load_to_single_tablet", thrift.BOOL, 37) - l += bthrift.Binary.BoolLength(*p.LoadToSingleTablet) +func (p *TGetSnapshotRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field38Length() int { - l := 0 - if p.IsSetHeaderType() { - l += bthrift.Binary.FieldBeginLength("header_type", thrift.STRING, 38) - l += bthrift.Binary.StringLengthNocopy(*p.HeaderType) - - l += bthrift.Binary.FieldEndLength() +func (p *TGetSnapshotRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field39Length() int { - l := 0 - if p.IsSetHiddenColumns() { - l += bthrift.Binary.FieldBeginLength("hidden_columns", thrift.STRING, 39) - l += bthrift.Binary.StringLengthNocopy(*p.HiddenColumns) +func (p *TGetSnapshotRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field40Length() int { - l := 0 - if p.IsSetCompressType() { - l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 40) - l += bthrift.Binary.I32Length(int32(*p.CompressType)) +func (p *TGetSnapshotRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabelName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label_name", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LabelName) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field41Length() int { - l := 0 - if p.IsSetFileSize() { - l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 41) - l += bthrift.Binary.I64Length(*p.FileSize) +func (p *TGetSnapshotRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSnapshotName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_name", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SnapshotName) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field42Length() int { - l := 0 - if p.IsSetTrimDoubleQuotes() { - l += bthrift.Binary.FieldBeginLength("trim_double_quotes", thrift.BOOL, 42) - l += bthrift.Binary.BoolLength(*p.TrimDoubleQuotes) +func (p *TGetSnapshotRequest) fastWriteField9(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSnapshotType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_type", thrift.I32, 9) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.SnapshotType)) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field43Length() int { - l := 0 - if p.IsSetSkipLines() { - l += bthrift.Binary.FieldBeginLength("skip_lines", thrift.I32, 43) - l += bthrift.Binary.I32Length(*p.SkipLines) +func (p *TGetSnapshotRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableCompress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_compress", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableCompress) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadPutRequest) field44Length() int { +func (p *TGetSnapshotRequest) field1Length() int { l := 0 - if p.IsSetEnableProfile() { - l += bthrift.Binary.FieldBeginLength("enable_profile", thrift.BOOL, 44) - l += bthrift.Binary.BoolLength(*p.EnableProfile) + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field45Length() int { +func (p *TGetSnapshotRequest) field2Length() int { l := 0 - if p.IsSetPartialUpdate() { - l += bthrift.Binary.FieldBeginLength("partial_update", thrift.BOOL, 45) - l += bthrift.Binary.BoolLength(*p.PartialUpdate) + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field46Length() int { +func (p *TGetSnapshotRequest) field3Length() int { l := 0 - if p.IsSetTableNames() { - l += bthrift.Binary.FieldBeginLength("table_names", thrift.LIST, 46) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.TableNames)) - for _, v := range p.TableNames { - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field47Length() int { +func (p *TGetSnapshotRequest) field4Length() int { l := 0 - if p.IsSetLoadSql() { - l += bthrift.Binary.FieldBeginLength("load_sql", thrift.STRING, 47) - l += bthrift.Binary.StringLengthNocopy(*p.LoadSql) + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field48Length() int { +func (p *TGetSnapshotRequest) field5Length() int { l := 0 - if p.IsSetBackendId() { - l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 48) - l += bthrift.Binary.I64Length(*p.BackendId) + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Table) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field49Length() int { +func (p *TGetSnapshotRequest) field6Length() int { l := 0 - if p.IsSetVersion() { - l += 
bthrift.Binary.FieldBeginLength("version", thrift.I32, 49) - l += bthrift.Binary.I32Length(*p.Version) + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.Token) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field50Length() int { +func (p *TGetSnapshotRequest) field7Length() int { l := 0 - if p.IsSetLabel() { - l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 50) - l += bthrift.Binary.StringLengthNocopy(*p.Label) + if p.IsSetLabelName() { + l += bthrift.Binary.FieldBeginLength("label_name", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.LabelName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field51Length() int { +func (p *TGetSnapshotRequest) field8Length() int { l := 0 - if p.IsSetEnclose() { - l += bthrift.Binary.FieldBeginLength("enclose", thrift.BYTE, 51) - l += bthrift.Binary.ByteLength(*p.Enclose) + if p.IsSetSnapshotName() { + l += bthrift.Binary.FieldBeginLength("snapshot_name", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.SnapshotName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field52Length() int { +func (p *TGetSnapshotRequest) field9Length() int { l := 0 - if p.IsSetEscape() { - l += bthrift.Binary.FieldBeginLength("escape", thrift.BYTE, 52) - l += bthrift.Binary.ByteLength(*p.Escape) + if p.IsSetSnapshotType() { + l += bthrift.Binary.FieldBeginLength("snapshot_type", thrift.I32, 9) + l += bthrift.Binary.I32Length(int32(*p.SnapshotType)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutRequest) field53Length() int { +func (p *TGetSnapshotRequest) field10Length() int { l := 0 - if p.IsSetMemtableOnSinkNode() { - l += bthrift.Binary.FieldBeginLength("memtable_on_sink_node", thrift.BOOL, 53) - l += bthrift.Binary.BoolLength(*p.MemtableOnSinkNode) + if p.IsSetEnableCompress() { + l += bthrift.Binary.FieldBeginLength("enable_compress", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.EnableCompress) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { +func (p *TGetSnapshotResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -19165,7 +41268,6 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19174,7 +41276,7 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -19188,7 +41290,7 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -19202,7 +41304,7 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -19215,6 +41317,48 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { goto 
SkipFieldError } } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19235,28 +41379,22 @@ func (p *TStreamLoadPutResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadPutResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadPutResult_[fieldId])) } -func (p *TStreamLoadPutResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetSnapshotResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -19269,57 +41407,396 @@ func (p *TStreamLoadPutResult_) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TStreamLoadPutResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetSnapshotResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTExecPlanFragmentParams() + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Meta = []byte(v) + + } + return offset, nil +} + +func (p *TGetSnapshotResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.JobInfo = []byte(v) + + } + return offset, nil +} + +func (p *TGetSnapshotResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.MasterAddress = tmp + return offset, nil +} + +func (p *TGetSnapshotResult_) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err 
:= bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Compressed = &v + + } + return offset, nil +} + +func (p *TGetSnapshotResult_) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ExpiredAt = &v + + } + return offset, nil +} + +func (p *TGetSnapshotResult_) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CommitSeq = &v + + } + return offset, nil +} + +// for compatibility +func (p *TGetSnapshotResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGetSnapshotResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetSnapshotResult") + if p != nil { + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGetSnapshotResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetSnapshotResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetSnapshotResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMeta() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta", thrift.STRING, 2) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Meta)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_info", thrift.STRING, 3) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.JobInfo)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 4) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) 
int { + offset := 0 + if p.IsSetCompressed() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compressed", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Compressed) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExpiredAt() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expiredAt", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExpiredAt) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCommitSeq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commit_seq", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CommitSeq) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetSnapshotResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field2Length() int { + l := 0 + if p.IsSetMeta() { + l += bthrift.Binary.FieldBeginLength("meta", thrift.STRING, 2) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Meta)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field3Length() int { + l := 0 + if p.IsSetJobInfo() { + l += bthrift.Binary.FieldBeginLength("job_info", thrift.STRING, 3) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.JobInfo)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field4Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 4) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field5Length() int { + l := 0 + if p.IsSetCompressed() { + l += bthrift.Binary.FieldBeginLength("compressed", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.Compressed) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field6Length() int { + l := 0 + if p.IsSetExpiredAt() { + l += bthrift.Binary.FieldBeginLength("expiredAt", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.ExpiredAt) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetSnapshotResult_) field7Length() int { + l := 0 + if p.IsSetCommitSeq() { + l += bthrift.Binary.FieldBeginLength("commit_seq", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.CommitSeq) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableRef) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + 
offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableRef[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TStreamLoadPutResult_) FastReadField3(buf []byte) (int, error) { +func (p *TTableRef) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := palointernalservice.NewTPipelineFragmentParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Table = &v + } - p.PipelineParams = tmp return offset, nil } -func (p *TStreamLoadPutResult_) FastReadField4(buf []byte) (int, error) { +func (p *TTableRef) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BaseSchemaVersion = &v + p.AliasName = &v } return offset, nil } // for compatibility -func (p *TStreamLoadPutResult_) FastWrite(buf []byte) int { +func (p *TTableRef) FastWrite(buf []byte) int { return 0 } -func (p *TStreamLoadPutResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableRef) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadPutResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableRef") if p != nil { - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -19327,105 +41804,68 @@ func (p *TStreamLoadPutResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift return offset } -func (p *TStreamLoadPutResult_) BLength() int { +func (p *TTableRef) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TStreamLoadPutResult") + l += bthrift.Binary.StructBeginLength("TTableRef") if p != nil { l += p.field1Length() - l += p.field2Length() l += p.field3Length() - l += p.field4Length() } l += 
bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadPutResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TStreamLoadPutResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableRef) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 2) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) -func (p *TStreamLoadPutResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPipelineParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pipeline_params", thrift.STRUCT, 3) - offset += p.PipelineParams.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableRef) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetBaseSchemaVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_schema_version", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.BaseSchemaVersion) + if p.IsSetAliasName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "alias_name", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AliasName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadPutResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TStreamLoadPutResult_) field2Length() int { +func (p *TTableRef) field1Length() int { l := 0 - if p.IsSetParams() { - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 2) - l += p.Params.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Table) -func (p *TStreamLoadPutResult_) field3Length() int { - l := 0 - if p.IsSetPipelineParams() { - l += bthrift.Binary.FieldBeginLength("pipeline_params", thrift.STRUCT, 3) - l += p.PipelineParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadPutResult_) field4Length() int { +func (p *TTableRef) field3Length() int { l := 0 - if p.IsSetBaseSchemaVersion() { - l += bthrift.Binary.FieldBeginLength("base_schema_version", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.BaseSchemaVersion) + if p.IsSetAliasName() { + l += bthrift.Binary.FieldBeginLength("alias_name", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.AliasName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, 
error) { +func (p *TRestoreSnapshotRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -19443,13 +41883,12 @@ func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19458,7 +41897,7 @@ func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -19472,7 +41911,7 @@ func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -19485,6 +41924,188 @@ func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == 
thrift.STRING { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19505,448 +42126,306 @@ func (p *TStreamLoadMultiTablePutResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TStreamLoadMultiTablePutResult_[fieldId])) } -func (p *TStreamLoadMultiTablePutResult_) FastReadField1(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Cluster = &v + } - p.Status = tmp return offset, nil } -func (p *TStreamLoadMultiTablePutResult_) FastReadField2(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err - } - p.Params = 
make([]*palointernalservice.TExecPlanFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := palointernalservice.NewTExecPlanFragmentParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } else { + offset += l + p.User = &v - p.Params = append(p.Params, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TRestoreSnapshotRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Passwd = &v + } return offset, nil } -func (p *TStreamLoadMultiTablePutResult_) FastReadField3(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err - } - p.PipelineParams = make([]*palointernalservice.TPipelineFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := palointernalservice.NewTPipelineFragmentParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } else { + offset += l + p.Db = &v - p.PipelineParams = append(p.PipelineParams, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TRestoreSnapshotRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Table = &v + } return offset, nil } -// for compatibility -func (p *TStreamLoadMultiTablePutResult_) FastWrite(buf []byte) int { - return 0 -} - -func (p *TStreamLoadMultiTablePutResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) FastReadField6(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadMultiTablePutResult") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} -func (p *TStreamLoadMultiTablePutResult_) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TStreamLoadMultiTablePutResult") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l + return offset, nil } -func (p *TStreamLoadMultiTablePutResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) FastReadField7(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} -func (p *TStreamLoadMultiTablePutResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetParams() { - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Params { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LabelName = &v + } - return offset + return offset, nil } -func (p *TStreamLoadMultiTablePutResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) FastReadField8(buf []byte) (int, error) { offset := 0 - if p.IsSetPipelineParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pipeline_params", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.PipelineParams { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} -func (p *TStreamLoadMultiTablePutResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RepoName = &v -func (p *TStreamLoadMultiTablePutResult_) field2Length() int { - l := 0 - if p.IsSetParams() { - l += bthrift.Binary.FieldBeginLength("params", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Params)) - for _, v := range p.Params { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TStreamLoadMultiTablePutResult_) field3Length() int { - l := 0 - if p.IsSetPipelineParams() { - l += bthrift.Binary.FieldBeginLength("pipeline_params", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PipelineParams)) - for _, v := range p.PipelineParams { - l += v.BLength() +func (p *TRestoreSnapshotRequest) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableRefs = make([]*TTableRef, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTableRef() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + + p.TableRefs = append(p.TableRefs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) +func (p *TRestoreSnapshotRequest) FastReadField10(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) offset 
+= l if err != nil { - goto ReadStructBeginError + return offset, err } + p.Properties = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { offset += l - if err != nil { - goto SkipFieldError - } - } - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError + _val = v + } + + p.Properties[_key] = _val } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TStreamLoadWithLoadStatusResult_[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct 
end error: ", p), err) } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField1(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField11(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.Meta = []byte(v) + } - p.Status = tmp return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField2(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField12(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnId = &v + + p.JobInfo = []byte(v) } return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField3(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField13(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TotalRows = &v + p.CleanTables = &v } return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField4(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField14(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadedRows = &v + p.CleanPartitions = &v } return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField5(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField15(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FilteredRows = &v + p.AtomicRestore = &v } return offset, nil } -func (p *TStreamLoadWithLoadStatusResult_) FastReadField6(buf []byte) (int, error) { +func (p *TRestoreSnapshotRequest) FastReadField16(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UnselectedRows = &v + p.Compressed = &v } return offset, nil } // for compatibility -func (p *TStreamLoadWithLoadStatusResult_) FastWrite(buf []byte) int { +func (p *TRestoreSnapshotRequest) FastWrite(buf []byte) int { return 0 } -func (p *TStreamLoadWithLoadStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TStreamLoadWithLoadStatusResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRestoreSnapshotRequest") if p != nil { + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], 
binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TStreamLoadWithLoadStatusResult_) BLength() int { +func (p *TRestoreSnapshotRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TStreamLoadWithLoadStatusResult") + l += bthrift.Binary.StructBeginLength("TRestoreSnapshotRequest") if p != nil { l += p.field1Length() l += p.field2Length() @@ -19954,327 +42433,403 @@ func (p *TStreamLoadWithLoadStatusResult_) BLength() int { l += p.field4Length() l += p.field5Length() l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TStreamLoadWithLoadStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadWithLoadStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadWithLoadStatusResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTotalRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_rows", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalRows) + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p 
*TStreamLoadWithLoadStatusResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadedRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loaded_rows", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadWithLoadStatusResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFilteredRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filtered_rows", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilteredRows) + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadWithLoadStatusResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUnselectedRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unselected_rows", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.UnselectedRows) + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TStreamLoadWithLoadStatusResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TRestoreSnapshotRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLabelName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label_name", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LabelName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TStreamLoadWithLoadStatusResult_) field2Length() int { +func (p *TRestoreSnapshotRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRepoName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "repo_name", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RepoName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableRefs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_refs", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TableRefs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + 
bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 10) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.Properties { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMeta() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta", thrift.STRING, 11) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Meta)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_info", thrift.STRING, 12) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.JobInfo)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCleanTables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_tables", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.CleanTables) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCleanPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_partitions", thrift.BOOL, 14) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.CleanPartitions) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAtomicRestore() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "atomic_restore", thrift.BOOL, 15) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.AtomicRestore) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressed() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compressed", thrift.BOOL, 16) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Compressed) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRestoreSnapshotRequest) field1Length() int { l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TxnId) + if 
p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadWithLoadStatusResult_) field3Length() int { +func (p *TRestoreSnapshotRequest) field2Length() int { l := 0 - if p.IsSetTotalRows() { - l += bthrift.Binary.FieldBeginLength("total_rows", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.TotalRows) + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadWithLoadStatusResult_) field4Length() int { +func (p *TRestoreSnapshotRequest) field3Length() int { l := 0 - if p.IsSetLoadedRows() { - l += bthrift.Binary.FieldBeginLength("loaded_rows", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.LoadedRows) + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadWithLoadStatusResult_) field5Length() int { +func (p *TRestoreSnapshotRequest) field4Length() int { l := 0 - if p.IsSetFilteredRows() { - l += bthrift.Binary.FieldBeginLength("filtered_rows", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.FilteredRows) + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Db) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TStreamLoadWithLoadStatusResult_) field6Length() int { +func (p *TRestoreSnapshotRequest) field5Length() int { l := 0 - if p.IsSetUnselectedRows() { - l += bthrift.Binary.FieldBeginLength("unselected_rows", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.UnselectedRows) + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Table) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCheckWalRequest) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } +func (p *TRestoreSnapshotRequest) field6Length() int { + l := 0 + if p.IsSetToken() { + l += 
bthrift.Binary.FieldBeginLength("token", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.Token) - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWalRequest[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + l += bthrift.Binary.FieldEndLength() + } + return l } -func (p *TCheckWalRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.WalId = &v +func (p *TRestoreSnapshotRequest) field7Length() int { + l := 0 + if p.IsSetLabelName() { + l += bthrift.Binary.FieldBeginLength("label_name", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.LabelName) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TCheckWalRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 +func (p *TRestoreSnapshotRequest) field8Length() int { + l := 0 + if p.IsSetRepoName() { + l += bthrift.Binary.FieldBeginLength("repo_name", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.RepoName) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbId = &v + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TRestoreSnapshotRequest) field9Length() int { + l := 0 + if p.IsSetTableRefs() { + l += bthrift.Binary.FieldBeginLength("table_refs", thrift.LIST, 9) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableRefs)) + for _, v := range p.TableRefs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -// for compatibility -func (p *TCheckWalRequest) FastWrite(buf []byte) int { - return 0 +func (p *TRestoreSnapshotRequest) field10Length() int { + l := 0 + if p.IsSetProperties() { + l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 10) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) + for k, v := range p.Properties { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l } -func (p *TCheckWalRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWalRequest") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) +func (p *TRestoreSnapshotRequest) field11Length() int { + l := 0 + if p.IsSetMeta() { + l += bthrift.Binary.FieldBeginLength("meta", thrift.STRING, 11) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Meta)) + + l += bthrift.Binary.FieldEndLength() } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + return l } -func (p *TCheckWalRequest) BLength() int { +func (p *TRestoreSnapshotRequest) field12Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCheckWalRequest") - if p != nil { - l += p.field1Length() - l += p.field2Length() + if p.IsSetJobInfo() { + l += bthrift.Binary.FieldBeginLength("job_info", thrift.STRING, 12) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.JobInfo)) + + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() return l } -func (p *TCheckWalRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetWalId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wal_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.WalId) +func (p *TRestoreSnapshotRequest) field13Length() int { + l := 0 + if p.IsSetCleanTables() { + l += bthrift.Binary.FieldBeginLength("clean_tables", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.CleanTables) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TCheckWalRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) +func (p *TRestoreSnapshotRequest) field14Length() int { + l := 0 + if p.IsSetCleanPartitions() { + l += bthrift.Binary.FieldBeginLength("clean_partitions", thrift.BOOL, 14) + l += bthrift.Binary.BoolLength(*p.CleanPartitions) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TCheckWalRequest) field1Length() int { +func (p *TRestoreSnapshotRequest) field15Length() int { l := 0 - if p.IsSetWalId() { - l += bthrift.Binary.FieldBeginLength("wal_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.WalId) + if p.IsSetAtomicRestore() { + l += bthrift.Binary.FieldBeginLength("atomic_restore", thrift.BOOL, 15) + l += bthrift.Binary.BoolLength(*p.AtomicRestore) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCheckWalRequest) field2Length() int { +func (p *TRestoreSnapshotRequest) field16Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetCompressed() { + l += bthrift.Binary.FieldBeginLength("compressed", thrift.BOOL, 16) + l += bthrift.Binary.BoolLength(*p.Compressed) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCheckWalResult_) FastRead(buf []byte) (int, error) { +func (p *TRestoreSnapshotResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -20311,7 +42866,7 @@ func (p *TCheckWalResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -20350,7 +42905,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWalResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TRestoreSnapshotResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -20359,7 +42914,7 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCheckWalResult_) FastReadField1(buf []byte) (int, error) { +func (p *TRestoreSnapshotResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -20372,39 +42927,39 @@ func (p *TCheckWalResult_) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TCheckWalResult_) FastReadField2(buf []byte) (int, error) { +func (p *TRestoreSnapshotResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.NeedRecovery = &v - } + p.MasterAddress = tmp return offset, nil } // for compatibility -func (p *TCheckWalResult_) FastWrite(buf []byte) int { +func (p *TRestoreSnapshotResult_) FastWrite(buf []byte) int { return 0 } -func (p *TCheckWalResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWalResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRestoreSnapshotResult") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCheckWalResult_) BLength() int { +func (p *TRestoreSnapshotResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCheckWalResult") + l += bthrift.Binary.StructBeginLength("TRestoreSnapshotResult") if p != nil { l += p.field1Length() l += p.field2Length() @@ -20414,7 +42969,7 @@ func (p *TCheckWalResult_) BLength() int { return l } -func (p *TCheckWalResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetStatus() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) @@ -20424,18 +42979,17 @@ func (p *TCheckWalResult_) fastWriteField1(buf []byte, binaryWriter bthrift.Bina return offset } -func (p *TCheckWalResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TRestoreSnapshotResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNeedRecovery() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "need_recovery", thrift.BOOL, 2) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.NeedRecovery) - + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 2) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TCheckWalResult_) field1Length() int { +func (p *TRestoreSnapshotResult_) field1Length() int { l := 0 if p.IsSetStatus() { l += 
bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) @@ -20445,204 +42999,22 @@ func (p *TCheckWalResult_) field1Length() int { return l } -func (p *TCheckWalResult_) field2Length() int { +func (p *TRestoreSnapshotResult_) field2Length() int { l := 0 - if p.IsSetNeedRecovery() { - l += bthrift.Binary.FieldBeginLength("need_recovery", thrift.BOOL, 2) - l += bthrift.Binary.BoolLength(*p.NeedRecovery) - + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 2) + l += p.MasterAddress.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TKafkaRLTaskProgress) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetPartitionCmtOffset bool = false - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPartitionCmtOffset = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetPartitionCmtOffset { - fieldId = 1 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TKafkaRLTaskProgress[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TKafkaRLTaskProgress[fieldId])) -} - -func (p *TKafkaRLTaskProgress) FastReadField1(buf []byte) (int, error) { - offset := 0 - - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PartitionCmtOffset = make(map[int32]int64, size) - for i := 0; i < size; i++ { - var _key int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _val = v - - } - - p.PartitionCmtOffset[_key] = _val - } - if l, err := 
bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -// for compatibility -func (p *TKafkaRLTaskProgress) FastWrite(buf []byte) int { - return 0 -} - -func (p *TKafkaRLTaskProgress) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TKafkaRLTaskProgress") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TKafkaRLTaskProgress) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TKafkaRLTaskProgress") - if p != nil { - l += p.field1Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TKafkaRLTaskProgress) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitionCmtOffset", thrift.MAP, 1) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I64, 0) - var length int - for k, v := range p.PartitionCmtOffset { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) - - offset += bthrift.Binary.WriteI64(buf[offset:], v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I64, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TKafkaRLTaskProgress) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("partitionCmtOffset", thrift.MAP, 1) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I64, len(p.PartitionCmtOffset)) - var tmpK int32 - var tmpV int64 - l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.PartitionCmtOffset) - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetLoadSourceType bool = false - var issetId bool = false - var issetJobId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -20660,13 +43032,12 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetLoadSourceType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -20675,13 +43046,12 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -20696,7 +43066,6 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetJobId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -20705,7 +43074,7 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { 
} } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -20719,7 +43088,7 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { @@ -20733,7 +43102,7 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { @@ -20747,7 +43116,7 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 7: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { @@ -20761,50 +43130,8 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 8: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) + l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -20836,211 +43163,151 @@ func (p *TRLTaskTxnCommitAttachment) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetLoadSourceType { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetJobId { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRLTaskTxnCommitAttachment[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlStoredProcedure[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRLTaskTxnCommitAttachment[fieldId])) -} - -func (p *TRLTaskTxnCommitAttachment) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := 
bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.LoadSourceType = types.TLoadSourceType(v) - - } - return offset, nil -} - -func (p *TRLTaskTxnCommitAttachment) FastReadField2(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.Id = tmp - return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField3(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.JobId = v + p.Name = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField4(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadedRows = &v + p.CatalogId = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField5(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FilteredRows = &v + p.DbId = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField6(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UnselectedRows = &v + p.PackageName = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField7(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ReceivedBytes = &v + p.OwnerName = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField8(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadedBytes = &v + p.Source = &v } return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField9(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedure) FastReadField7(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LoadCostMs = &v - - } - return offset, nil -} - -func (p *TRLTaskTxnCommitAttachment) FastReadField10(buf []byte) (int, error) { - offset := 0 + p.CreateTime = &v - tmp := NewTKafkaRLTaskProgress() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.KafkaRLTaskProgress = tmp return offset, nil } -func (p *TRLTaskTxnCommitAttachment) FastReadField11(buf []byte) (int, error) { +func (p 
*TPlsqlStoredProcedure) FastReadField8(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ErrorLogUrl = &v + p.ModifyTime = &v } return offset, nil } // for compatibility -func (p *TRLTaskTxnCommitAttachment) FastWrite(buf []byte) int { +func (p *TPlsqlStoredProcedure) FastWrite(buf []byte) int { return 0 } -func (p *TRLTaskTxnCommitAttachment) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRLTaskTxnCommitAttachment") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPlsqlStoredProcedure") if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRLTaskTxnCommitAttachment) BLength() int { +func (p *TPlsqlStoredProcedure) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TRLTaskTxnCommitAttachment") + l += bthrift.Binary.StructBeginLength("TPlsqlStoredProcedure") if p != nil { l += p.field1Length() l += p.field2Length() @@ -21050,248 +43317,194 @@ func (p *TRLTaskTxnCommitAttachment) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TRLTaskTxnCommitAttachment) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadSourceType", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.LoadSourceType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TRLTaskTxnCommitAttachment) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.STRUCT, 2) - offset += p.Id.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TRLTaskTxnCommitAttachment) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jobId", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TRLTaskTxnCommitAttachment) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadedRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"loadedRows", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedRows) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRLTaskTxnCommitAttachment) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFilteredRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filteredRows", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilteredRows) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUnselectedRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "unselectedRows", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.UnselectedRows) + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetReceivedBytes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receivedBytes", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceivedBytes) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadedBytes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadedBytes", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadedBytes) + if p.IsSetPackageName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "packageName", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PackageName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadCostMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadCostMs", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadCostMs) + if p.IsSetOwnerName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ownerName", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OwnerName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField10(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetKafkaRLTaskProgress() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "kafkaRLTaskProgress", thrift.STRUCT, 10) - offset += p.KafkaRLTaskProgress.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSource() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "source", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Source) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedure) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetErrorLogUrl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "errorLogUrl", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ErrorLogUrl) + if p.IsSetCreateTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "createTime", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CreateTime) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRLTaskTxnCommitAttachment) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("loadSourceType", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.LoadSourceType)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TRLTaskTxnCommitAttachment) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("id", thrift.STRUCT, 2) - l += p.Id.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TRLTaskTxnCommitAttachment) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("jobId", thrift.I64, 3) - l += bthrift.Binary.I64Length(p.JobId) +func (p *TPlsqlStoredProcedure) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetModifyTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "modifyTime", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ModifyTime) - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TRLTaskTxnCommitAttachment) field4Length() int { +func (p *TPlsqlStoredProcedure) field1Length() int { l := 0 - if p.IsSetLoadedRows() { - l += bthrift.Binary.FieldBeginLength("loadedRows", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.LoadedRows) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Name) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field5Length() int { +func (p *TPlsqlStoredProcedure) field2Length() int { l := 0 - if p.IsSetFilteredRows() { - l += bthrift.Binary.FieldBeginLength("filteredRows", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.FilteredRows) + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalogId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.CatalogId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field6Length() int { +func (p *TPlsqlStoredProcedure) field3Length() int { l := 0 - if p.IsSetUnselectedRows() { - l += bthrift.Binary.FieldBeginLength("unselectedRows", thrift.I64, 6) - l += 
bthrift.Binary.I64Length(*p.UnselectedRows) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.DbId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field7Length() int { +func (p *TPlsqlStoredProcedure) field4Length() int { l := 0 - if p.IsSetReceivedBytes() { - l += bthrift.Binary.FieldBeginLength("receivedBytes", thrift.I64, 7) - l += bthrift.Binary.I64Length(*p.ReceivedBytes) + if p.IsSetPackageName() { + l += bthrift.Binary.FieldBeginLength("packageName", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.PackageName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field8Length() int { +func (p *TPlsqlStoredProcedure) field5Length() int { l := 0 - if p.IsSetLoadedBytes() { - l += bthrift.Binary.FieldBeginLength("loadedBytes", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.LoadedBytes) + if p.IsSetOwnerName() { + l += bthrift.Binary.FieldBeginLength("ownerName", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.OwnerName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field9Length() int { +func (p *TPlsqlStoredProcedure) field6Length() int { l := 0 - if p.IsSetLoadCostMs() { - l += bthrift.Binary.FieldBeginLength("loadCostMs", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.LoadCostMs) + if p.IsSetSource() { + l += bthrift.Binary.FieldBeginLength("source", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.Source) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field10Length() int { +func (p *TPlsqlStoredProcedure) field7Length() int { l := 0 - if p.IsSetKafkaRLTaskProgress() { - l += bthrift.Binary.FieldBeginLength("kafkaRLTaskProgress", thrift.STRUCT, 10) - l += p.KafkaRLTaskProgress.BLength() + if p.IsSetCreateTime() { + l += bthrift.Binary.FieldBeginLength("createTime", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.CreateTime) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRLTaskTxnCommitAttachment) field11Length() int { +func (p *TPlsqlStoredProcedure) field8Length() int { l := 0 - if p.IsSetErrorLogUrl() { - l += bthrift.Binary.FieldBeginLength("errorLogUrl", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.ErrorLogUrl) + if p.IsSetModifyTime() { + l += bthrift.Binary.FieldBeginLength("modifyTime", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.ModifyTime) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { +func (p *TPlsqlPackage) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetLoadType bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -21309,13 +43522,12 @@ func (p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetLoadType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21324,7 +43536,7 @@ func (p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -21337,6 +43549,62 @@ func 
(p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21357,133 +43625,274 @@ func (p *TTxnCommitAttachment) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetLoadType { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnCommitAttachment[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlPackage[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTxnCommitAttachment[fieldId])) } -func (p *TTxnCommitAttachment) FastReadField1(buf []byte) (int, error) { +func (p *TPlsqlPackage) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Name = &v - p.LoadType = types.TLoadType(v) + } + return offset, nil +} + +func (p *TPlsqlPackage) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CatalogId = &v } return offset, nil } -func (p *TTxnCommitAttachment) FastReadField2(buf []byte) (int, error) { +func (p *TPlsqlPackage) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := NewTRLTaskTxnCommitAttachment() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + 
p.DbId = &v + + } + return offset, nil +} + +func (p *TPlsqlPackage) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OwnerName = &v + + } + return offset, nil +} + +func (p *TPlsqlPackage) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Header = &v + + } + return offset, nil +} + +func (p *TPlsqlPackage) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Body = &v + } - p.RlTaskTxnCommitAttachment = tmp return offset, nil } // for compatibility -func (p *TTxnCommitAttachment) FastWrite(buf []byte) int { +func (p *TPlsqlPackage) FastWrite(buf []byte) int { return 0 } -func (p *TTxnCommitAttachment) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlPackage) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTxnCommitAttachment") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPlsqlPackage") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTxnCommitAttachment) BLength() int { +func (p *TPlsqlPackage) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTxnCommitAttachment") + l += bthrift.Binary.StructBeginLength("TPlsqlPackage") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTxnCommitAttachment) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlPackage) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "loadType", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.LoadType)) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TTxnCommitAttachment) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlPackage) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRlTaskTxnCommitAttachment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rlTaskTxnCommitAttachment", thrift.STRUCT, 2) - offset += p.RlTaskTxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.I64, 
2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTxnCommitAttachment) field1Length() int { +func (p *TPlsqlPackage) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlsqlPackage) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOwnerName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ownerName", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OwnerName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlsqlPackage) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHeader() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "header", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Header) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlsqlPackage) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBody() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "body", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Body) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlsqlPackage) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("loadType", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.LoadType)) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Name) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TTxnCommitAttachment) field2Length() int { +func (p *TPlsqlPackage) field2Length() int { l := 0 - if p.IsSetRlTaskTxnCommitAttachment() { - l += bthrift.Binary.FieldBeginLength("rlTaskTxnCommitAttachment", thrift.STRUCT, 2) - l += p.RlTaskTxnCommitAttachment.BLength() + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalogId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.CatalogId) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { +func (p *TPlsqlPackage) field3Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlsqlPackage) field4Length() int { + l := 0 + if p.IsSetOwnerName() { + l += bthrift.Binary.FieldBeginLength("ownerName", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.OwnerName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlsqlPackage) field5Length() int { + l := 0 + if p.IsSetHeader() { + l += bthrift.Binary.FieldBeginLength("header", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Header) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlsqlPackage) field6Length() int { + l := 0 + if p.IsSetBody() { + l += bthrift.Binary.FieldBeginLength("body", thrift.STRING, 6) + l += 
bthrift.Binary.StringLengthNocopy(*p.Body) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlsqlProcedureKey) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetTxnId bool = false - var issetSync bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -21514,197 +43923,9 @@ func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetUser = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPasswd = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetDb = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetTbl = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetTxnId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetSync = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.I64 { - l, err 
= p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField13(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 14: + case 2: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField14(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField15(buf[offset:]) + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -21716,9 +43937,9 @@ func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 16: + case 3: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField16(buf[offset:]) + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -21750,709 +43971,920 @@ func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetSync { - fieldId = 8 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlProcedureKey[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitRequest[fieldId])) } -func (p *TLoadTxnCommitRequest) FastReadField1(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v + p.Name = &v } return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField2(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l 
- - p.User = v + p.CatalogId = &v } return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField3(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Passwd = v + p.DbId = &v } return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +// for compatibility +func (p *TPlsqlProcedureKey) FastWrite(buf []byte) int { + return 0 +} - p.Db = v +func (p *TPlsqlProcedureKey) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPlsqlProcedureKey") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} +func (p *TPlsqlProcedureKey) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPlsqlProcedureKey") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TLoadTxnCommitRequest) FastReadField5(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Tbl = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TLoadTxnCommitRequest) FastReadField6(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TLoadTxnCommitRequest) FastReadField7(buf []byte) (int, error) { +func (p *TPlsqlProcedureKey) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.TxnId = v +func (p *TPlsqlProcedureKey) field1Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + l += 
bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TLoadTxnCommitRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 +func (p *TPlsqlProcedureKey) field2Length() int { + l := 0 + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalogId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.CatalogId) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() + } + return l +} - p.Sync = v +func (p *TPlsqlProcedureKey) field3Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.DbId) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TLoadTxnCommitRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *TAddPlsqlStoredProcedureRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - p.CommitInfos = append(p.CommitInfos, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddPlsqlStoredProcedureRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p 
*TLoadTxnCommitRequest) FastReadField10(buf []byte) (int, error) { +func (p *TAddPlsqlStoredProcedureRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTPlsqlStoredProcedure() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AuthCode = &v - } + p.PlsqlStoredProcedure = tmp return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField11(buf []byte) (int, error) { +func (p *TAddPlsqlStoredProcedureRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := NewTTxnCommitAttachment() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.IsForce = &v + } - p.TxnCommitAttachment = tmp return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField12(buf []byte) (int, error) { +// for compatibility +func (p *TAddPlsqlStoredProcedureRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TAddPlsqlStoredProcedureRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAddPlsqlStoredProcedureRequest") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ThriftRpcTimeoutMs = &v +func (p *TAddPlsqlStoredProcedureRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TAddPlsqlStoredProcedureRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} +func (p *TAddPlsqlStoredProcedureRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPlsqlStoredProcedure() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "plsqlStoredProcedure", thrift.STRUCT, 1) + offset += p.PlsqlStoredProcedure.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TLoadTxnCommitRequest) FastReadField13(buf []byte) (int, error) { +func (p *TAddPlsqlStoredProcedureRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetIsForce() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isForce", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsForce) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAddPlsqlStoredProcedureRequest) field1Length() int { + l := 0 + if p.IsSetPlsqlStoredProcedure() { + l += bthrift.Binary.FieldBeginLength("plsqlStoredProcedure", thrift.STRUCT, 1) + l += p.PlsqlStoredProcedure.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAddPlsqlStoredProcedureRequest) field2Length() int { + l := 0 + if p.IsSetIsForce() { + l += bthrift.Binary.FieldBeginLength("isForce", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(*p.IsForce) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + 
+func (p *TDropPlsqlStoredProcedureRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - p.Token = &v + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDropPlsqlStoredProcedureRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxnCommitRequest) FastReadField14(buf []byte) (int, error) { +func (p *TDropPlsqlStoredProcedureRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTPlsqlProcedureKey() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v - } + p.PlsqlProcedureKey = tmp return offset, nil } -func (p *TLoadTxnCommitRequest) FastReadField15(buf []byte) (int, error) { +// for compatibility +func (p *TDropPlsqlStoredProcedureRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TDropPlsqlStoredProcedureRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDropPlsqlStoredProcedureRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *TDropPlsqlStoredProcedureRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TDropPlsqlStoredProcedureRequest") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TDropPlsqlStoredProcedureRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPlsqlProcedureKey() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "plsqlProcedureKey", thrift.STRUCT, 1) + offset += p.PlsqlProcedureKey.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDropPlsqlStoredProcedureRequest) field1Length() int { + l := 0 + if p.IsSetPlsqlProcedureKey() { + l += bthrift.Binary.FieldBeginLength("plsqlProcedureKey", thrift.STRUCT, 1) + l += p.PlsqlProcedureKey.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlsqlStoredProcedureResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - p.Tbls = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - p.Tbls = append(p.Tbls, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlStoredProcedureResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxnCommitRequest) FastReadField16(buf []byte) (int, error) { +func (p *TPlsqlStoredProcedureResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableId = &v - } + p.Status = tmp return offset, nil } // for compatibility -func (p *TLoadTxnCommitRequest) FastWrite(buf []byte) int { +func (p *TPlsqlStoredProcedureResult_) FastWrite(buf []byte) int { return 0 -} - -func (p *TLoadTxnCommitRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnCommitRequest") - if p != nil { - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) +} + +func (p *TPlsqlStoredProcedureResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPlsqlStoredProcedureResult") + if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxnCommitRequest) BLength() int { +func (p *TPlsqlStoredProcedureResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnCommitRequest") + l += bthrift.Binary.StructBeginLength("TPlsqlStoredProcedureResult") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - l += p.field13Length() - l += p.field14Length() - l += p.field15Length() - l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxnCommitRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlStoredProcedureResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnCommitRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnCommitRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnCommitRequest) 
fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset +func (p *TPlsqlStoredProcedureResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l } -func (p *TLoadTxnCommitRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} +func (p *TAddPlsqlPackageRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } -func (p *TLoadTxnCommitRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset -} - -func (p *TLoadTxnCommitRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddPlsqlPackageRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d 
error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxnCommitRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAddPlsqlPackageRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sync", thrift.BOOL, 8) - offset += bthrift.Binary.WriteBool(buf[offset:], p.Sync) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} -func (p *TLoadTxnCommitRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCommitInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commitInfos", thrift.LIST, 9) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.CommitInfos { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTPlsqlPackage() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.PlsqlPackage = tmp + return offset, nil } -func (p *TLoadTxnCommitRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAddPlsqlPackageRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsForce = &v -func (p *TLoadTxnCommitRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnCommitAttachment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnCommitAttachment", thrift.STRUCT, 11) - offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset, nil } -func (p *TLoadTxnCommitRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetThriftRpcTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +// for compatibility +func (p *TAddPlsqlPackageRequest) FastWrite(buf []byte) int { + return 0 } -func (p *TLoadTxnCommitRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAddPlsqlPackageRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 13) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAddPlsqlPackageRequest") + if p != 
nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxnCommitRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 14) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TAddPlsqlPackageRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TAddPlsqlPackageRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() } - return offset + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TLoadTxnCommitRequest) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAddPlsqlPackageRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTbls() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbls", thrift.LIST, 15) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.Tbls { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetPlsqlPackage() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "plsqlPackage", thrift.STRUCT, 1) + offset += p.PlsqlPackage.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnCommitRequest) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAddPlsqlPackageRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 16) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + if p.IsSetIsForce() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isForce", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsForce) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnCommitRequest) field1Length() int { +func (p *TAddPlsqlPackageRequest) field1Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - + if p.IsSetPlsqlPackage() { + l += bthrift.Binary.FieldBeginLength("plsqlPackage", thrift.STRUCT, 1) + l += p.PlsqlPackage.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnCommitRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnCommitRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnCommitRequest) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += 
bthrift.Binary.StringLengthNocopy(p.Db) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnCommitRequest) field5Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(p.Tbl) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnCommitRequest) field6Length() int { +func (p *TAddPlsqlPackageRequest) field2Length() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + if p.IsSetIsForce() { + l += bthrift.Binary.FieldBeginLength("isForce", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(*p.IsForce) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnCommitRequest) field7Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 7) - l += bthrift.Binary.I64Length(p.TxnId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnCommitRequest) field8Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("sync", thrift.BOOL, 8) - l += bthrift.Binary.BoolLength(p.Sync) +func (p *TDropPlsqlPackageRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.FieldEndLength() - return l -} + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } -func (p *TLoadTxnCommitRequest) field9Length() int { - l := 0 - if p.IsSetCommitInfos() { - l += bthrift.Binary.FieldBeginLength("commitInfos", thrift.LIST, 9) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) - for _, v := range p.CommitInfos { - l += v.BLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() } - return l -} - -func (p *TLoadTxnCommitRequest) field10Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 10) - l += bthrift.Binary.I64Length(*p.AuthCode) - - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return l -} -func (p *TLoadTxnCommitRequest) field11Length() int { - l := 0 - if p.IsSetTxnCommitAttachment() { - l += bthrift.Binary.FieldBeginLength("txnCommitAttachment", thrift.STRUCT, 11) - l += p.TxnCommitAttachment.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDropPlsqlPackageRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxnCommitRequest) field12Length() int { - l := 0 - if p.IsSetThriftRpcTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 12) - l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) +func (p *TDropPlsqlPackageRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + tmp := NewTPlsqlProcedureKey() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + p.PlsqlProcedureKey = tmp + return offset, nil } -func (p *TLoadTxnCommitRequest) field13Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 13) - l += bthrift.Binary.StringLengthNocopy(*p.Token) +// for compatibility +func (p *TDropPlsqlPackageRequest) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *TDropPlsqlPackageRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDropPlsqlPackageRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TLoadTxnCommitRequest) field14Length() int { +func (p *TDropPlsqlPackageRequest) BLength() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 14) - l += bthrift.Binary.I64Length(*p.DbId) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TDropPlsqlPackageRequest") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxnCommitRequest) field15Length() int { - l := 0 - if p.IsSetTbls() { - l += bthrift.Binary.FieldBeginLength("tbls", thrift.LIST, 15) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Tbls)) - for _, v := range p.Tbls { - l += bthrift.Binary.StringLengthNocopy(v) - - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TDropPlsqlPackageRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPlsqlProcedureKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "plsqlProcedureKey", thrift.STRUCT, 1) + offset += p.PlsqlProcedureKey.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TLoadTxnCommitRequest) field16Length() int { +func (p *TDropPlsqlPackageRequest) field1Length() int { l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 16) - l += bthrift.Binary.I64Length(*p.TableId) - + if p.IsSetPlsqlProcedureKey() { + l += bthrift.Binary.FieldBeginLength("plsqlProcedureKey", thrift.STRUCT, 1) + l += p.PlsqlProcedureKey.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p 
*TLoadTxnCommitResult_) FastRead(buf []byte) (int, error) { +func (p *TPlsqlPackageResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -22476,7 +44908,6 @@ func (p *TLoadTxnCommitResult_) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22504,28 +44935,22 @@ func (p *TLoadTxnCommitResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnCommitResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlsqlPackageResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnCommitResult_[fieldId])) } -func (p *TLoadTxnCommitResult_) FastReadField1(buf []byte) (int, error) { +func (p *TPlsqlPackageResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -22539,13 +44964,13 @@ func (p *TLoadTxnCommitResult_) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *TLoadTxnCommitResult_) FastWrite(buf []byte) int { +func (p *TPlsqlPackageResult_) FastWrite(buf []byte) int { return 0 } -func (p *TLoadTxnCommitResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlPackageResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnCommitResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPlsqlPackageResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -22554,9 +44979,9 @@ func (p *TLoadTxnCommitResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift return offset } -func (p *TLoadTxnCommitResult_) BLength() int { +func (p *TPlsqlPackageResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnCommitResult") + l += bthrift.Binary.StructBeginLength("TPlsqlPackageResult") if p != nil { l += p.field1Length() } @@ -22565,23 +44990,27 @@ func (p *TLoadTxnCommitResult_) BLength() int { return l } -func (p *TLoadTxnCommitResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPlsqlPackageResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], 
binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } - -func (p *TLoadTxnCommitResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + +func (p *TPlsqlPackageResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TCommitTxnRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMasterTokenRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -22645,132 +45074,6 @@ func (p *TCommitTxnRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22797,7 +45100,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -22806,7 +45109,7 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMasterTokenRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -22819,7 +45122,7 @@ func (p *TCommitTxnRequest) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TCommitTxnRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMasterTokenRequest) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -22832,472 +45135,117 @@ func (p *TCommitTxnRequest) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TCommitTxnRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Passwd = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TxnId = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField7(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.CommitInfos = make([]*types.TTabletCommitInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTTabletCommitInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.CommitInfos = append(p.CommitInfos, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.AuthCode = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 - - tmp := NewTTxnCommitAttachment() - if l, err := 
tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.TxnCommitAttachment = tmp - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ThriftRpcTimeoutMs = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField11(buf []byte) (int, error) { +func (p *TGetMasterTokenRequest) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Token = &v - - } - return offset, nil -} - -func (p *TCommitTxnRequest) FastReadField12(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbId = &v + p.Password = &v } return offset, nil } // for compatibility -func (p *TCommitTxnRequest) FastWrite(buf []byte) int { +func (p *TGetMasterTokenRequest) FastWrite(buf []byte) int { return 0 } -func (p *TCommitTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMasterTokenRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCommitTxnRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMasterTokenRequest") if p != nil { - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCommitTxnRequest) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TCommitTxnRequest") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TCommitTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) 
- - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCommitInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commit_infos", thrift.LIST, 7) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.CommitInfos { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnCommitAttachment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_commit_attachment", thrift.STRUCT, 9) - offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetThriftRpcTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetToken() { - 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCommitTxnRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCommitTxnRequest) field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCommitTxnRequest) field3Length() int { - l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCommitTxnRequest) field4Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCommitTxnRequest) field5Length() int { - l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCommitTxnRequest) field6Length() int { - l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.TxnId) - - l += bthrift.Binary.FieldEndLength() +func (p *TGetMasterTokenRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetMasterTokenRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TCommitTxnRequest) field7Length() int { - l := 0 - if p.IsSetCommitInfos() { - l += bthrift.Binary.FieldBeginLength("commit_infos", thrift.LIST, 7) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.CommitInfos)) - for _, v := range p.CommitInfos { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TGetMasterTokenRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TCommitTxnRequest) field8Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.AuthCode) +func (p *TGetMasterTokenRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TCommitTxnRequest) field9Length() int { - l := 0 - if p.IsSetTxnCommitAttachment() { - l += bthrift.Binary.FieldBeginLength("txn_commit_attachment", thrift.STRUCT, 9) - l += p.TxnCommitAttachment.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TGetMasterTokenRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPassword() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "password", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Password) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TCommitTxnRequest) field10Length() int { +func (p *TGetMasterTokenRequest) field1Length() int { l := 0 - if p.IsSetThriftRpcTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 10) - l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCommitTxnRequest) field11Length() int { +func (p *TGetMasterTokenRequest) field2Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Token) + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCommitTxnRequest) field12Length() int { +func (p *TGetMasterTokenRequest) field3Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetPassword() { + l += bthrift.Binary.FieldBeginLength("password", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Password) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCommitTxnResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMasterTokenResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -23333,6 +45281,34 @@ func (p *TCommitTxnResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -23359,7 +45335,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCommitTxnResult_[fieldId]), err) + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -23368,7 +45344,7 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCommitTxnResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMasterTokenResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -23381,34 +45357,64 @@ func (p *TCommitTxnResult_) FastReadField1(buf []byte) (int, error) { return offset, nil } +func (p *TGetMasterTokenResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TGetMasterTokenResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + // for compatibility -func (p *TCommitTxnResult_) FastWrite(buf []byte) int { +func (p *TGetMasterTokenResult_) FastWrite(buf []byte) int { return 0 } -func (p *TCommitTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMasterTokenResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCommitTxnResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMasterTokenResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCommitTxnResult_) BLength() int { +func (p *TGetMasterTokenResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCommitTxnResult") + l += bthrift.Binary.StructBeginLength("TGetMasterTokenResult") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TCommitTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMasterTokenResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetStatus() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) @@ -23418,7 +45424,28 @@ func (p *TCommitTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.Bin return offset } -func (p *TCommitTxnResult_) field1Length() int { +func (p *TGetMasterTokenResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMasterTokenResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMasterAddress() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 3) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMasterTokenResult_) field1Length() int { l := 0 if p.IsSetStatus() { l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) @@ -23428,14 +45455,33 @@ func (p *TCommitTxnResult_) field1Length() int { return l } -func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMasterTokenResult_) field2Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMasterTokenResult_) field3Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 3) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogLagResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -23453,7 +45499,7 @@ func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -23467,13 +45513,12 @@ func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetUser = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -23482,125 +45527,12 @@ func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetPasswd = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - l, err = 
p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -23628,465 +45560,556 @@ func (p *TLoadTxn2PCRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogLagResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCRequest[fieldId])) } -func (p *TLoadTxn2PCRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetBinlogLagResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v - } + p.Status = tmp return offset, nil } -func (p *TLoadTxn2PCRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetBinlogLagResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.User = v + p.Lag = &v } return offset, nil } -func (p *TLoadTxn2PCRequest) FastReadField3(buf []byte) (int, error) { +func (p *TGetBinlogLagResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l + } + p.MasterAddress = tmp + return offset, nil 
+} - p.Passwd = v +// for compatibility +func (p *TGetBinlogLagResult_) FastWrite(buf []byte) int { + return 0 +} +func (p *TGetBinlogLagResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogLagResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } - return offset, nil + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TLoadTxn2PCRequest) FastReadField4(buf []byte) (int, error) { +func (p *TGetBinlogLagResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGetBinlogLagResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGetBinlogLagResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v +func (p *TGetBinlogLagResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLag() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lag", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Lag) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TLoadTxn2PCRequest) FastReadField5(buf []byte) (int, error) { +func (p *TGetBinlogLagResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 3) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v +func (p *TGetBinlogLagResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBinlogLagResult_) field2Length() int { + l := 0 + if p.IsSetLag() { + l += bthrift.Binary.FieldBeginLength("lag", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.Lag) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TLoadTxn2PCRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 +func (p *TGetBinlogLagResult_) field3Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 3) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { +func (p *TUpdateFollowerStatsCacheRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + 
var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - p.TxnId = &v + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateFollowerStatsCacheRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxn2PCRequest) FastReadField7(buf []byte) (int, error) { +func (p *TUpdateFollowerStatsCacheRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Operation = &v + p.Key = &v } return offset, nil } -func (p *TLoadTxn2PCRequest) FastReadField8(buf []byte) (int, error) { +func (p *TUpdateFollowerStatsCacheRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - p.AuthCode = &v - } - return offset, nil -} + p.StatsRows = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l -func (p *TLoadTxn2PCRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 + _elem = v - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - 
p.Token = &v + } + p.StatsRows = append(p.StatsRows, _elem) } - return offset, nil -} - -func (p *TLoadTxn2PCRequest) FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ThriftRpcTimeoutMs = &v - } return offset, nil } -func (p *TLoadTxn2PCRequest) FastReadField11(buf []byte) (int, error) { +func (p *TUpdateFollowerStatsCacheRequest) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Label = &v + p.ColStatsData = &v } return offset, nil } // for compatibility -func (p *TLoadTxn2PCRequest) FastWrite(buf []byte) int { +func (p *TUpdateFollowerStatsCacheRequest) FastWrite(buf []byte) int { return 0 } -func (p *TLoadTxn2PCRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerStatsCacheRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxn2PCRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUpdateFollowerStatsCacheRequest") if p != nil { - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxn2PCRequest) BLength() int { +func (p *TUpdateFollowerStatsCacheRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxn2PCRequest") + l += bthrift.Binary.StructBeginLength("TUpdateFollowerStatsCacheRequest") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxn2PCRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 
0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetOperation() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "operation", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Operation) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxn2PCRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerStatsCacheRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 9) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxn2PCRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerStatsCacheRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetThriftRpcTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + if p.IsSetStatsRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "statsRows", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.StatsRows { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], 
binaryWriter, v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxn2PCRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerStatsCacheRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLabel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + if p.IsSetColStatsData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "colStatsData", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColStatsData) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxn2PCRequest) field1Length() int { +func (p *TUpdateFollowerStatsCacheRequest) field1Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Key) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxn2PCRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxn2PCRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxn2PCRequest) field4Length() int { +func (p *TUpdateFollowerStatsCacheRequest) field2Length() int { l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) + if p.IsSetStatsRows() { + l += bthrift.Binary.FieldBeginLength("statsRows", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.StatsRows)) + for _, v := range p.StatsRows { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxn2PCRequest) field5Length() int { +func (p *TUpdateFollowerStatsCacheRequest) field3Length() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + if p.IsSetColStatsData() { + l += bthrift.Binary.FieldBeginLength("colStatsData", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.ColStatsData) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxn2PCRequest) field6Length() int { - l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.TxnId) +func (p *TInvalidateFollowerStatsCacheRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.FieldEndLength() + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInvalidateFollowerStatsCacheRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxn2PCRequest) field7Length() int { - l := 0 - if p.IsSetOperation() { - l += bthrift.Binary.FieldBeginLength("operation", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.Operation) +func (p *TInvalidateFollowerStatsCacheRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Key = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TLoadTxn2PCRequest) field8Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.AuthCode) +// for compatibility +func (p *TInvalidateFollowerStatsCacheRequest) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *TInvalidateFollowerStatsCacheRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TInvalidateFollowerStatsCacheRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TLoadTxn2PCRequest) field9Length() int { +func (p *TInvalidateFollowerStatsCacheRequest) BLength() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 9) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TInvalidateFollowerStatsCacheRequest") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxn2PCRequest) field10Length() int { - l := 0 - if p.IsSetThriftRpcTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 10) - l += 
bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) +func (p *TInvalidateFollowerStatsCacheRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TLoadTxn2PCRequest) field11Length() int { +func (p *TInvalidateFollowerStatsCacheRequest) field1Length() int { l := 0 - if p.IsSetLabel() { - l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Label) + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Key) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxn2PCResult_) FastRead(buf []byte) (int, error) { +func (p *TUpdateFollowerPartitionStatsCacheRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -24104,13 +46127,12 @@ func (p *TLoadTxn2PCResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24138,48 +46160,42 @@ func (p *TLoadTxn2PCResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxn2PCResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUpdateFollowerPartitionStatsCacheRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxn2PCResult_[fieldId])) } -func (p *TLoadTxn2PCResult_) FastReadField1(buf []byte) (int, error) { +func (p *TUpdateFollowerPartitionStatsCacheRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Key = &v + } - p.Status = tmp return offset, nil } // for compatibility -func (p *TLoadTxn2PCResult_) FastWrite(buf []byte) int { +func (p *TUpdateFollowerPartitionStatsCacheRequest) FastWrite(buf []byte) int { return 0 
} -func (p *TLoadTxn2PCResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerPartitionStatsCacheRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxn2PCResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUpdateFollowerPartitionStatsCacheRequest") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -24188,9 +46204,9 @@ func (p *TLoadTxn2PCResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bi return offset } -func (p *TLoadTxn2PCResult_) BLength() int { +func (p *TUpdateFollowerPartitionStatsCacheRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxn2PCResult") + l += bthrift.Binary.StructBeginLength("TUpdateFollowerPartitionStatsCacheRequest") if p != nil { l += p.field1Length() } @@ -24199,23 +46215,29 @@ func (p *TLoadTxn2PCResult_) BLength() int { return l } -func (p *TLoadTxn2PCResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUpdateFollowerPartitionStatsCacheRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TLoadTxn2PCResult_) field1Length() int { +func (p *TUpdateFollowerPartitionStatsCacheRequest) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Key) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -24229,101 +46251,17 @@ func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { for { _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - 
offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -24335,9 +46273,9 @@ func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 9: + case 2: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -24349,9 +46287,9 @@ func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -24363,9 +46301,9 @@ func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -24377,9 +46315,9 @@ func (p *TRollbackTxnRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 12: + case 5: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField12(buf[offset:]) + l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -24417,7 +46355,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -24426,437 +46364,217 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRollbackTxnRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, 
err - } else { - offset += l - p.User = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Passwd = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField6(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnId = &v + p.DbId = &v } return offset, nil } -func (p *TRollbackTxnRequest) FastReadField7(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Reason = &v + p.TableId = &v } return offset, nil } -func (p *TRollbackTxnRequest) FastReadField9(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AuthCode = &v - - } - return offset, nil -} - -func (p *TRollbackTxnRequest) FastReadField10(buf []byte) (int, error) { - offset := 0 + p.ColumnId = &v - tmp := NewTTxnCommitAttachment() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.TxnCommitAttachment = tmp return offset, nil } -func (p *TRollbackTxnRequest) FastReadField11(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Token = &v + p.Length = &v } return offset, nil } -func (p *TRollbackTxnRequest) FastReadField12(buf []byte) (int, error) { +func (p *TAutoIncrementRangeRequest) FastReadField5(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v + p.LowerBound = &v } return offset, nil } // for compatibility -func (p *TRollbackTxnRequest) FastWrite(buf []byte) int { +func (p *TAutoIncrementRangeRequest) FastWrite(buf []byte) int { return 0 } -func (p *TRollbackTxnRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRollbackTxnRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAutoIncrementRangeRequest") if p != nil { - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += 
p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRollbackTxnRequest) BLength() int { +func (p *TAutoIncrementRangeRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TRollbackTxnRequest") + l += bthrift.Binary.StructBeginLength("TAutoIncrementRangeRequest") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TRollbackTxnRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRollbackTxnRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRollbackTxnRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRollbackTxnRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRollbackTxnRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TRollbackTxnRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 6) - offset += 
bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRollbackTxnRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetReason() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "reason", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Reason) + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRollbackTxnRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if p.IsSetColumnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_id", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ColumnId) -func (p *TRollbackTxnRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnCommitAttachment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_commit_attachment", thrift.STRUCT, 10) - offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRollbackTxnRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + if p.IsSetLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "length", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Length) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRollbackTxnRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + if p.IsSetLowerBound() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lower_bound", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LowerBound) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRollbackTxnRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TRollbackTxnRequest) 
field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TRollbackTxnRequest) field3Length() int { - l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TRollbackTxnRequest) field4Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TRollbackTxnRequest) field5Length() int { - l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TRollbackTxnRequest) field6Length() int { +func (p *TAutoIncrementRangeRequest) field1Length() int { l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.TxnId) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.DbId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRollbackTxnRequest) field7Length() int { +func (p *TAutoIncrementRangeRequest) field2Length() int { l := 0 - if p.IsSetReason() { - l += bthrift.Binary.FieldBeginLength("reason", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.Reason) + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TableId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRollbackTxnRequest) field9Length() int { +func (p *TAutoIncrementRangeRequest) field3Length() int { l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.AuthCode) - - l += bthrift.Binary.FieldEndLength() - } - return l -} + if p.IsSetColumnId() { + l += bthrift.Binary.FieldBeginLength("column_id", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.ColumnId) -func (p *TRollbackTxnRequest) field10Length() int { - l := 0 - if p.IsSetTxnCommitAttachment() { - l += bthrift.Binary.FieldBeginLength("txn_commit_attachment", thrift.STRUCT, 10) - l += p.TxnCommitAttachment.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRollbackTxnRequest) field11Length() int { +func (p *TAutoIncrementRangeRequest) field4Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Token) + if p.IsSetLength() { + l += bthrift.Binary.FieldBeginLength("length", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.Length) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRollbackTxnRequest) field12Length() int { +func (p *TAutoIncrementRangeRequest) field5Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetLowerBound() { + l += bthrift.Binary.FieldBeginLength("lower_bound", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.LowerBound) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRollbackTxnResult_) FastRead(buf []byte) (int, error) { +func (p 
*TAutoIncrementRangeResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -24892,6 +46610,48 @@ func (p *TRollbackTxnResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24918,7 +46678,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRollbackTxnResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -24927,7 +46687,7 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRollbackTxnResult_) FastReadField1(buf []byte) (int, error) { +func (p *TAutoIncrementRangeResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -24940,64 +46700,168 @@ func (p *TRollbackTxnResult_) FastReadField1(buf []byte) (int, error) { return offset, nil } +func (p *TAutoIncrementRangeResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Start = &v + + } + return offset, nil +} + +func (p *TAutoIncrementRangeResult_) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Length = &v + + } + return offset, nil +} + +func (p *TAutoIncrementRangeResult_) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterAddress = tmp + return offset, nil +} + // for compatibility -func (p *TRollbackTxnResult_) FastWrite(buf []byte) int { +func (p *TAutoIncrementRangeResult_) FastWrite(buf []byte) int { return 0 } -func (p *TRollbackTxnResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAutoIncrementRangeResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAutoIncrementRangeResult") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += 
p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TAutoIncrementRangeResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TAutoIncrementRangeResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TAutoIncrementRangeResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAutoIncrementRangeResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStart() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "start", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Start) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAutoIncrementRangeResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "length", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Length) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAutoIncrementRangeResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRollbackTxnResult") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 4) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRollbackTxnResult_) BLength() int { +func (p *TAutoIncrementRangeResult_) field1Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TRollbackTxnResult") - if p != nil { - l += p.field1Length() + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() return l } -func (p *TRollbackTxnResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TAutoIncrementRangeResult_) field2Length() int { + l := 0 + if p.IsSetStart() { + l += bthrift.Binary.FieldBeginLength("start", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.Start) + + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TRollbackTxnResult_) field1Length() int { +func (p 
*TAutoIncrementRangeResult_) field3Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + if p.IsSetLength() { + l += bthrift.Binary.FieldBeginLength("length", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Length) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { +func (p *TAutoIncrementRangeResult_) field4Length() int { + l := 0 + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 4) + l += p.MasterAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false - var issetDb bool = false - var issetTbl bool = false - var issetTxnId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25015,7 +46879,7 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -25029,13 +46893,12 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetUser = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25044,13 +46907,12 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetPasswd = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25059,13 +46921,12 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetDb = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25080,120 +46941,6 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetTbl = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetTxnId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 
9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField13(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25221,209 +46968,463 @@ func (p *TLoadTxnRollbackRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetDb { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetTbl { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetTxnId { - fieldId = 7 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackRequest[fieldId])) } -func (p *TLoadTxnRollbackRequest) FastReadField1(buf []byte) (int, error) { +func (p *TCreatePartitionRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v + p.TxnId = &v } return offset, nil } -func (p *TLoadTxnRollbackRequest) FastReadField2(buf []byte) (int, error) { +func (p *TCreatePartitionRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := 
bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.DbId = &v - p.User = v + } + return offset, nil +} + +func (p *TCreatePartitionRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v } return offset, nil } -func (p *TLoadTxnRollbackRequest) FastReadField3(buf []byte) (int, error) { +func (p *TCreatePartitionRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionValues = make([][]*exprs.TNullableStringLiteral, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]*exprs.TNullableStringLiteral, 0, size) + for i := 0; i < size; i++ { + _elem1 := exprs.NewTNullableStringLiteral() + if l, err := _elem1.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PartitionValues = append(p.PartitionValues, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TCreatePartitionRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeEndpoint = &v + + } + return offset, nil +} + +// for compatibility +func (p *TCreatePartitionRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TCreatePartitionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreatePartitionRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TCreatePartitionRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TCreatePartitionRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TCreatePartitionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreatePartitionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreatePartitionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreatePartitionRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionValues() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitionValues", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.PartitionValues { + length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreatePartitionRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_endpoint", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BeEndpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCreatePartitionRequest) field1Length() int { + l := 0 + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.TxnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionRequest) field2Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionRequest) field3Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionRequest) field4Length() int { + l := 0 + if p.IsSetPartitionValues() { + l += bthrift.Binary.FieldBeginLength("partitionValues", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.PartitionValues)) + for _, v := range p.PartitionValues { + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionRequest) field5Length() int { + l := 0 + if p.IsSetBeEndpoint() { + l += bthrift.Binary.FieldBeginLength("be_endpoint", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.BeEndpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TCreatePartitionResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = 
bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l - - p.Passwd = v - + if err != nil { + goto ReadFieldEndError + } } - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Db = v - + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Tbl = v - - } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TLoadTxnRollbackRequest) FastReadField6(buf []byte) (int, error) { +func (p *TCreatePartitionResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UserIp = 
&v - } + p.Status = tmp return offset, nil } -func (p *TLoadTxnRollbackRequest) FastReadField7(buf []byte) (int, error) { +func (p *TCreatePartitionResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - - p.TxnId = v - } - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Reason = &v + p.Partitions = make([]*descriptors.TOlapTablePartition, 0, size) + for i := 0; i < size; i++ { + _elem := descriptors.NewTOlapTablePartition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Partitions = append(p.Partitions, _elem) } - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AuthCode = &v - } return offset, nil } -func (p *TLoadTxnRollbackRequest) FastReadField10(buf []byte) (int, error) { +func (p *TCreatePartitionResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := NewTTxnCommitAttachment() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l } - p.TxnCommitAttachment = tmp - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField11(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v + p.Tablets = make([]*descriptors.TTabletLocation, 0, size) + for i := 0; i < size; i++ { + _elem := descriptors.NewTTabletLocation() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Tablets = append(p.Tablets, _elem) } - return offset, nil -} - -func (p *TLoadTxnRollbackRequest) FastReadField12(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v - } return offset, nil } -func (p *TLoadTxnRollbackRequest) FastReadField13(buf []byte) (int, error) { +func (p *TCreatePartitionResult_) FastReadField4(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -25431,19 +47432,16 @@ func (p *TLoadTxnRollbackRequest) FastReadField13(buf []byte) (int, error) { if err != nil { return offset, err } - p.Tbls = make([]string, 0, size) + p.Nodes = make([]*descriptors.TNodeInfo, 0, size) for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem := descriptors.NewTNodeInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _elem = v - } - p.Tbls = append(p.Tbls, _elem) + p.Nodes = append(p.Nodes, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -25454,325 +47452,147 @@ 
func (p *TLoadTxnRollbackRequest) FastReadField13(buf []byte) (int, error) { } // for compatibility -func (p *TLoadTxnRollbackRequest) FastWrite(buf []byte) int { +func (p *TCreatePartitionResult_) FastWrite(buf []byte) int { return 0 } -func (p *TLoadTxnRollbackRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreatePartitionResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnRollbackRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreatePartitionResult") if p != nil { - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxnRollbackRequest) BLength() int { +func (p *TCreatePartitionResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnRollbackRequest") + l += bthrift.Binary.StructBeginLength("TCreatePartitionResult") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - l += p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxnRollbackRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], 
binaryWriter, p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Tbl) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetReason() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "reason", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Reason) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetAuthCode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_code", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.AuthCode) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TLoadTxnRollbackRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreatePartitionResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnCommitAttachment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnCommitAttachment", thrift.STRUCT, 10) - offset += p.TxnCommitAttachment.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnRollbackRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreatePartitionResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Partitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) 
} return offset } -func (p *TLoadTxnRollbackRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreatePartitionResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - + if p.IsSetTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tablets { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnRollbackRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCreatePartitionResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTbls() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbls", thrift.LIST, 13) + if p.IsSetNodes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nodes", thrift.LIST, 4) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.Tbls { + for _, v := range p.Nodes { length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TLoadTxnRollbackRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TLoadTxnRollbackRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnRollbackRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnRollbackRequest) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(p.Db) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnRollbackRequest) field5Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(p.Tbl) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnRollbackRequest) field6Length() int { - l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TLoadTxnRollbackRequest) field7Length() int { - l := 0 - l += 
bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 7) - l += bthrift.Binary.I64Length(p.TxnId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TLoadTxnRollbackRequest) field8Length() int { - l := 0 - if p.IsSetReason() { - l += bthrift.Binary.FieldBeginLength("reason", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(*p.Reason) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TLoadTxnRollbackRequest) field9Length() int { - l := 0 - if p.IsSetAuthCode() { - l += bthrift.Binary.FieldBeginLength("auth_code", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.AuthCode) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TLoadTxnRollbackRequest) field10Length() int { +func (p *TCreatePartitionResult_) field1Length() int { l := 0 - if p.IsSetTxnCommitAttachment() { - l += bthrift.Binary.FieldBeginLength("txnCommitAttachment", thrift.STRUCT, 10) - l += p.TxnCommitAttachment.BLength() + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnRollbackRequest) field11Length() int { +func (p *TCreatePartitionResult_) field2Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnRollbackRequest) field12Length() int { +func (p *TCreatePartitionResult_) field3Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 12) - l += bthrift.Binary.I64Length(*p.DbId) - + if p.IsSetTablets() { + l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tablets)) + for _, v := range p.Tablets { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TLoadTxnRollbackRequest) field13Length() int { +func (p *TCreatePartitionResult_) field4Length() int { l := 0 - if p.IsSetTbls() { - l += bthrift.Binary.FieldBeginLength("tbls", thrift.LIST, 13) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Tbls)) - for _, v := range p.Tbls { - l += bthrift.Binary.StringLengthNocopy(v) - + if p.IsSetNodes() { + l += bthrift.Binary.FieldBeginLength("nodes", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Nodes)) + for _, v := range p.Nodes { + l += v.BLength() } l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() @@ -25780,13 +47600,12 @@ func (p *TLoadTxnRollbackRequest) field13Length() int { return l } -func (p *TLoadTxnRollbackResult_) FastRead(buf []byte) (int, error) { +func (p *TReplacePartitionRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25804,13 +47623,68 @@ func (p *TLoadTxnRollbackResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if 
err != nil { goto ReadFieldError } - issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25838,92 +47712,264 @@ func (p *TLoadTxnRollbackResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLoadTxnRollbackResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReplacePartitionRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TLoadTxnRollbackResult_[fieldId])) } -func (p *TLoadTxnRollbackResult_) FastReadField1(buf []byte) (int, error) { +func (p *TReplacePartitionRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.OverwriteGroupId = &v + + } + return offset, nil +} + +func (p *TReplacePartitionRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v + + } + return offset, nil +} + +func (p *TReplacePartitionRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TReplacePartitionRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, 
size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionIds = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.PartitionIds = append(p.PartitionIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TReplacePartitionRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeEndpoint = &v + } - p.Status = tmp return offset, nil } // for compatibility -func (p *TLoadTxnRollbackResult_) FastWrite(buf []byte) int { +func (p *TReplacePartitionRequest) FastWrite(buf []byte) int { return 0 } -func (p *TLoadTxnRollbackResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReplacePartitionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLoadTxnRollbackResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReplacePartitionRequest") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TLoadTxnRollbackResult_) BLength() int { +func (p *TReplacePartitionRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TLoadTxnRollbackResult") + l += bthrift.Binary.StructBeginLength("TReplacePartitionRequest") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TLoadTxnRollbackResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReplacePartitionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetOverwriteGroupId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite_group_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.OverwriteGroupId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TLoadTxnRollbackResult_) field1Length() int { +func (p *TReplacePartitionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReplacePartitionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReplacePartitionRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_ids", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.PartitionIds { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReplacePartitionRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_endpoint", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BeEndpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReplacePartitionRequest) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetOverwriteGroupId() { + l += bthrift.Binary.FieldBeginLength("overwrite_group_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.OverwriteGroupId) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { +func (p *TReplacePartitionRequest) field2Length() int { + l := 0 + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.DbId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionRequest) field3Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionRequest) field4Length() int { + l := 0 + if p.IsSetPartitionIds() { + l += bthrift.Binary.FieldBeginLength("partition_ids", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.PartitionIds)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.PartitionIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionRequest) field5Length() int { + l := 0 + if p.IsSetBeEndpoint() { + l += bthrift.Binary.FieldBeginLength("be_endpoint", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.BeEndpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetJobId bool = false - var issetTaskId bool = false - var issetTaskType bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25941,13 +47987,12 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset 
+= l if err != nil { goto ReadFieldError } - issetJobId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25956,13 +48001,12 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTaskId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25971,13 +48015,12 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTaskType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25986,7 +48029,7 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -25999,20 +48042,6 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26033,246 +48062,403 @@ func (p *TSnapshotLoaderReportRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetJobId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetTaskId { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetTaskType { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotLoaderReportRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReplacePartitionResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotLoaderReportRequest[fieldId])) } -func (p *TSnapshotLoaderReportRequest) FastReadField1(buf []byte) (int, error) { +func (p *TReplacePartitionResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.JobId = v - } + p.Status = tmp return offset, nil } -func (p *TSnapshotLoaderReportRequest) FastReadField2(buf []byte) (int, error) { 
+func (p *TReplacePartitionResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - - p.TaskId = v - } - return offset, nil -} - -func (p *TSnapshotLoaderReportRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 + p.Partitions = make([]*descriptors.TOlapTablePartition, 0, size) + for i := 0; i < size; i++ { + _elem := descriptors.NewTOlapTablePartition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + p.Partitions = append(p.Partitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.TaskType = types.TTaskType(v) - } return offset, nil } -func (p *TSnapshotLoaderReportRequest) FastReadField4(buf []byte) (int, error) { +func (p *TReplacePartitionResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tablets = make([]*descriptors.TTabletLocation, 0, size) + for i := 0; i < size; i++ { + _elem := descriptors.NewTTabletLocation() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Tablets = append(p.Tablets, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FinishedNum = &v - } return offset, nil } -func (p *TSnapshotLoaderReportRequest) FastReadField5(buf []byte) (int, error) { +func (p *TReplacePartitionResult_) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Nodes = make([]*descriptors.TNodeInfo, 0, size) + for i := 0; i < size; i++ { + _elem := descriptors.NewTNodeInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Nodes = append(p.Nodes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TotalNum = &v - } return offset, nil } // for compatibility -func (p *TSnapshotLoaderReportRequest) FastWrite(buf []byte) int { +func (p *TReplacePartitionResult_) FastWrite(buf []byte) int { return 0 } -func (p *TSnapshotLoaderReportRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReplacePartitionResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSnapshotLoaderReportRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReplacePartitionResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) 
offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TSnapshotLoaderReportRequest) BLength() int { +func (p *TReplacePartitionResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TSnapshotLoaderReportRequest") + l += bthrift.Binary.StructBeginLength("TReplacePartitionResult") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TSnapshotLoaderReportRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReplacePartitionResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId) + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TReplacePartitionResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Partitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TSnapshotLoaderReportRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReplacePartitionResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], p.TaskId) + if p.IsSetTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tablets { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TReplacePartitionResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNodes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nodes", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Nodes { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TSnapshotLoaderReportRequest) fastWriteField3(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TReplacePartitionResult_) field1Length() int { + l := 0 + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionResult_) field2Length() int { + l := 0 + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionResult_) field3Length() int { + l := 0 + if p.IsSetTablets() { + l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tablets)) + for _, v := range p.Tablets { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReplacePartitionResult_) field4Length() int { + l := 0 + if p.IsSetNodes() { + l += bthrift.Binary.FieldBeginLength("nodes", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Nodes)) + for _, v := range p.Nodes { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaReplica) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaReplica[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetMetaReplica) FastReadField1(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_type", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.TaskType)) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TSnapshotLoaderReportRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFinishedNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finished_num", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.FinishedNum) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Id = &v - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset, nil } -func (p *TSnapshotLoaderReportRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTotalNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_num", thrift.I32, 5) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.TotalNum) +// for compatibility +func (p *TGetMetaReplica) FastWrite(buf []byte) int { + return 0 +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TGetMetaReplica) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaReplica") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TSnapshotLoaderReportRequest) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(p.JobId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TSnapshotLoaderReportRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("task_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(p.TaskId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TSnapshotLoaderReportRequest) field3Length() int { +func (p *TGetMetaReplica) BLength() int { l := 0 - l += bthrift.Binary.FieldBeginLength("task_type", thrift.I32, 3) - l += bthrift.Binary.I32Length(int32(p.TaskType)) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TGetMetaReplica") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TSnapshotLoaderReportRequest) field4Length() int { - l := 0 - if p.IsSetFinishedNum() { - l += bthrift.Binary.FieldBeginLength("finished_num", thrift.I32, 4) - l += bthrift.Binary.I32Length(*p.FinishedNum) +func (p *TGetMetaReplica) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TSnapshotLoaderReportRequest) field5Length() int { +func (p *TGetMetaReplica) field1Length() int { l := 0 - if p.IsSetTotalNum() { - l += bthrift.Binary.FieldBeginLength("total_num", thrift.I32, 5) - l += bthrift.Binary.I32Length(*p.TotalNum) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFrontendPingFrontendRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMetaTablet) FastRead(buf []byte) (int, error) { var err error var offset int var l 
int var fieldTypeId thrift.TType var fieldId int16 - var issetClusterId bool = false - var issetToken bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -26290,13 +48476,12 @@ func (p *TFrontendPingFrontendRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetClusterId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26305,13 +48490,12 @@ func (p *TFrontendPingFrontendRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetToken = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26339,68 +48523,69 @@ func (p *TFrontendPingFrontendRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetClusterId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetToken { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTablet[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendRequest[fieldId])) } -func (p *TFrontendPingFrontendRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaTablet) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.ClusterId = v + p.Id = &v } return offset, nil } -func (p *TFrontendPingFrontendRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaTablet) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Replicas = make([]*TGetMetaReplica, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaReplica() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Replicas = append(p.Replicas, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Token = v - } return offset, nil } // for compatibility -func (p *TFrontendPingFrontendRequest) FastWrite(buf 
[]byte) int { +func (p *TGetMetaTablet) FastWrite(buf []byte) int { return 0 } -func (p *TFrontendPingFrontendRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTablet) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendPingFrontendRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaTablet") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) @@ -26410,9 +48595,9 @@ func (p *TFrontendPingFrontendRequest) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *TFrontendPingFrontendRequest) BLength() int { +func (p *TGetMetaTablet) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFrontendPingFrontendRequest") + l += bthrift.Binary.StructBeginLength("TGetMetaTablet") if p != nil { l += p.field1Length() l += p.field2Length() @@ -26422,56 +48607,66 @@ func (p *TFrontendPingFrontendRequest) BLength() int { return l } -func (p *TFrontendPingFrontendRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTablet) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clusterId", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], p.ClusterId) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTablet) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetReplicas() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replicas", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Replicas { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendRequest) field1Length() int { +func (p *TGetMetaTablet) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("clusterId", thrift.I32, 1) - l += bthrift.Binary.I32Length(p.ClusterId) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFrontendPingFrontendRequest) field2Length() int { +func (p *TGetMetaTablet) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.Token) - - l += bthrift.Binary.FieldEndLength() + if p.IsSetReplicas() { + l += bthrift.Binary.FieldBeginLength("replicas", thrift.LIST, 2) + l += 
bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Replicas)) + for _, v := range p.Replicas { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TDiskInfo) FastRead(buf []byte) (int, error) { +func (p *TGetMetaIndex) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetDirType bool = false - var issetDir bool = false - var issetFilesystem bool = false - var issetBlocks bool = false - var issetUsed bool = false - var issetAvailable bool = false - var issetUseRate bool = false - var issetMountedOn bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -26489,13 +48684,12 @@ func (p *TDiskInfo) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetDirType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26510,7 +48704,6 @@ func (p *TDiskInfo) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetDir = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26519,88 +48712,12 @@ func (p *TDiskInfo) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetFilesystem = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetBlocks = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetUsed = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetAvailable = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetUseRate = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetMountedOn = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26628,371 +48745,187 @@ func (p *TDiskInfo) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetDirType { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetDir { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetFilesystem { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetBlocks { - fieldId = 4 - goto 
RequiredFieldNotSetError - } - - if !issetUsed { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetAvailable { - fieldId = 6 - goto RequiredFieldNotSetError - } - - if !issetUseRate { - fieldId = 7 - goto RequiredFieldNotSetError - } - - if !issetMountedOn { - fieldId = 8 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDiskInfo[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaIndex[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TDiskInfo[fieldId])) -} - -func (p *TDiskInfo) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.DirType = v - - } - return offset, nil -} - -func (p *TDiskInfo) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Dir = v - - } - return offset, nil -} - -func (p *TDiskInfo) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Filesystem = v - - } - return offset, nil } -func (p *TDiskInfo) FastReadField4(buf []byte) (int, error) { +func (p *TGetMetaIndex) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Blocks = v + p.Id = &v } return offset, nil } -func (p *TDiskInfo) FastReadField5(buf []byte) (int, error) { +func (p *TGetMetaIndex) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Used = v + p.Name = &v } return offset, nil } -func (p *TDiskInfo) FastReadField6(buf []byte) (int, error) { +func (p *TGetMetaIndex) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l - - p.Available = v - } - return offset, nil -} - -func (p *TDiskInfo) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.UseRate = v + p.Tablets = make([]*TGetMetaTablet, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaTablet() + if l, err := 
_elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Tablets = append(p.Tablets, _elem) } - return offset, nil -} - -func (p *TDiskInfo) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MountedOn = v - } return offset, nil } // for compatibility -func (p *TDiskInfo) FastWrite(buf []byte) int { +func (p *TGetMetaIndex) FastWrite(buf []byte) int { return 0 } -func (p *TDiskInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaIndex) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDiskInfo") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaIndex") if p != nil { - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TDiskInfo) BLength() int { +func (p *TGetMetaIndex) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TDiskInfo") + l += bthrift.Binary.StructBeginLength("TGetMetaIndex") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TDiskInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dirType", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.DirType) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dir", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Dir) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filesystem", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Filesystem) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "blocks", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], p.Blocks) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "used", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], p.Used) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "available", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], p.Available) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "useRate", thrift.I32, 7) - offset += bthrift.Binary.WriteI32(buf[offset:], p.UseRate) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mountedOn", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.MountedOn) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TDiskInfo) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("dirType", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.DirType) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TDiskInfo) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("dir", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.Dir) - - l += bthrift.Binary.FieldEndLength() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TDiskInfo) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("filesystem", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Filesystem) +func (p *TGetMetaIndex) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TDiskInfo) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("blocks", thrift.I64, 4) - l += bthrift.Binary.I64Length(p.Blocks) +func (p *TGetMetaIndex) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TDiskInfo) field5Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("used", thrift.I64, 5) - l += bthrift.Binary.I64Length(p.Used) - - l += bthrift.Binary.FieldEndLength() - return l +func (p *TGetMetaIndex) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tablets { + length++ + offset += v.FastWriteNocopy(buf[offset:], 
binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset } -func (p *TDiskInfo) field6Length() int { +func (p *TGetMetaIndex) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("available", thrift.I64, 6) - l += bthrift.Binary.I64Length(p.Available) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TDiskInfo) field7Length() int { +func (p *TGetMetaIndex) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("useRate", thrift.I32, 7) - l += bthrift.Binary.I32Length(p.UseRate) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TDiskInfo) field8Length() int { +func (p *TGetMetaIndex) field3Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("mountedOn", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(p.MountedOn) - - l += bthrift.Binary.FieldEndLength() + if p.IsSetTablets() { + l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tablets)) + for _, v := range p.Tablets { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false - var issetMsg bool = false - var issetQueryPort bool = false - var issetRpcPort bool = false - var issetReplayedJournalId bool = false - var issetVersion bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -27010,13 +48943,12 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27031,7 +48963,6 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetMsg = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27040,13 +48971,12 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetQueryPort = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27055,13 +48985,12 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetRpcPort = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27070,13 +48999,12 @@ func (p 
*TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetReplayedJournalId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27085,65 +49013,8 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetVersion = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: if fieldTypeId == thrift.LIST { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField10(buf[offset:]) + l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -27175,150 +49046,87 @@ func (p *TFrontendPingFrontendResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetMsg { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetQueryPort { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetRpcPort { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if !issetReplayedJournalId { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetVersion { - fieldId = 6 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendPingFrontendResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaPartition[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFrontendPingFrontendResult_[fieldId])) } -func (p *TFrontendPingFrontendResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField1(buf []byte) 
(int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Status = TFrontendPingFrontendStatusCode(v) + p.Id = &v } return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Msg = v - - } - return offset, nil -} - -func (p *TFrontendPingFrontendResult_) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.QueryPort = v - - } - return offset, nil -} - -func (p *TFrontendPingFrontendResult_) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.RpcPort = v + p.Name = &v } return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField5(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.ReplayedJournalId = v + p.Key = &v } return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField6(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField4(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Version = v + p.Range = &v } return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField7(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LastStartupTime = &v + p.IsTemp = &v } return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField8(buf []byte) (int, error) { +func (p *TGetMetaPartition) FastReadField6(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -27326,16 +49134,16 @@ func (p *TFrontendPingFrontendResult_) FastReadField8(buf []byte) (int, error) { if err != nil { return offset, err } - p.DiskInfos = make([]*TDiskInfo, 0, size) + p.Indexes = make([]*TGetMetaIndex, 0, size) for i := 0; i < size; i++ { - _elem := NewTDiskInfo() + _elem := NewTGetMetaIndex() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.DiskInfos = append(p.DiskInfos, _elem) + p.Indexes = append(p.Indexes, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -27345,60 +49153,30 @@ func (p *TFrontendPingFrontendResult_) FastReadField8(buf []byte) (int, error) { return offset, nil } -func (p *TFrontendPingFrontendResult_) FastReadField9(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ProcessUUID = &v - - } - return offset, nil -} - -func (p *TFrontendPingFrontendResult_) 
FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ArrowFlightSqlPort = &v - - } - return offset, nil -} - // for compatibility -func (p *TFrontendPingFrontendResult_) FastWrite(buf []byte) int { +func (p *TGetMetaPartition) FastWrite(buf []byte) int { return 0 } -func (p *TFrontendPingFrontendResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendPingFrontendResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaPartition") if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFrontendPingFrontendResult_) BLength() int { +func (p *TGetMetaPartition) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFrontendPingFrontendResult") + l += bthrift.Binary.StructBeginLength("TGetMetaPartition") if p != nil { l += p.field1Length() l += p.field2Length() @@ -27406,89 +49184,75 @@ func (p *TFrontendPingFrontendResult_) BLength() int { l += p.field4Length() l += p.field5Length() l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFrontendPingFrontendResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Status)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TFrontendPingFrontendResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "msg", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Msg) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TFrontendPingFrontendResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queryPort", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], p.QueryPort) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rpcPort", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], p.RpcPort) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replayedJournalId", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], p.ReplayedJournalId) + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Version) + if p.IsSetRange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "range", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Range) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLastStartupTime() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lastStartupTime", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LastStartupTime) + if p.IsSetIsTemp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_temp", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsTemp) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartition) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDiskInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "diskInfos", thrift.LIST, 8) + if p.IsSetIndexes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "indexes", thrift.LIST, 6) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.DiskInfos { + for _, v := range p.Indexes { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -27499,130 +49263,76 @@ 
func (p *TFrontendPingFrontendResult_) fastWriteField8(buf []byte, binaryWriter return offset } -func (p *TFrontendPingFrontendResult_) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetProcessUUID() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "processUUID", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ProcessUUID) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFrontendPingFrontendResult_) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetArrowFlightSqlPort() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "arrowFlightSqlPort", thrift.I32, 10) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.ArrowFlightSqlPort) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFrontendPingFrontendResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.Status)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFrontendPingFrontendResult_) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("msg", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.Msg) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFrontendPingFrontendResult_) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("queryPort", thrift.I32, 3) - l += bthrift.Binary.I32Length(p.QueryPort) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFrontendPingFrontendResult_) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("rpcPort", thrift.I32, 4) - l += bthrift.Binary.I32Length(p.RpcPort) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFrontendPingFrontendResult_) field5Length() int { +func (p *TGetMetaPartition) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("replayedJournalId", thrift.I64, 5) - l += bthrift.Binary.I64Length(p.ReplayedJournalId) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFrontendPingFrontendResult_) field6Length() int { +func (p *TGetMetaPartition) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("version", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(p.Version) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFrontendPingFrontendResult_) field7Length() int { +func (p *TGetMetaPartition) field3Length() int { l := 0 - if p.IsSetLastStartupTime() { - l += bthrift.Binary.FieldBeginLength("lastStartupTime", thrift.I64, 7) - l += bthrift.Binary.I64Length(*p.LastStartupTime) + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Key) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFrontendPingFrontendResult_) field8Length() int { +func (p *TGetMetaPartition) field4Length() int { l := 0 - if p.IsSetDiskInfos() { - l += bthrift.Binary.FieldBeginLength("diskInfos", thrift.LIST, 8) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DiskInfos)) - for _, v := range p.DiskInfos { - l += 
v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetRange() { + l += bthrift.Binary.FieldBeginLength("range", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Range) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFrontendPingFrontendResult_) field9Length() int { +func (p *TGetMetaPartition) field5Length() int { l := 0 - if p.IsSetProcessUUID() { - l += bthrift.Binary.FieldBeginLength("processUUID", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.ProcessUUID) + if p.IsSetIsTemp() { + l += bthrift.Binary.FieldBeginLength("is_temp", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.IsTemp) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFrontendPingFrontendResult_) field10Length() int { +func (p *TGetMetaPartition) field6Length() int { l := 0 - if p.IsSetArrowFlightSqlPort() { - l += bthrift.Binary.FieldBeginLength("arrowFlightSqlPort", thrift.I32, 10) - l += bthrift.Binary.I32Length(*p.ArrowFlightSqlPort) - + if p.IsSetIndexes() { + l += bthrift.Binary.FieldBeginLength("indexes", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Indexes)) + for _, v := range p.Indexes { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPropertyVal) FastRead(buf []byte) (int, error) { +func (p *TGetMetaTable) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -27645,7 +49355,7 @@ func (p *TPropertyVal) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -27659,7 +49369,7 @@ func (p *TPropertyVal) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -27673,7 +49383,7 @@ func (p *TPropertyVal) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -27687,7 +49397,7 @@ func (p *TPropertyVal) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -27726,7 +49436,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPropertyVal[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTable[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -27735,80 +49445,94 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPropertyVal) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaTable) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.StrVal = &v + p.Id = &v } return offset, nil } -func (p *TPropertyVal) FastReadField2(buf []byte) (int, 
error) { +func (p *TGetMetaTable) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.IntVal = &v + p.Name = &v } return offset, nil } -func (p *TPropertyVal) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaTable) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LongVal = &v + p.InTrash = &v } return offset, nil } -func (p *TPropertyVal) FastReadField4(buf []byte) (int, error) { +func (p *TGetMetaTable) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Partitions = make([]*TGetMetaPartition, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaPartition() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Partitions = append(p.Partitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BoolVal = &v - } return offset, nil } // for compatibility -func (p *TPropertyVal) FastWrite(buf []byte) int { +func (p *TGetMetaTable) FastWrite(buf []byte) int { return 0 } -func (p *TPropertyVal) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPropertyVal") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaTable") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPropertyVal) BLength() int { +func (p *TGetMetaTable) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPropertyVal") + l += bthrift.Binary.StructBeginLength("TGetMetaTable") if p != nil { l += p.field1Length() l += p.field2Length() @@ -27820,95 +49544,105 @@ func (p *TPropertyVal) BLength() int { return l } -func (p *TPropertyVal) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTable) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStrVal() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strVal", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StrVal) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPropertyVal) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTable) fastWriteField2(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIntVal() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intVal", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.IntVal) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPropertyVal) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTable) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLongVal() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "longVal", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LongVal) + if p.IsSetInTrash() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "in_trash", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.InTrash) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPropertyVal) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTable) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetBoolVal() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "boolVal", thrift.BOOL, 4) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.BoolVal) - + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Partitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPropertyVal) field1Length() int { +func (p *TGetMetaTable) field1Length() int { l := 0 - if p.IsSetStrVal() { - l += bthrift.Binary.FieldBeginLength("strVal", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.StrVal) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPropertyVal) field2Length() int { +func (p *TGetMetaTable) field2Length() int { l := 0 - if p.IsSetIntVal() { - l += bthrift.Binary.FieldBeginLength("intVal", thrift.I32, 2) - l += bthrift.Binary.I32Length(*p.IntVal) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPropertyVal) field3Length() int { +func (p *TGetMetaTable) field3Length() int { l := 0 - if p.IsSetLongVal() { - l += bthrift.Binary.FieldBeginLength("longVal", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.LongVal) + if p.IsSetInTrash() { + l += bthrift.Binary.FieldBeginLength("in_trash", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.InTrash) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPropertyVal) field4Length() int { +func (p *TGetMetaTable) field4Length() int { l := 0 - if p.IsSetBoolVal() { - l += bthrift.Binary.FieldBeginLength("boolVal", thrift.BOOL, 4) - l += bthrift.Binary.BoolLength(*p.BoolVal) - + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 4) + l += 
bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TWaitingTxnStatusRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMetaDB) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -27945,7 +49679,7 @@ func (p *TWaitingTxnStatusRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -27959,7 +49693,7 @@ func (p *TWaitingTxnStatusRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -27972,6 +49706,20 @@ func (p *TWaitingTxnStatusRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -27998,7 +49746,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaDB[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -28007,143 +49755,204 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TWaitingTxnStatusRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaDB) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v + p.Id = &v } return offset, nil } -func (p *TWaitingTxnStatusRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaDB) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnId = &v + p.Name = &v } return offset, nil } -func (p *TWaitingTxnStatusRequest) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaDB) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Label = &v + p.OnlyTableNames = &v + + } + return offset, nil +} + +func (p *TGetMetaDB) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Tables = make([]*TGetMetaTable, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaTable() + if l, err := _elem.FastRead(buf[offset:]); err != 
nil { + return offset, err + } else { + offset += l + } + p.Tables = append(p.Tables, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } return offset, nil } // for compatibility -func (p *TWaitingTxnStatusRequest) FastWrite(buf []byte) int { +func (p *TGetMetaDB) FastWrite(buf []byte) int { return 0 } -func (p *TWaitingTxnStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDB) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWaitingTxnStatusRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaDB") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TWaitingTxnStatusRequest) BLength() int { +func (p *TGetMetaDB) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TWaitingTxnStatusRequest") + l += bthrift.Binary.StructBeginLength("TGetMetaDB") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TWaitingTxnStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDB) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TWaitingTxnStatusRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDB) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TWaitingTxnStatusRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDB) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLabel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) + if p.IsSetOnlyTableNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "only_table_names", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.OnlyTableNames) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TWaitingTxnStatusRequest) field1Length() int { +func (p *TGetMetaDB) 
fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tables", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Tables { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaDB) field1Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TWaitingTxnStatusRequest) field2Length() int { +func (p *TGetMetaDB) field2Length() int { l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TxnId) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TWaitingTxnStatusRequest) field3Length() int { +func (p *TGetMetaDB) field3Length() int { l := 0 - if p.IsSetLabel() { - l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Label) + if p.IsSetOnlyTableNames() { + l += bthrift.Binary.FieldBeginLength("only_table_names", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.OnlyTableNames) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TWaitingTxnStatusResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMetaDB) field4Length() int { + l := 0 + if p.IsSetTables() { + l += bthrift.Binary.FieldBeginLength("tables", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tables)) + for _, v := range p.Tables { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -28166,7 +49975,7 @@ func (p *TWaitingTxnStatusResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -28180,7 +49989,7 @@ func (p *TWaitingTxnStatusResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -28193,6 +50002,62 @@ func (p *TWaitingTxnStatusResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + 
if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -28219,7 +50084,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWaitingTxnStatusResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -28228,104 +50093,252 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TWaitingTxnStatusResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Cluster = &v + } - p.Status = tmp return offset, nil } -func (p *TWaitingTxnStatusResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TxnStatusId = &v + p.User = &v + + } + return offset, nil +} + +func (p *TGetMetaRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil +} + +func (p *TGetMetaRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UserIp = &v + + } + return offset, nil +} + +func (p *TGetMetaRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TGetMetaRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + tmp := NewTGetMetaDB() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } + p.Db = tmp return offset, nil } // for compatibility -func (p *TWaitingTxnStatusResult_) FastWrite(buf []byte) int { +func (p *TGetMetaRequest) FastWrite(buf []byte) int { return 0 } -func (p *TWaitingTxnStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset 
:= 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWaitingTxnStatusResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaRequest") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TWaitingTxnStatusResult_) BLength() int { +func (p *TGetMetaRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TWaitingTxnStatusResult") + l += bthrift.Binary.StructBeginLength("TGetMetaRequest") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TWaitingTxnStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TWaitingTxnStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTxnStatusId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_status_id", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.TxnStatusId) + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TWaitingTxnStatusResult_) field1Length() int { +func (p *TGetMetaRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 5) + offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDb() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRUCT, 6) + offset += p.Db.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaRequest) field1Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TWaitingTxnStatusResult_) field2Length() int { +func (p *TGetMetaRequest) field2Length() int { l := 0 - if p.IsSetTxnStatusId() { - l += bthrift.Binary.FieldBeginLength("txn_status_id", thrift.I32, 2) - l += bthrift.Binary.I32Length(*p.TxnStatusId) + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TInitExternalCtlMetaRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMetaRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaRequest) field4Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaRequest) field5Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaRequest) field6Length() int { + l := 0 + if p.IsSetDb() { + l += bthrift.Binary.FieldBeginLength("db", thrift.STRUCT, 6) + l += p.Db.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaReplicaMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -28415,7 +50428,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaReplicaMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -28424,53 +50437,53 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TInitExternalCtlMetaRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaReplicaMeta) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.CatalogId = &v + 
p.Id = &v } return offset, nil } -func (p *TInitExternalCtlMetaRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaReplicaMeta) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbId = &v + p.BackendId = &v } return offset, nil } -func (p *TInitExternalCtlMetaRequest) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaReplicaMeta) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableId = &v + p.Version = &v } return offset, nil } // for compatibility -func (p *TInitExternalCtlMetaRequest) FastWrite(buf []byte) int { +func (p *TGetMetaReplicaMeta) FastWrite(buf []byte) int { return 0 } -func (p *TInitExternalCtlMetaRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaReplicaMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TInitExternalCtlMetaRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaReplicaMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) @@ -28481,9 +50494,9 @@ func (p *TInitExternalCtlMetaRequest) FastWriteNocopy(buf []byte, binaryWriter b return offset } -func (p *TInitExternalCtlMetaRequest) BLength() int { +func (p *TGetMetaReplicaMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TInitExternalCtlMetaRequest") + l += bthrift.Binary.StructBeginLength("TGetMetaReplicaMeta") if p != nil { l += p.field1Length() l += p.field2Length() @@ -28494,73 +50507,73 @@ func (p *TInitExternalCtlMetaRequest) BLength() int { return l } -func (p *TInitExternalCtlMetaRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaReplicaMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCatalogId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TInitExternalCtlMetaRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaReplicaMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TInitExternalCtlMetaRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaReplicaMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tableId", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + if p.IsSetVersion() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TInitExternalCtlMetaRequest) field1Length() int { +func (p *TGetMetaReplicaMeta) field1Length() int { l := 0 - if p.IsSetCatalogId() { - l += bthrift.Binary.FieldBeginLength("catalogId", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.CatalogId) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TInitExternalCtlMetaRequest) field2Length() int { +func (p *TGetMetaReplicaMeta) field2Length() int { l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.DbId) + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.BackendId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TInitExternalCtlMetaRequest) field3Length() int { +func (p *TGetMetaReplicaMeta) field3Length() int { l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("tableId", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.TableId) + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Version) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TInitExternalCtlMetaResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMetaTabletMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -28597,7 +50610,7 @@ func (p *TInitExternalCtlMetaResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -28636,7 +50649,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TInitExternalCtlMetaResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTabletMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -28645,40 +50658,54 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TInitExternalCtlMetaResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaTabletMeta) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.MaxJournalId = &v + p.Id = &v } return offset, nil } -func (p *TInitExternalCtlMetaResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaTabletMeta) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Replicas = make([]*TGetMetaReplicaMeta, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaReplicaMeta() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + 
return offset, err + } else { + offset += l + } + + p.Replicas = append(p.Replicas, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Status = &v - } return offset, nil } // for compatibility -func (p *TInitExternalCtlMetaResult_) FastWrite(buf []byte) int { +func (p *TGetMetaTabletMeta) FastWrite(buf []byte) int { return 0 } -func (p *TInitExternalCtlMetaResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTabletMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TInitExternalCtlMetaResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaTabletMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) @@ -28688,9 +50715,9 @@ func (p *TInitExternalCtlMetaResult_) FastWriteNocopy(buf []byte, binaryWriter b return offset } -func (p *TInitExternalCtlMetaResult_) BLength() int { +func (p *TGetMetaTabletMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TInitExternalCtlMetaResult") + l += bthrift.Binary.StructBeginLength("TGetMetaTabletMeta") if p != nil { l += p.field1Length() l += p.field2Length() @@ -28700,51 +50727,61 @@ func (p *TInitExternalCtlMetaResult_) BLength() int { return l } -func (p *TInitExternalCtlMetaResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTabletMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxJournalId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "maxJournalId", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.MaxJournalId) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TInitExternalCtlMetaResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTabletMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Status) - + if p.IsSetReplicas() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replicas", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Replicas { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TInitExternalCtlMetaResult_) field1Length() int { +func (p *TGetMetaTabletMeta) field1Length() int { l := 0 - if p.IsSetMaxJournalId() { - l += bthrift.Binary.FieldBeginLength("maxJournalId", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.MaxJournalId) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TInitExternalCtlMetaResult_) field2Length() int { +func (p *TGetMetaTabletMeta) field2Length() int { l := 0 - if 
p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Status) - + if p.IsSetReplicas() { + l += bthrift.Binary.FieldBeginLength("replicas", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Replicas)) + for _, v := range p.Replicas { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetadataTableRequestParams) FastRead(buf []byte) (int, error) { +func (p *TGetMetaIndexMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -28767,7 +50804,7 @@ func (p *TMetadataTableRequestParams) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -28781,7 +50818,7 @@ func (p *TMetadataTableRequestParams) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -28795,50 +50832,8 @@ func (p *TMetadataTableRequestParams) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: if fieldTypeId == thrift.LIST { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField6(buf[offset:]) + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -28876,7 +50871,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMetadataTableRequestParams[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaIndexMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -28885,48 +50880,33 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TMetadataTableRequestParams) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaIndexMeta) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := types.TMetadataType(v) - p.MetadataType = &tmp + p.Id = &v } return offset, nil } -func (p *TMetadataTableRequestParams) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaIndexMeta) 
FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := plannodes.NewTIcebergMetadataParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - } - p.IcebergMetadataParams = tmp - return offset, nil -} - -func (p *TMetadataTableRequestParams) FastReadField3(buf []byte) (int, error) { - offset := 0 + p.Name = &v - tmp := plannodes.NewTBackendsMetadataParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.BackendsMetadataParams = tmp return offset, nil } -func (p *TMetadataTableRequestParams) FastReadField4(buf []byte) (int, error) { +func (p *TGetMetaIndexMeta) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -28934,19 +50914,16 @@ func (p *TMetadataTableRequestParams) FastReadField4(buf []byte) (int, error) { if err != nil { return offset, err } - p.ColumnsName = make([]string, 0, size) + p.Tablets = make([]*TGetMetaTabletMeta, 0, size) for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem := NewTGetMetaTabletMeta() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _elem = v - } - p.ColumnsName = append(p.ColumnsName, _elem) + p.Tablets = append(p.Tablets, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -28956,178 +50933,106 @@ func (p *TMetadataTableRequestParams) FastReadField4(buf []byte) (int, error) { return offset, nil } -func (p *TMetadataTableRequestParams) FastReadField5(buf []byte) (int, error) { - offset := 0 - - tmp := plannodes.NewTFrontendsMetadataParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.FrontendsMetadataParams = tmp - return offset, nil -} - -func (p *TMetadataTableRequestParams) FastReadField6(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUserIdentity() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.CurrentUserIdent = tmp - return offset, nil -} - // for compatibility -func (p *TMetadataTableRequestParams) FastWrite(buf []byte) int { +func (p *TGetMetaIndexMeta) FastWrite(buf []byte) int { return 0 } -func (p *TMetadataTableRequestParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaIndexMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMetadataTableRequestParams") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaIndexMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMetadataTableRequestParams) BLength() int { +func (p *TGetMetaIndexMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMetadataTableRequestParams") + l += bthrift.Binary.StructBeginLength("TGetMetaIndexMeta") if p != 
nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMetadataTableRequestParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaIndexMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMetadataType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metadata_type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MetadataType)) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMetadataTableRequestParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaIndexMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIcebergMetadataParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_metadata_params", thrift.STRUCT, 2) - offset += p.IcebergMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) -func (p *TMetadataTableRequestParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBackendsMetadataParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backends_metadata_params", thrift.STRUCT, 3) - offset += p.BackendsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMetadataTableRequestParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaIndexMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnsName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_name", thrift.LIST, 4) + if p.IsSetTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 3) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.ColumnsName { + for _, v := range p.Tablets { length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMetadataTableRequestParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFrontendsMetadataParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "frontends_metadata_params", thrift.STRUCT, 5) - offset += p.FrontendsMetadataParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TMetadataTableRequestParams) 
fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCurrentUserIdent() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 6) - offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TMetadataTableRequestParams) field1Length() int { +func (p *TGetMetaIndexMeta) field1Length() int { l := 0 - if p.IsSetMetadataType() { - l += bthrift.Binary.FieldBeginLength("metadata_type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.MetadataType)) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetadataTableRequestParams) field2Length() int { +func (p *TGetMetaIndexMeta) field2Length() int { l := 0 - if p.IsSetIcebergMetadataParams() { - l += bthrift.Binary.FieldBeginLength("iceberg_metadata_params", thrift.STRUCT, 2) - l += p.IcebergMetadataParams.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) -func (p *TMetadataTableRequestParams) field3Length() int { - l := 0 - if p.IsSetBackendsMetadataParams() { - l += bthrift.Binary.FieldBeginLength("backends_metadata_params", thrift.STRUCT, 3) - l += p.BackendsMetadataParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetadataTableRequestParams) field4Length() int { +func (p *TGetMetaIndexMeta) field3Length() int { l := 0 - if p.IsSetColumnsName() { - l += bthrift.Binary.FieldBeginLength("columns_name", thrift.LIST, 4) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsName)) - for _, v := range p.ColumnsName { - l += bthrift.Binary.StringLengthNocopy(v) - + if p.IsSetTablets() { + l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tablets)) + for _, v := range p.Tablets { + l += v.BLength() } l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() @@ -29135,27 +51040,7 @@ func (p *TMetadataTableRequestParams) field4Length() int { return l } -func (p *TMetadataTableRequestParams) field5Length() int { - l := 0 - if p.IsSetFrontendsMetadataParams() { - l += bthrift.Binary.FieldBeginLength("frontends_metadata_params", thrift.STRUCT, 5) - l += p.FrontendsMetadataParams.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TMetadataTableRequestParams) field6Length() int { - l := 0 - if p.IsSetCurrentUserIdent() { - l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 6) - l += p.CurrentUserIdent.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMetaPartitionMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -29178,7 +51063,7 @@ func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -29192,7 +51077,7 @@ func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = 
p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -29206,7 +51091,7 @@ func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -29219,6 +51104,62 @@ func (p *TFetchSchemaTableDataRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -29245,7 +51186,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaPartitionMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -29254,149 +51195,320 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFetchSchemaTableDataRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaPartitionMeta) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Id = &v + + } + return offset, nil +} + +func (p *TGetMetaPartitionMeta) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ClusterName = &v + p.Name = &v } return offset, nil } -func (p *TFetchSchemaTableDataRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaPartitionMeta) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Key = &v - tmp := TSchemaTableName(v) - p.SchemaTableName = &tmp + } + return offset, nil +} + +func (p *TGetMetaPartitionMeta) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := 
bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Range = &v } return offset, nil } -func (p *TFetchSchemaTableDataRequest) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaPartitionMeta) FastReadField5(buf []byte) (int, error) { offset := 0 - tmp := NewTMetadataTableRequestParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.VisibleVersion = &v + + } + return offset, nil +} + +func (p *TGetMetaPartitionMeta) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsTemp = &v + + } + return offset, nil +} + +func (p *TGetMetaPartitionMeta) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Indexes = make([]*TGetMetaIndexMeta, 0, size) + for i := 0; i < size; i++ { + _elem := NewTGetMetaIndexMeta() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Indexes = append(p.Indexes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.MetadaTableParams = tmp return offset, nil } // for compatibility -func (p *TFetchSchemaTableDataRequest) FastWrite(buf []byte) int { +func (p *TGetMetaPartitionMeta) FastWrite(buf []byte) int { return 0 } -func (p *TFetchSchemaTableDataRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartitionMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSchemaTableDataRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaPartitionMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFetchSchemaTableDataRequest) BLength() int { +func (p *TGetMetaPartitionMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFetchSchemaTableDataRequest") + l += bthrift.Binary.StructBeginLength("TGetMetaPartitionMeta") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFetchSchemaTableDataRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartitionMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetClusterName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClusterName) + if p.IsSetId() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFetchSchemaTableDataRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartitionMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSchemaTableName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_table_name", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.SchemaTableName)) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFetchSchemaTableDataRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaPartitionMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMetadaTableParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metada_table_params", thrift.STRUCT, 3) - offset += p.MetadaTableParams.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFetchSchemaTableDataRequest) field1Length() int { +func (p *TGetMetaPartitionMeta) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "range", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Range) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaPartitionMeta) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVisibleVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaPartitionMeta) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsTemp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_temp", thrift.BOOL, 6) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsTemp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaPartitionMeta) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIndexes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "indexes", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Indexes { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaPartitionMeta) field1Length() int { l := 0 - if p.IsSetClusterName() { - l += 
bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.ClusterName) + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFetchSchemaTableDataRequest) field2Length() int { +func (p *TGetMetaPartitionMeta) field2Length() int { l := 0 - if p.IsSetSchemaTableName() { - l += bthrift.Binary.FieldBeginLength("schema_table_name", thrift.I32, 2) - l += bthrift.Binary.I32Length(int32(*p.SchemaTableName)) + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFetchSchemaTableDataRequest) field3Length() int { +func (p *TGetMetaPartitionMeta) field3Length() int { l := 0 - if p.IsSetMetadaTableParams() { - l += bthrift.Binary.FieldBeginLength("metada_table_params", thrift.STRUCT, 3) - l += p.MetadaTableParams.BLength() + if p.IsSetKey() { + l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Key) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFetchSchemaTableDataResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMetaPartitionMeta) field4Length() int { + l := 0 + if p.IsSetRange() { + l += bthrift.Binary.FieldBeginLength("range", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Range) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaPartitionMeta) field5Length() int { + l := 0 + if p.IsSetVisibleVersion() { + l += bthrift.Binary.FieldBeginLength("visible_version", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.VisibleVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaPartitionMeta) field6Length() int { + l := 0 + if p.IsSetIsTemp() { + l += bthrift.Binary.FieldBeginLength("is_temp", thrift.BOOL, 6) + l += bthrift.Binary.BoolLength(*p.IsTemp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaPartitionMeta) field7Length() int { + l := 0 + if p.IsSetIndexes() { + l += bthrift.Binary.FieldBeginLength("indexes", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Indexes)) + for _, v := range p.Indexes { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaTableMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -29414,13 +51526,12 @@ func (p *TFetchSchemaTableDataResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -29428,9 +51539,37 @@ func (p *TFetchSchemaTableDataResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId 
== thrift.BOOL { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: if fieldTypeId == thrift.LIST { - l, err = p.FastReadField2(buf[offset:]) + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -29462,41 +51601,61 @@ func (p *TFetchSchemaTableDataResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSchemaTableDataResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaTableMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchSchemaTableDataResult_[fieldId])) } -func (p *TFetchSchemaTableDataResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaTableMeta) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Id = &v + } - p.Status = tmp return offset, nil } -func (p *TFetchSchemaTableDataResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaTableMeta) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v + + } + return offset, nil +} + +func (p *TGetMetaTableMeta) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.InTrash = &v + + } + return offset, nil +} + +func (p *TGetMetaTableMeta) FastReadField4(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -29504,16 +51663,16 @@ func (p *TFetchSchemaTableDataResult_) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.DataBatch = make([]*data.TRow, 0, size) + p.Partitions = make([]*TGetMetaPartitionMeta, 0, size) for i := 0; i < size; i++ { - _elem := data.NewTRow() + _elem := NewTGetMetaPartitionMeta() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.DataBatch = append(p.DataBatch, _elem) + p.Partitions = append(p.Partitions, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -29524,50 +51683,79 @@ func (p *TFetchSchemaTableDataResult_) 
FastReadField2(buf []byte) (int, error) { } // for compatibility -func (p *TFetchSchemaTableDataResult_) FastWrite(buf []byte) int { +func (p *TGetMetaTableMeta) FastWrite(buf []byte) int { return 0 } -func (p *TFetchSchemaTableDataResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTableMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSchemaTableDataResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaTableMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFetchSchemaTableDataResult_) BLength() int { +func (p *TGetMetaTableMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFetchSchemaTableDataResult") + l += bthrift.Binary.StructBeginLength("TGetMetaTableMeta") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFetchSchemaTableDataResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTableMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TFetchSchemaTableDataResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaTableMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDataBatch() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_batch", thrift.LIST, 2) + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaTableMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInTrash() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "in_trash", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.InTrash) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaTableMeta) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 4) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.DataBatch { + for _, v := range p.Partitions { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -29578,20 +51766,45 @@ func (p *TFetchSchemaTableDataResult_) 
fastWriteField2(buf []byte, binaryWriter return offset } -func (p *TFetchSchemaTableDataResult_) field1Length() int { +func (p *TGetMetaTableMeta) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TFetchSchemaTableDataResult_) field2Length() int { +func (p *TGetMetaTableMeta) field2Length() int { l := 0 - if p.IsSetDataBatch() { - l += bthrift.Binary.FieldBeginLength("data_batch", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DataBatch)) - for _, v := range p.DataBatch { + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaTableMeta) field3Length() int { + l := 0 + if p.IsSetInTrash() { + l += bthrift.Binary.FieldBeginLength("in_trash", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.InTrash) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaTableMeta) field4Length() int { + l := 0 + if p.IsSetPartitions() { + l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) + for _, v := range p.Partitions { l += v.BLength() } l += bthrift.Binary.ListEndLength() @@ -29600,7 +51813,7 @@ func (p *TFetchSchemaTableDataResult_) field2Length() int { return l } -func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -29637,7 +51850,7 @@ func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -29651,7 +51864,7 @@ func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -29665,7 +51878,7 @@ func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -29679,7 +51892,7 @@ func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { @@ -29692,6 +51905,20 @@ func (p *TAddColumnsRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -29718,7 +51945,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TAddColumnsRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaDBMeta[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -29727,20 +51954,33 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAddColumnsRequest) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableId = &v + p.Id = &v + + } + return offset, nil +} + +func (p *TGetMetaDBMeta) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Name = &v } return offset, nil } -func (p *TAddColumnsRequest) FastReadField2(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -29748,16 +51988,16 @@ func (p *TAddColumnsRequest) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.AddColumns = make([]*TColumnDef, 0, size) + p.Tables = make([]*TGetMetaTableMeta, 0, size) for i := 0; i < size; i++ { - _elem := NewTColumnDef() + _elem := NewTGetMetaTableMeta() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.AddColumns = append(p.AddColumns, _elem) + p.Tables = append(p.Tables, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -29767,99 +52007,163 @@ func (p *TAddColumnsRequest) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TAddColumnsRequest) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DroppedPartitions = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.DroppedPartitions = append(p.DroppedPartitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableName = &v - } return offset, nil } -func (p *TAddColumnsRequest) FastReadField4(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DroppedTables = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.DroppedTables = append(p.DroppedTables, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbName = &v - } return offset, nil } -func (p *TAddColumnsRequest) 
FastReadField5(buf []byte) (int, error) { +func (p *TGetMetaDBMeta) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DroppedIndexes = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.DroppedIndexes = append(p.DroppedIndexes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AllowTypeConflict = &v - } return offset, nil } // for compatibility -func (p *TAddColumnsRequest) FastWrite(buf []byte) int { +func (p *TGetMetaDBMeta) FastWrite(buf []byte) int { return 0 } -func (p *TAddColumnsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAddColumnsRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaDBMeta") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAddColumnsRequest) BLength() int { +func (p *TGetMetaDBMeta) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAddColumnsRequest") + l += bthrift.Binary.StructBeginLength("TGetMetaDBMeta") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAddColumnsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetMetaDBMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAddColumns() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "addColumns", thrift.LIST, 2) 
+ if p.IsSetTables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tables", thrift.LIST, 3) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.AddColumns { + for _, v := range p.Tables { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -29870,56 +52174,91 @@ func (p *TAddColumnsRequest) fastWriteField2(buf []byte, binaryWriter bthrift.Bi return offset } -func (p *TAddColumnsRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + if p.IsSetDroppedPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dropped_partitions", thrift.LIST, 4) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.DroppedPartitions { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDbName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + if p.IsSetDroppedTables() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dropped_tables", thrift.LIST, 5) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.DroppedTables { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaDBMeta) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAllowTypeConflict() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "allow_type_conflict", thrift.BOOL, 5) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.AllowTypeConflict) + if p.IsSetDroppedIndexes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dropped_indexes", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.DroppedIndexes { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsRequest) field1Length() int { +func (p *TGetMetaDBMeta) field1Length() int { l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.TableId) + if p.IsSetId() { + l += 
bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetMetaDBMeta) field2Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAddColumnsRequest) field2Length() int { +func (p *TGetMetaDBMeta) field3Length() int { l := 0 - if p.IsSetAddColumns() { - l += bthrift.Binary.FieldBeginLength("addColumns", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AddColumns)) - for _, v := range p.AddColumns { + if p.IsSetTables() { + l += bthrift.Binary.FieldBeginLength("tables", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tables)) + for _, v := range p.Tables { l += v.BLength() } l += bthrift.Binary.ListEndLength() @@ -29928,45 +52267,52 @@ func (p *TAddColumnsRequest) field2Length() int { return l } -func (p *TAddColumnsRequest) field3Length() int { +func (p *TGetMetaDBMeta) field4Length() int { l := 0 - if p.IsSetTableName() { - l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.TableName) - + if p.IsSetDroppedPartitions() { + l += bthrift.Binary.FieldBeginLength("dropped_partitions", thrift.LIST, 4) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.DroppedPartitions)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.DroppedPartitions) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAddColumnsRequest) field4Length() int { +func (p *TGetMetaDBMeta) field5Length() int { l := 0 - if p.IsSetDbName() { - l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.DbName) - + if p.IsSetDroppedTables() { + l += bthrift.Binary.FieldBeginLength("dropped_tables", thrift.LIST, 5) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.DroppedTables)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.DroppedTables) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAddColumnsRequest) field5Length() int { +func (p *TGetMetaDBMeta) field6Length() int { l := 0 - if p.IsSetAllowTypeConflict() { - l += bthrift.Binary.FieldBeginLength("allow_type_conflict", thrift.BOOL, 5) - l += bthrift.Binary.BoolLength(*p.AllowTypeConflict) - + if p.IsSetDroppedIndexes() { + l += bthrift.Binary.FieldBeginLength("dropped_indexes", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.DroppedIndexes)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.DroppedIndexes) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { +func (p *TGetMetaResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -29990,6 +52336,7 @@ func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } + issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -29998,7 +52345,7 @@ func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == 
thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -30012,7 +52359,7 @@ func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -30025,20 +52372,6 @@ func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -30059,22 +52392,28 @@ func (p *TAddColumnsResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAddColumnsResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMetaResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetMetaResult_[fieldId])) } -func (p *TAddColumnsResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetMetaResult_) FastReadField1(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -30087,71 +52426,43 @@ func (p *TAddColumnsResult_) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TAddColumnsResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableId = &v - - } - return offset, nil -} - -func (p *TAddColumnsResult_) FastReadField3(buf []byte) (int, error) { +func (p *TGetMetaResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.AllColumns = make([]*descriptors.TColumn, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.AllColumns = append(p.AllColumns, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := NewTGetMetaDBMeta() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.DbMeta = tmp return offset, nil } -func (p *TAddColumnsResult_) FastReadField4(buf []byte) (int, error) { +func (p *TGetMetaResult_) FastReadField3(buf []byte) (int, 
error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SchemaVersion = &v - } + p.MasterAddress = tmp return offset, nil } // for compatibility -func (p *TAddColumnsResult_) FastWrite(buf []byte) int { +func (p *TGetMetaResult_) FastWrite(buf []byte) int { return 0 } -func (p *TAddColumnsResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAddColumnsResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMetaResult") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -30159,117 +52470,76 @@ func (p *TAddColumnsResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bi return offset } -func (p *TAddColumnsResult_) BLength() int { +func (p *TGetMetaResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAddColumnsResult") + l += bthrift.Binary.StructBeginLength("TGetMetaResult") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAddColumnsResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAddColumnsResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAddColumnsResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAllColumns() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "allColumns", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.AllColumns { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetDbMeta() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_meta", thrift.STRUCT, 2) + offset += p.DbMeta.FastWriteNocopy(buf[offset:], binaryWriter) 
offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetMetaResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSchemaVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_version", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.SchemaVersion) - + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 3) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAddColumnsResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAddColumnsResult_) field2Length() int { +func (p *TGetMetaResult_) field1Length() int { l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TableId) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TAddColumnsResult_) field3Length() int { +func (p *TGetMetaResult_) field2Length() int { l := 0 - if p.IsSetAllColumns() { - l += bthrift.Binary.FieldBeginLength("allColumns", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AllColumns)) - for _, v := range p.AllColumns { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetDbMeta() { + l += bthrift.Binary.FieldBeginLength("db_meta", thrift.STRUCT, 2) + l += p.DbMeta.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAddColumnsResult_) field4Length() int { +func (p *TGetMetaResult_) field3Length() int { l := 0 - if p.IsSetSchemaVersion() { - l += bthrift.Binary.FieldBeginLength("schema_version", thrift.I32, 4) - l += bthrift.Binary.I32Length(*p.SchemaVersion) - + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 3) + l += p.MasterAddress.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMySqlLoadAcquireTokenResult_) FastRead(buf []byte) (int, error) { +func (p *TGetBackendMetaRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -30292,7 +52562,7 @@ func (p *TMySqlLoadAcquireTokenResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -30319,6 +52589,62 @@ func (p *TMySqlLoadAcquireTokenResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == 
thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -30340,34 +52666,73 @@ func (p *TMySqlLoadAcquireTokenResult_) FastRead(buf []byte) (int, error) { } return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMySqlLoadAcquireTokenResult_[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBackendMetaRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGetBackendMetaRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TGetBackendMetaRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.User = &v + + } + return offset, nil +} + +func (p *TGetBackendMetaRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Passwd = &v + + } + return offset, nil } -func (p *TMySqlLoadAcquireTokenResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetBackendMetaRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.UserIp = &v + } - p.Status = tmp return offset, nil } -func (p *TMySqlLoadAcquireTokenResult_) FastReadField2(buf []byte) (int, error) { +func (p *TGetBackendMetaRequest) 
FastReadField5(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -30380,49 +52745,104 @@ func (p *TMySqlLoadAcquireTokenResult_) FastReadField2(buf []byte) (int, error) return offset, nil } +func (p *TGetBackendMetaRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + // for compatibility -func (p *TMySqlLoadAcquireTokenResult_) FastWrite(buf []byte) int { +func (p *TGetBackendMetaRequest) FastWrite(buf []byte) int { return 0 } -func (p *TMySqlLoadAcquireTokenResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMySqlLoadAcquireTokenResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBackendMetaRequest") if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TMySqlLoadAcquireTokenResult_) BLength() int { +func (p *TGetBackendMetaRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMySqlLoadAcquireTokenResult") + l += bthrift.Binary.StructBeginLength("TGetBackendMetaRequest") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMySqlLoadAcquireTokenResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TMySqlLoadAcquireTokenResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUser() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBackendMetaRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPasswd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBackendMetaRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUserIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBackendMetaRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 2) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 5) offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) @@ -30430,20 +52850,65 @@ func (p *TMySqlLoadAcquireTokenResult_) fastWriteField2(buf []byte, binaryWriter return offset } -func (p *TMySqlLoadAcquireTokenResult_) field1Length() int { +func (p *TGetBackendMetaRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetBackendMetaRequest) field1Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMySqlLoadAcquireTokenResult_) field2Length() int { +func (p *TGetBackendMetaRequest) field2Length() int { + l := 0 + if p.IsSetUser() { + l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.User) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBackendMetaRequest) field3Length() int { + l := 0 + if p.IsSetPasswd() { + l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Passwd) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBackendMetaRequest) field4Length() int { + l := 0 + if p.IsSetUserIp() { + l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBackendMetaRequest) field5Length() int { l := 0 if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 5) l += bthrift.Binary.StringLengthNocopy(*p.Token) l += bthrift.Binary.FieldEndLength() @@ -30451,12 +52916,24 @@ func (p *TMySqlLoadAcquireTokenResult_) field2Length() int { return l } -func (p *TTabletCooldownInfo) FastRead(buf []byte) (int, error) { +func (p *TGetBackendMetaRequest) field6Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.BackendId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetBackendMetaResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId 
thrift.TType var fieldId int16 + var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -30474,12 +52951,13 @@ func (p *TTabletCooldownInfo) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -30488,7 +52966,7 @@ func (p *TTabletCooldownInfo) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -30535,68 +53013,88 @@ func (p *TTabletCooldownInfo) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletCooldownInfo[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBackendMetaResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetBackendMetaResult_[fieldId])) } -func (p *TTabletCooldownInfo) FastReadField1(buf []byte) (int, error) { +func (p *TGetBackendMetaResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TabletId = &v - } + p.Status = tmp return offset, nil } -func (p *TTabletCooldownInfo) FastReadField2(buf []byte) (int, error) { +func (p *TGetBackendMetaResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Backends = make([]*types.TBackend, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTBackend() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Backends = append(p.Backends, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.CooldownReplicaId = &v - } return offset, nil } -func (p *TTabletCooldownInfo) FastReadField3(buf []byte) (int, error) { +func (p *TGetBackendMetaResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := types.NewTUniqueId() + tmp := types.NewTNetworkAddress() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - 
p.CooldownMetaId = tmp + p.MasterAddress = tmp return offset, nil } // for compatibility -func (p *TTabletCooldownInfo) FastWrite(buf []byte) int { +func (p *TGetBackendMetaResult_) FastWrite(buf []byte) int { return 0 } -func (p *TTabletCooldownInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTabletCooldownInfo") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBackendMetaResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) @@ -30607,9 +53105,9 @@ func (p *TTabletCooldownInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.B return offset } -func (p *TTabletCooldownInfo) BLength() int { +func (p *TGetBackendMetaResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTabletCooldownInfo") + l += bthrift.Binary.StructBeginLength("TGetBackendMetaResult") if p != nil { l += p.field1Length() l += p.field2Length() @@ -30620,71 +53118,75 @@ func (p *TTabletCooldownInfo) BLength() int { return l } -func (p *TTabletCooldownInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTabletId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TTabletCooldownInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCooldownReplicaId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cooldown_replica_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.CooldownReplicaId) - + if p.IsSetBackends() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backends", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Backends { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTabletCooldownInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetBackendMetaResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCooldownMetaId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cooldown_meta_id", thrift.STRUCT, 3) - offset += p.CooldownMetaId.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetMasterAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_address", thrift.STRUCT, 3) + offset += p.MasterAddress.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTabletCooldownInfo) 
field1Length() int { +func (p *TGetBackendMetaResult_) field1Length() int { l := 0 - if p.IsSetTabletId() { - l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.TabletId) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TTabletCooldownInfo) field2Length() int { +func (p *TGetBackendMetaResult_) field2Length() int { l := 0 - if p.IsSetCooldownReplicaId() { - l += bthrift.Binary.FieldBeginLength("cooldown_replica_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.CooldownReplicaId) - + if p.IsSetBackends() { + l += bthrift.Binary.FieldBeginLength("backends", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Backends)) + for _, v := range p.Backends { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTabletCooldownInfo) field3Length() int { +func (p *TGetBackendMetaResult_) field3Length() int { l := 0 - if p.IsSetCooldownMetaId() { - l += bthrift.Binary.FieldBeginLength("cooldown_meta_id", thrift.STRUCT, 3) - l += p.CooldownMetaId.BLength() + if p.IsSetMasterAddress() { + l += bthrift.Binary.FieldBeginLength("master_address", thrift.STRUCT, 3) + l += p.MasterAddress.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TConfirmUnusedRemoteFilesRequest) FastRead(buf []byte) (int, error) { +func (p *TColumnInfo) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -30707,7 +53209,7 @@ func (p *TConfirmUnusedRemoteFilesRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -30720,6 +53222,20 @@ func (p *TConfirmUnusedRemoteFilesRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -30746,7 +53262,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnInfo[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -30755,42 +53271,42 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesRequest) FastReadField1(buf []byte) (int, error) { +func (p *TColumnInfo) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err - } - p.ConfirmList = make([]*TTabletCooldownInfo, 0, size) - for i := 0; i < 
size; i++ { - _elem := NewTTabletCooldownInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } else { + offset += l + p.ColumnName = &v - p.ConfirmList = append(p.ConfirmList, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TColumnInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.ColumnId = &v + } return offset, nil } // for compatibility -func (p *TConfirmUnusedRemoteFilesRequest) FastWrite(buf []byte) int { +func (p *TColumnInfo) FastWrite(buf []byte) int { return 0 } -func (p *TConfirmUnusedRemoteFilesRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TConfirmUnusedRemoteFilesRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TColumnInfo") if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -30798,50 +53314,63 @@ func (p *TConfirmUnusedRemoteFilesRequest) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *TConfirmUnusedRemoteFilesRequest) BLength() int { +func (p *TColumnInfo) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TConfirmUnusedRemoteFilesRequest") + l += bthrift.Binary.StructBeginLength("TColumnInfo") if p != nil { l += p.field1Length() + l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TConfirmUnusedRemoteFilesRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TColumnInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetConfirmList() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "confirm_list", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.ConfirmList { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetColumnName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColumnName) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TConfirmUnusedRemoteFilesRequest) field1Length() int { +func (p *TColumnInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ColumnId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TColumnInfo) field1Length() int { l := 0 - if p.IsSetConfirmList() { - l += bthrift.Binary.FieldBeginLength("confirm_list", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ConfirmList)) - for _, v := range p.ConfirmList { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetColumnName() { + l += 
bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.ColumnName) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TConfirmUnusedRemoteFilesResult_) FastRead(buf []byte) (int, error) { +func (p *TColumnInfo) field2Length() int { + l := 0 + if p.IsSetColumnId() { + l += bthrift.Binary.FieldBeginLength("column_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.ColumnId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetColumnInfoRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -30864,7 +53393,7 @@ func (p *TConfirmUnusedRemoteFilesResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -30877,6 +53406,20 @@ func (p *TConfirmUnusedRemoteFilesResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -30903,7 +53446,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TConfirmUnusedRemoteFilesResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetColumnInfoRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -30912,178 +53455,130 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TConfirmUnusedRemoteFilesResult_) FastReadField1(buf []byte) (int, error) { +func (p *TGetColumnInfoRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err - } - p.ConfirmedTablets = make([]types.TTabletId, 0, size) - for i := 0; i < size; i++ { - var _elem types.TTabletId - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + } else { + offset += l + p.DbId = &v - _elem = v + } + return offset, nil +} - } +func (p *TGetColumnInfoRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 - p.ConfirmedTablets = append(p.ConfirmedTablets, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.TableId = &v + } return offset, nil } // for compatibility -func (p *TConfirmUnusedRemoteFilesResult_) FastWrite(buf []byte) int { +func (p *TGetColumnInfoRequest) FastWrite(buf []byte) int { return 0 } -func (p *TConfirmUnusedRemoteFilesResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetColumnInfoRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { 
offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TConfirmUnusedRemoteFilesResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetColumnInfoRequest") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TConfirmUnusedRemoteFilesResult_) BLength() int { +func (p *TGetColumnInfoRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TConfirmUnusedRemoteFilesResult") + l += bthrift.Binary.StructBeginLength("TGetColumnInfoRequest") if p != nil { l += p.field1Length() + l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TConfirmUnusedRemoteFilesResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetColumnInfoRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetConfirmedTablets() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "confirmed_tablets", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) - var length int - for _, v := range p.ConfirmedTablets { - length++ - offset += bthrift.Binary.WriteI64(buf[offset:], v) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TConfirmUnusedRemoteFilesResult_) field1Length() int { +func (p *TGetColumnInfoRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGetColumnInfoRequest) field1Length() int { l := 0 - if p.IsSetConfirmedTablets() { - l += bthrift.Binary.FieldBeginLength("confirmed_tablets", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.ConfirmedTablets)) - var tmpV types.TTabletId - l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.ConfirmedTablets) - l += bthrift.Binary.ListEndLength() + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.DbId) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPrivilegeCtrl) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetPrivHier bool = false - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } +func (p *TGetColumnInfoRequest) field2Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TableId) - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - l, err = 
p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPrivHier = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.SET { - l, err = p.FastReadField5(buf[offset:]) + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGetColumnInfoResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -31095,9 +53590,9 @@ func (p *TPrivilegeCtrl) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -31129,334 +53624,148 @@ func (p *TPrivilegeCtrl) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetPrivHier { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPrivilegeCtrl[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetColumnInfoResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPrivilegeCtrl[fieldId])) -} - -func (p *TPrivilegeCtrl) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := 
bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.PrivHier = TPrivilegeHier(v) - - } - return offset, nil -} - -func (p *TPrivilegeCtrl) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Ctl = &v - - } - return offset, nil -} - -func (p *TPrivilegeCtrl) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - - } - return offset, nil } -func (p *TPrivilegeCtrl) FastReadField4(buf []byte) (int, error) { +func (p *TGetColumnInfoResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Tbl = &v - } + p.Status = tmp return offset, nil } -func (p *TPrivilegeCtrl) FastReadField5(buf []byte) (int, error) { +func (p *TGetColumnInfoResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadSetBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.Cols = make([]string, 0, size) + p.Columns = make([]*TColumnInfo, 0, size) for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem := NewTColumnInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _elem = v - } - p.Cols = append(p.Cols, _elem) - } - if l, err := bthrift.Binary.ReadSetEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + p.Columns = append(p.Columns, _elem) } - return offset, nil -} - -func (p *TPrivilegeCtrl) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Res = &v - } return offset, nil } // for compatibility -func (p *TPrivilegeCtrl) FastWrite(buf []byte) int { +func (p *TGetColumnInfoResult_) FastWrite(buf []byte) int { return 0 } -func (p *TPrivilegeCtrl) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetColumnInfoResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPrivilegeCtrl") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetColumnInfoResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPrivilegeCtrl) BLength() int { +func (p *TGetColumnInfoResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPrivilegeCtrl") + l += bthrift.Binary.StructBeginLength("TGetColumnInfoResult") if p != nil { l += p.field1Length() l += p.field2Length() - l += 
p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPrivilegeCtrl) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_hier", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.PrivHier)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPrivilegeCtrl) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCtl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ctl", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Ctl) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPrivilegeCtrl) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPrivilegeCtrl) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetColumnInfoResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTbl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tbl) - + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPrivilegeCtrl) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGetColumnInfoResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCols() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cols", thrift.SET, 5) - setBeginOffset := offset - offset += bthrift.Binary.SetBeginLength(thrift.STRING, 0) - - for i := 0; i < len(p.Cols); i++ { - for j := i + 1; j < len(p.Cols); j++ { - if func(tgt, src string) bool { - if strings.Compare(tgt, src) != 0 { - return false - } - return true - }(p.Cols[i], p.Cols[j]) { - panic(fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) - } - } - } + if p.IsSetColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.Cols { + for _, v := range p.Columns { length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteSetBegin(buf[setBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteSetEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPrivilegeCtrl) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "res", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Res) - + 
bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPrivilegeCtrl) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("priv_hier", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.PrivHier)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPrivilegeCtrl) field2Length() int { - l := 0 - if p.IsSetCtl() { - l += bthrift.Binary.FieldBeginLength("ctl", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Ctl) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPrivilegeCtrl) field3Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPrivilegeCtrl) field4Length() int { +func (p *TGetColumnInfoResult_) field1Length() int { l := 0 - if p.IsSetTbl() { - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Tbl) - + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPrivilegeCtrl) field5Length() int { +func (p *TGetColumnInfoResult_) field2Length() int { l := 0 - if p.IsSetCols() { - l += bthrift.Binary.FieldBeginLength("cols", thrift.SET, 5) - l += bthrift.Binary.SetBeginLength(thrift.STRING, len(p.Cols)) - - for i := 0; i < len(p.Cols); i++ { - for j := i + 1; j < len(p.Cols); j++ { - if func(tgt, src string) bool { - if strings.Compare(tgt, src) != 0 { - return false - } - return true - }(p.Cols[i], p.Cols[j]) { - panic(fmt.Errorf("%T error writing set field: slice is not unique", p.Cols[i])) - } - } - } - for _, v := range p.Cols { - l += bthrift.Binary.StringLengthNocopy(v) - + if p.IsSetColumns() { + l += bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) + for _, v := range p.Columns { + l += v.BLength() } - l += bthrift.Binary.SetEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPrivilegeCtrl) field6Length() int { - l := 0 - if p.IsSetRes() { - l += bthrift.Binary.FieldBeginLength("res", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.Res) - + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCheckAuthRequest) FastRead(buf []byte) (int, error) { +func (p *TShowProcessListRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetUser bool = false - var issetPasswd bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -31474,7 +53783,7 @@ func (p *TCheckAuthRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -31488,80 +53797,8 @@ func (p *TCheckAuthRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetUser = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - 
} - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPasswd = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -31593,317 +53830,390 @@ func (p *TCheckAuthRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetUser { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPasswd { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowProcessListRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthRequest[fieldId])) } -func (p *TCheckAuthRequest) FastReadField1(buf []byte) (int, error) { +func (p *TShowProcessListRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v + p.ShowFullSql = &v } return offset, nil } -func (p *TCheckAuthRequest) FastReadField2(buf []byte) (int, error) { +func (p *TShowProcessListRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.User = v - } + p.CurrentUserIdent = tmp return offset, nil } -func (p *TCheckAuthRequest) FastReadField3(buf []byte) (int, error) { +// 
for compatibility +func (p *TShowProcessListRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TShowProcessListRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowProcessListRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TShowProcessListRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TShowProcessListRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} - p.Passwd = v +func (p *TShowProcessListRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetShowFullSql() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "show_full_sql", thrift.BOOL, 1) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ShowFullSql) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TCheckAuthRequest) FastReadField4(buf []byte) (int, error) { +func (p *TShowProcessListRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 2) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v +func (p *TShowProcessListRequest) field1Length() int { + l := 0 + if p.IsSetShowFullSql() { + l += bthrift.Binary.FieldBeginLength("show_full_sql", thrift.BOOL, 1) + l += bthrift.Binary.BoolLength(*p.ShowFullSql) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TCheckAuthRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 +func (p *TShowProcessListRequest) field2Length() int { + l := 0 + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 2) + l += p.CurrentUserIdent.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - tmp := NewTPrivilegeCtrl() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { +func (p *TShowProcessListResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } } - p.PrivCtrl = tmp + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowProcessListResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCheckAuthRequest) FastReadField6(buf []byte) (int, error) { +func (p *TShowProcessListResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { + } + p.ProcessList = make([][]string, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l + if err != nil { + return offset, err + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem1 string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - tmp := TPrivilegeType(v) - p.PrivType = &tmp + _elem1 = v - } - return offset, nil -} + } -func (p *TCheckAuthRequest) FastReadField7(buf []byte) (int, error) { - offset := 0 + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + p.ProcessList = append(p.ProcessList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ThriftRpcTimeoutMs = &v - } return offset, nil } // for compatibility -func (p *TCheckAuthRequest) FastWrite(buf []byte) int { +func (p *TShowProcessListResult_) FastWrite(buf []byte) int { return 0 } -func (p *TCheckAuthRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TShowProcessListResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckAuthRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowProcessListResult") if p != nil { - offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCheckAuthRequest) BLength() int { +func (p *TShowProcessListResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCheckAuthRequest") + l += bthrift.Binary.StructBeginLength("TShowProcessListResult") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TCheckAuthRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCheckAuthRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TCheckAuthRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TCheckAuthRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCheckAuthRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPrivCtrl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_ctrl", thrift.STRUCT, 5) - offset += p.PrivCtrl.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCheckAuthRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPrivType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priv_type", thrift.I32, 6) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.PrivType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCheckAuthRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TShowProcessListResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetThriftRpcTimeoutMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "thrift_rpc_timeout_ms", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ThriftRpcTimeoutMs) + if p.IsSetProcessList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "process_list", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.ProcessList { + length++ + listBeginOffset := offset + offset += 
bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range v { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TCheckAuthRequest) field1Length() int { +func (p *TShowProcessListResult_) field1Length() int { l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + if p.IsSetProcessList() { + l += bthrift.Binary.FieldBeginLength("process_list", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ProcessList)) + for _, v := range p.ProcessList { + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) + for _, v := range v { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCheckAuthRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.User) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TCheckAuthRequest) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(p.Passwd) - - l += bthrift.Binary.FieldEndLength() - return l -} +func (p *TShowUserRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } -func (p *TCheckAuthRequest) field4Length() int { - l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l -} - -func (p *TCheckAuthRequest) field5Length() int { - l := 0 - if p.IsSetPrivCtrl() { - l += bthrift.Binary.FieldBeginLength("priv_ctrl", thrift.STRUCT, 5) - l += p.PrivCtrl.BLength() - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return l + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCheckAuthRequest) field6Length() int { - l := 0 - if p.IsSetPrivType() { - l += bthrift.Binary.FieldBeginLength("priv_type", thrift.I32, 6) - l += bthrift.Binary.I32Length(int32(*p.PrivType)) +// for compatibility +func (p *TShowUserRequest) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *TShowUserRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowUserRequest") + if p != nil { } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TCheckAuthRequest) field7Length() int { +func (p *TShowUserRequest) BLength() int { l := 0 - if p.IsSetThriftRpcTimeoutMs() { - l += bthrift.Binary.FieldBeginLength("thrift_rpc_timeout_ms", thrift.I64, 7) - l += bthrift.Binary.I64Length(*p.ThriftRpcTimeoutMs) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TShowUserRequest") + if p != nil { } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TCheckAuthResult_) FastRead(buf []byte) (int, error) { +func (p *TShowUserResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -31921,13 +54231,12 @@ func (p *TCheckAuthResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -31955,48 +54264,74 @@ func (p *TCheckAuthResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckAuthResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TShowUserResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckAuthResult_[fieldId])) } -func (p *TCheckAuthResult_) FastReadField1(buf []byte) (int, error) { +func (p *TShowUserResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + 
p.UserinfoList = make([][]string, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem1 string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem1 = v + + } + + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.UserinfoList = append(p.UserinfoList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp return offset, nil } // for compatibility -func (p *TCheckAuthResult_) FastWrite(buf []byte) int { +func (p *TShowUserResult_) FastWrite(buf []byte) int { return 0 } -func (p *TCheckAuthResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TShowUserResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckAuthResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TShowUserResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -32005,9 +54340,9 @@ func (p *TCheckAuthResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bin return offset } -func (p *TCheckAuthResult_) BLength() int { +func (p *TShowUserResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCheckAuthResult") + l += bthrift.Binary.StructBeginLength("TShowUserResult") if p != nil { l += p.field1Length() } @@ -32016,23 +54351,53 @@ func (p *TCheckAuthResult_) BLength() int { return l } -func (p *TCheckAuthResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TShowUserResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetUserinfoList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "userinfo_list", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.UserinfoList { + length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range v { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TCheckAuthResult_) field1Length() int { +func (p *TShowUserResult_) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetUserinfoList() { + l += bthrift.Binary.FieldBeginLength("userinfo_list", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.UserinfoList)) + for _, v := range p.UserinfoList { + l += 
bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) + for _, v := range v { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TGetQueryStatsRequest) FastRead(buf []byte) (int, error) { +func (p *TReportCommitTxnResultRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -32055,7 +54420,7 @@ func (p *TGetQueryStatsRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -32069,7 +54434,7 @@ func (p *TGetQueryStatsRequest) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -32110,34 +54475,6 @@ func (p *TGetQueryStatsRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -32164,7 +54501,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetQueryStatsRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReportCommitTxnResultRequest[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -32173,283 +54510,181 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetQueryStatsRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := TQueryStatsType(v) - p.Type = &tmp - - } - return offset, nil -} - -func (p *TGetQueryStatsRequest) FastReadField2(buf []byte) (int, error) { +func (p *TReportCommitTxnResultRequest) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Catalog = &v + p.DbId = &v } return offset, nil } -func (p *TGetQueryStatsRequest) FastReadField3(buf []byte) (int, error) { +func (p *TReportCommitTxnResultRequest) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Db = &v + p.TxnId = &v } return offset, nil } -func (p 
*TGetQueryStatsRequest) FastReadField4(buf []byte) (int, error) { +func (p *TReportCommitTxnResultRequest) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Tbl = &v + p.Label = &v } return offset, nil } -func (p *TGetQueryStatsRequest) FastReadField5(buf []byte) (int, error) { +func (p *TReportCommitTxnResultRequest) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ReplicaId = &v - - } - return offset, nil -} - -func (p *TGetQueryStatsRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.ReplicaIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - } + p.Payload = []byte(v) - p.ReplicaIds = append(p.ReplicaIds, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } return offset, nil } // for compatibility -func (p *TGetQueryStatsRequest) FastWrite(buf []byte) int { +func (p *TReportCommitTxnResultRequest) FastWrite(buf []byte) int { return 0 } -func (p *TGetQueryStatsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportCommitTxnResultRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetQueryStatsRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TReportCommitTxnResultRequest") if p != nil { - offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetQueryStatsRequest) BLength() int { +func (p *TReportCommitTxnResultRequest) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetQueryStatsRequest") + l += bthrift.Binary.StructBeginLength("TReportCommitTxnResultRequest") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() - l += p.field5Length() - l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetQueryStatsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetQueryStatsRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCatalog() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], 
binaryWriter, *p.Catalog) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetQueryStatsRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportCommitTxnResultRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetQueryStatsRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportCommitTxnResultRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTbl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tbl) + if p.IsSetTxnId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txnId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetQueryStatsRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportCommitTxnResultRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetReplicaId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replica_id", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReplicaId) + if p.IsSetLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Label) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetQueryStatsRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TReportCommitTxnResultRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetReplicaIds() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "replica_ids", thrift.LIST, 6) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) - var length int - for _, v := range p.ReplicaIds { - length++ - offset += bthrift.Binary.WriteI64(buf[offset:], v) + if p.IsSetPayload() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "payload", thrift.STRING, 4) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Payload)) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetQueryStatsRequest) field1Length() int { - l := 0 - if p.IsSetType() { - l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.Type)) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetQueryStatsRequest) field2Length() int { - l := 0 - if p.IsSetCatalog() { - l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Catalog) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetQueryStatsRequest) 
field3Length() int { +func (p *TReportCommitTxnResultRequest) field1Length() int { l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Db) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.DbId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetQueryStatsRequest) field4Length() int { +func (p *TReportCommitTxnResultRequest) field2Length() int { l := 0 - if p.IsSetTbl() { - l += bthrift.Binary.FieldBeginLength("tbl", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Tbl) + if p.IsSetTxnId() { + l += bthrift.Binary.FieldBeginLength("txnId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.TxnId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetQueryStatsRequest) field5Length() int { +func (p *TReportCommitTxnResultRequest) field3Length() int { l := 0 - if p.IsSetReplicaId() { - l += bthrift.Binary.FieldBeginLength("replica_id", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.ReplicaId) + if p.IsSetLabel() { + l += bthrift.Binary.FieldBeginLength("label", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Label) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetQueryStatsRequest) field6Length() int { +func (p *TReportCommitTxnResultRequest) field4Length() int { l := 0 - if p.IsSetReplicaIds() { - l += bthrift.Binary.FieldBeginLength("replica_ids", thrift.LIST, 6) - l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.ReplicaIds)) - var tmpV int64 - l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.ReplicaIds) - l += bthrift.Binary.ListEndLength() + if p.IsSetPayload() { + l += bthrift.Binary.FieldBeginLength("payload", thrift.STRING, 4) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Payload)) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableQueryStats) FastRead(buf []byte) (int, error) { +func (p *TQueryColumn) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -32486,7 +54721,7 @@ func (p *TTableQueryStats) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -32500,7 +54735,7 @@ func (p *TTableQueryStats) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -32513,6 +54748,20 @@ func (p *TTableQueryStats) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -32539,7 +54788,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableQueryStats[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryColumn[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, 
fieldId, fieldTypeId), err) ReadFieldEndError: @@ -32548,143 +54797,180 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableQueryStats) FastReadField1(buf []byte) (int, error) { +func (p *TQueryColumn) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Field = &v + p.CatalogId = &v } return offset, nil } -func (p *TTableQueryStats) FastReadField2(buf []byte) (int, error) { +func (p *TQueryColumn) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.QueryStats = &v + p.DbId = &v } return offset, nil } -func (p *TTableQueryStats) FastReadField3(buf []byte) (int, error) { +func (p *TQueryColumn) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FilterStats = &v + p.TblId = &v + + } + return offset, nil +} + +func (p *TQueryColumn) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColName = &v } return offset, nil } // for compatibility -func (p *TTableQueryStats) FastWrite(buf []byte) int { +func (p *TQueryColumn) FastWrite(buf []byte) int { return 0 } -func (p *TTableQueryStats) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryColumn) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableQueryStats") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryColumn") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTableQueryStats) BLength() int { +func (p *TQueryColumn) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTableQueryStats") + l += bthrift.Binary.StructBeginLength("TQueryColumn") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTableQueryStats) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryColumn) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetField() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "field", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Field) + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalogId", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CatalogId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableQueryStats) fastWriteField2(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryColumn) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_stats", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryStats) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dbId", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableQueryStats) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryColumn) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFilterStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "filter_stats", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.FilterStats) + if p.IsSetTblId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tblId", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TblId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableQueryStats) field1Length() int { +func (p *TQueryColumn) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "colName", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ColName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryColumn) field1Length() int { l := 0 - if p.IsSetField() { - l += bthrift.Binary.FieldBeginLength("field", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Field) + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalogId", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.CatalogId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableQueryStats) field2Length() int { +func (p *TQueryColumn) field2Length() int { l := 0 - if p.IsSetQueryStats() { - l += bthrift.Binary.FieldBeginLength("query_stats", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.QueryStats) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("dbId", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.DbId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableQueryStats) field3Length() int { +func (p *TQueryColumn) field3Length() int { l := 0 - if p.IsSetFilterStats() { - l += bthrift.Binary.FieldBeginLength("filter_stats", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.FilterStats) + if p.IsSetTblId() { + l += bthrift.Binary.FieldBeginLength("tblId", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.TblId) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableIndexQueryStats) FastRead(buf []byte) (int, error) { +func (p *TQueryColumn) field4Length() int { + l := 0 + if p.IsSetColName() { + l += bthrift.Binary.FieldBeginLength("colName", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.ColName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSyncQueryColumns) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -32707,7 +54993,7 @@ func (p *TTableIndexQueryStats) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.LIST { l, err 
= p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -32760,7 +55046,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableIndexQueryStats[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncQueryColumns[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -32769,20 +55055,34 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableIndexQueryStats) FastReadField1(buf []byte) (int, error) { +func (p *TSyncQueryColumns) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.HighPriorityColumns = make([]*TQueryColumn, 0, size) + for i := 0; i < size; i++ { + _elem := NewTQueryColumn() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.HighPriorityColumns = append(p.HighPriorityColumns, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.IndexName = &v - } return offset, nil } -func (p *TTableIndexQueryStats) FastReadField2(buf []byte) (int, error) { +func (p *TSyncQueryColumns) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -32790,16 +55090,16 @@ func (p *TTableIndexQueryStats) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.TableStats = make([]*TTableQueryStats, 0, size) + p.MidPriorityColumns = make([]*TQueryColumn, 0, size) for i := 0; i < size; i++ { - _elem := NewTTableQueryStats() + _elem := NewTQueryColumn() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.TableStats = append(p.TableStats, _elem) + p.MidPriorityColumns = append(p.MidPriorityColumns, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -32810,13 +55110,13 @@ func (p *TTableIndexQueryStats) FastReadField2(buf []byte) (int, error) { } // for compatibility -func (p *TTableIndexQueryStats) FastWrite(buf []byte) int { +func (p *TSyncQueryColumns) FastWrite(buf []byte) int { return 0 } -func (p *TTableIndexQueryStats) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSyncQueryColumns) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableIndexQueryStats") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSyncQueryColumns") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) @@ -32826,9 +55126,9 @@ func (p *TTableIndexQueryStats) FastWriteNocopy(buf []byte, binaryWriter bthrift return offset } -func (p *TTableIndexQueryStats) BLength() int { +func (p *TSyncQueryColumns) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTableIndexQueryStats") + l += 
bthrift.Binary.StructBeginLength("TSyncQueryColumns") if p != nil { l += p.field1Length() l += p.field2Length() @@ -32838,25 +55138,32 @@ func (p *TTableIndexQueryStats) BLength() int { return l } -func (p *TTableIndexQueryStats) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSyncQueryColumns) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIndexName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "index_name", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.IndexName) - + if p.IsSetHighPriorityColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "highPriorityColumns", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.HighPriorityColumns { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableIndexQueryStats) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSyncQueryColumns) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_stats", thrift.LIST, 2) + if p.IsSetMidPriorityColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "midPriorityColumns", thrift.LIST, 2) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.TableStats { + for _, v := range p.MidPriorityColumns { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -32867,23 +55174,26 @@ func (p *TTableIndexQueryStats) fastWriteField2(buf []byte, binaryWriter bthrift return offset } -func (p *TTableIndexQueryStats) field1Length() int { +func (p *TSyncQueryColumns) field1Length() int { l := 0 - if p.IsSetIndexName() { - l += bthrift.Binary.FieldBeginLength("index_name", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.IndexName) - + if p.IsSetHighPriorityColumns() { + l += bthrift.Binary.FieldBeginLength("highPriorityColumns", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HighPriorityColumns)) + for _, v := range p.HighPriorityColumns { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableIndexQueryStats) field2Length() int { +func (p *TSyncQueryColumns) field2Length() int { l := 0 - if p.IsSetTableStats() { - l += bthrift.Binary.FieldBeginLength("table_stats", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableStats)) - for _, v := range p.TableStats { + if p.IsSetMidPriorityColumns() { + l += bthrift.Binary.FieldBeginLength("midPriorityColumns", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.MidPriorityColumns)) + for _, v := range p.MidPriorityColumns { l += v.BLength() } l += bthrift.Binary.ListEndLength() @@ -32892,7 +55202,7 @@ func (p *TTableIndexQueryStats) field2Length() int { return l } -func (p *TQueryStatsResult_) FastRead(buf []byte) (int, error) { +func (p *TFetchSplitBatchRequest) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -32915,7 +55225,7 @@ func (p *TQueryStatsResult_) FastRead(buf []byte) 
(int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -32929,7 +55239,7 @@ func (p *TQueryStatsResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -32942,23 +55252,165 @@ func (p *TQueryStatsResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 3: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 4: + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSplitBatchRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFetchSplitBatchRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SplitSourceId = &v + + } + return offset, nil +} + +func (p *TFetchSplitBatchRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxNumSplits = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFetchSplitBatchRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFetchSplitBatchRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSplitBatchRequest") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFetchSplitBatchRequest) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFetchSplitBatchRequest") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFetchSplitBatchRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSplitSourceId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"split_source_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.SplitSourceId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSplitBatchRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxNumSplits() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_num_splits", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxNumSplits) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFetchSplitBatchRequest) field1Length() int { + l := 0 + if p.IsSetSplitSourceId() { + l += bthrift.Binary.FieldBeginLength("split_source_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.SplitSourceId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSplitBatchRequest) field2Length() int { + l := 0 + if p.IsSetMaxNumSplits() { + l += bthrift.Binary.FieldBeginLength("max_num_splits", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.MaxNumSplits) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFetchSplitBatchResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: if fieldTypeId == thrift.LIST { - l, err = p.FastReadField4(buf[offset:]) + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -32970,9 +55422,9 @@ func (p *TQueryStatsResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField5(buf[offset:]) + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -33010,7 +55462,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryStatsResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchSplitBatchResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -33019,146 +55471,245 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TQueryStatsResult_) FastReadField1(buf []byte) (int, error) { +func (p *TFetchSplitBatchResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Splits = make([]*planner.TScanRangeLocations, 0, size) + for i := 0; i < size; i++ { + _elem := planner.NewTScanRangeLocations() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Splits = append(p.Splits, _elem) + } + if l, err 
:= bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp return offset, nil } -func (p *TQueryStatsResult_) FastReadField2(buf []byte) (int, error) { +func (p *TFetchSplitBatchResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err + } else { + offset += l } - p.SimpleResult_ = make(map[string]int64, size) - for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v + p.Status = tmp + return offset, nil +} - } +// for compatibility +func (p *TFetchSplitBatchResult_) FastWrite(buf []byte) int { + return 0 +} - var _val int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TFetchSplitBatchResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchSplitBatchResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - _val = v +func (p *TFetchSplitBatchResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFetchSplitBatchResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} +func (p *TFetchSplitBatchResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSplits() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "splits", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Splits { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.SimpleResult_[_key] = _val +func (p *TFetchSplitBatchResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 2) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset +} + +func (p *TFetchSplitBatchResult_) field1Length() int { + l := 0 + if p.IsSetSplits() { + l += bthrift.Binary.FieldBeginLength("splits", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Splits)) + for _, v := range p.Splits { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TQueryStatsResult_) FastReadField3(buf []byte) (int, error) { - offset := 0 +func (p *TFetchSplitBatchResult_) field2Length() int { + l := 0 + if p.IsSetStatus() { + l += 
bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 2) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *TFetchRunningQueriesResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - p.TableStats = make([]*TTableQueryStats, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTableQueryStats() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - p.TableStats = append(p.TableStats, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchRunningQueriesResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TQueryStatsResult_) FastReadField4(buf []byte) (int, error) { +func (p *TFetchRunningQueriesResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.TableVerbosStats = make([]*TTableIndexQueryStats, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTableIndexQueryStats() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.TableVerbosStats = append(p.TableVerbosStats, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + 
p.Status = tmp return offset, nil } -func (p *TQueryStatsResult_) FastReadField5(buf []byte) (int, error) { +func (p *TFetchRunningQueriesResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.TabletStats = make(map[int64]int64, size) + p.RunningQueries = make([]*types.TUniqueId, 0, size) for i := 0; i < size; i++ { - var _key int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _elem := types.NewTUniqueId() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _val = v - } - p.TabletStats[_key] = _val + p.RunningQueries = append(p.RunningQueries, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -33167,41 +55718,35 @@ func (p *TQueryStatsResult_) FastReadField5(buf []byte) (int, error) { } // for compatibility -func (p *TQueryStatsResult_) FastWrite(buf []byte) int { +func (p *TFetchRunningQueriesResult_) FastWrite(buf []byte) int { return 0 } -func (p *TQueryStatsResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchRunningQueriesResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryStatsResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchRunningQueriesResult") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TQueryStatsResult_) BLength() int { +func (p *TFetchRunningQueriesResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TQueryStatsResult") + l += bthrift.Binary.StructBeginLength("TFetchRunningQueriesResult") if p != nil { l += p.field1Length() l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TQueryStatsResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchRunningQueriesResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetStatus() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) @@ -33211,54 +55756,14 @@ func (p *TQueryStatsResult_) fastWriteField1(buf []byte, binaryWriter bthrift.Bi return offset } -func (p *TQueryStatsResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSimpleResult_() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "simple_result", thrift.MAP, 2) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I64, 0) - var length int - for k, v := range p.SimpleResult_ { - length++ - - offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteI64(buf[offset:], v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.I64, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TQueryStatsResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_stats", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.TableStats { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TQueryStatsResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchRunningQueriesResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableVerbosStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_verbos_stats", thrift.LIST, 4) + if p.IsSetRunningQueries() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "running_queries", thrift.LIST, 2) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.TableVerbosStats { + for _, v := range p.RunningQueries { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -33269,29 +55774,7 @@ func (p *TQueryStatsResult_) fastWriteField4(buf []byte, binaryWriter bthrift.Bi return offset } -func (p *TQueryStatsResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTabletStats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_stats", thrift.MAP, 5) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) - var length int - for k, v := range p.TabletStats { - length++ - - offset += bthrift.Binary.WriteI64(buf[offset:], k) - - offset += bthrift.Binary.WriteI64(buf[offset:], v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TQueryStatsResult_) field1Length() int { +func (p *TFetchRunningQueriesResult_) field1Length() int { l := 0 if p.IsSetStatus() { l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) @@ -33301,67 +55784,98 @@ func (p *TQueryStatsResult_) field1Length() int { return l } -func (p *TQueryStatsResult_) field2Length() int { +func (p *TFetchRunningQueriesResult_) field2Length() int { l := 0 - if p.IsSetSimpleResult_() { - l += bthrift.Binary.FieldBeginLength("simple_result", thrift.MAP, 2) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I64, len(p.SimpleResult_)) - for k, v := range p.SimpleResult_ { - - l += bthrift.Binary.StringLengthNocopy(k) - - l += bthrift.Binary.I64Length(v) - + if p.IsSetRunningQueries() { + l += bthrift.Binary.FieldBeginLength("running_queries", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.RunningQueries)) + for _, v := range p.RunningQueries { + l += v.BLength() } - l += 
bthrift.Binary.MapEndLength() + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryStatsResult_) field3Length() int { - l := 0 - if p.IsSetTableStats() { - l += bthrift.Binary.FieldBeginLength("table_stats", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableStats)) - for _, v := range p.TableStats { - l += v.BLength() +func (p *TFetchRunningQueriesRequest) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TQueryStatsResult_) field4Length() int { - l := 0 - if p.IsSetTableVerbosStats() { - l += bthrift.Binary.FieldBeginLength("table_verbos_stats", thrift.LIST, 4) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableVerbosStats)) - for _, v := range p.TableVerbosStats { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +// for compatibility +func (p *TFetchRunningQueriesRequest) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFetchRunningQueriesRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchRunningQueriesRequest") + if p != nil { } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TQueryStatsResult_) field5Length() int { +func (p *TFetchRunningQueriesRequest) BLength() int { l := 0 - if p.IsSetTabletStats() { - l += bthrift.Binary.FieldBeginLength("tablet_stats", thrift.MAP, 5) - l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.TabletStats)) - var tmpK int64 - var tmpV int64 - l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.TabletStats) - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TFetchRunningQueriesRequest") + if p != nil { } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TGetBinlogRequest) FastRead(buf []byte) (int, error) { +func 
(p *FrontendServiceGetDbNamesArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -33384,7 +55898,7 @@ func (p *TGetBinlogRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -33397,79 +55911,122 @@ func (p *TGetBinlogRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *FrontendServiceGetDbNamesArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTGetDbsParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} + +// for compatibility +func (p *FrontendServiceGetDbNamesArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *FrontendServiceGetDbNamesArgs) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getDbNames_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceGetDbNamesArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("getDbNames_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *FrontendServiceGetDbNamesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceGetDbNamesArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *FrontendServiceGetDbNamesResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -33481,23 +56038,126 @@ func (p *TGetBinlogRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *FrontendServiceGetDbNamesResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := 
NewTGetDbsResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *FrontendServiceGetDbNamesResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *FrontendServiceGetDbNamesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getDbNames_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceGetDbNamesResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("getDbNames_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *FrontendServiceGetDbNamesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *FrontendServiceGetDbNamesResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *FrontendServiceGetTableNamesArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -33535,7 +56195,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -33544,365 +56204,321 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogRequest) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetTableNamesArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTGetTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Cluster = &v - } + p.Params = tmp return offset, nil } -func (p *TGetBinlogRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := 
bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.User = &v - - } - return offset, nil +// for compatibility +func (p *FrontendServiceGetTableNamesArgs) FastWrite(buf []byte) int { + return 0 } -func (p *TGetBinlogRequest) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceGetTableNamesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Passwd = &v - + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTableNames_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return offset, nil + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TGetBinlogRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - +func (p *FrontendServiceGetTableNamesArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("getTableNames_args") + if p != nil { + l += p.field1Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TGetBinlogRequest) FastReadField5(buf []byte) (int, error) { +func (p *FrontendServiceGetTableNamesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Table = &v - - } - return offset, nil + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TGetBinlogRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableId = &v - - } - return offset, nil +func (p *FrontendServiceGetTableNamesArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TGetBinlogRequest) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.UserIp = &v - +func (p *FrontendServiceGetTableNamesResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset, nil -} - -func (p *TGetBinlogRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - p.Token = &v + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { 
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogRequest) FastReadField9(buf []byte) (int, error) { +func (p *FrontendServiceGetTableNamesResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTGetTablesResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PrevCommitSeq = &v - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetBinlogRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceGetTableNamesResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTableNamesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTableNames_result") if p != nil { - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetBinlogRequest) BLength() int { +func (p *FrontendServiceGetTableNamesResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetBinlogRequest") + l += bthrift.Binary.StructBeginLength("getTableNames_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } 
-func (p *TGetBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTableNamesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceGetTableNamesResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TGetBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceDescribeTableArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} -func (p *TGetBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return offset -} - -func (p *TGetBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset 
-} -func (p *TGetBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDescribeTableArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetUserIp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user_ip", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.UserIp) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTDescribeTableParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Params = tmp + return offset, nil } -func (p *TGetBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +// for compatibility +func (p *FrontendServiceDescribeTableArgs) FastWrite(buf []byte) int { + return 0 } -func (p *TGetBinlogRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDescribeTableArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPrevCommitSeq() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "prev_commit_seq", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.PrevCommitSeq) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTable_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetBinlogRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogRequest) field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p 
*TGetBinlogRequest) field3Length() int { - l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogRequest) field4Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogRequest) field5Length() int { - l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.Table) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogRequest) field6Length() int { - l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.TableId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogRequest) field7Length() int { +func (p *FrontendServiceDescribeTableArgs) BLength() int { l := 0 - if p.IsSetUserIp() { - l += bthrift.Binary.FieldBeginLength("user_ip", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.UserIp) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("describeTable_args") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TGetBinlogRequest) field8Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - - l += bthrift.Binary.FieldEndLength() - } - return l +func (p *FrontendServiceDescribeTableArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TGetBinlogRequest) field9Length() int { +func (p *FrontendServiceDescribeTableArgs) field1Length() int { l := 0 - if p.IsSetPrevCommitSeq() { - l += bthrift.Binary.FieldBeginLength("prev_commit_seq", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.PrevCommitSeq) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TBinlog) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceDescribeTableResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -33924,121 +56540,9 @@ func (p *TBinlog) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } 
else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField9(buf[offset:]) + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -34076,7 +56580,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlog[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -34085,394 +56589,325 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBinlog) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceDescribeTableResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTDescribeTableResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.CommitSeq = &v - } + p.Success = tmp return offset, nil } -func (p *TBinlog) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Timestamp = &v - - } - return offset, nil +// for compatibility +func (p *FrontendServiceDescribeTableResult) FastWrite(buf []byte) int { + return 0 } -func (p *TBinlog) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceDescribeTableResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTable_result") + if p != nil { + offset += 
p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := TBinlogType(v) - p.Type = &tmp - +func (p *FrontendServiceDescribeTableResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("describeTable_result") + if p != nil { + l += p.field0Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TBinlog) FastReadField4(buf []byte) (int, error) { +func (p *FrontendServiceDescribeTableResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbId = &v - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TBinlog) FastReadField5(buf []byte) (int, error) { - offset := 0 +func (p *FrontendServiceDescribeTableResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *FrontendServiceDescribeTablesArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - p.TableIds = make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - } - - p.TableIds = append(p.TableIds, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - } - return offset, nil -} - -func (p *TBinlog) FastReadField6(buf []byte) (int, error) { - offset := 0 + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l - p.Data = &v - + if err != nil { + goto ReadFieldEndError + } } - return offset, nil -} - -func (p *TBinlog) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Belong = &v - + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l 
+ if err != nil { + goto ReadStructEndError } - return offset, nil -} - -func (p *TBinlog) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableRef = &v - } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBinlog) FastReadField9(buf []byte) (int, error) { +func (p *FrontendServiceDescribeTablesArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + tmp := NewTDescribeTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.RemoveEnableCache = &v - } + p.Params = tmp return offset, nil } // for compatibility -func (p *TBinlog) FastWrite(buf []byte) int { +func (p *FrontendServiceDescribeTablesArgs) FastWrite(buf []byte) int { return 0 } -func (p *TBinlog) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDescribeTablesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBinlog") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTables_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TBinlog) BLength() int { +func (p *FrontendServiceDescribeTablesArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TBinlog") + l += bthrift.Binary.StructBeginLength("describeTables_args") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TBinlog) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCommitSeq() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "commit_seq", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.CommitSeq) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return 
offset -} - -func (p *TBinlog) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDescribeTablesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTimestamp() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "timestamp", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Timestamp) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TBinlog) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +func (p *FrontendServiceDescribeTablesArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TBinlog) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceDescribeTablesResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} - -func (p *TBinlog) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableIds() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ids", thrift.LIST, 5) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) - var length int - for _, v := range p.TableIds { - length++ - offset += bthrift.Binary.WriteI64(buf[offset:], v) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TBinlog) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetData() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Data) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TBinlog) fastWriteField7(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBelong() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "belong", thrift.I64, 7) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Belong) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TBinlog) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableRef() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_ref", thrift.I64, 8) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableRef) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TBinlog) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRemoveEnableCache() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remove_enable_cache", thrift.BOOL, 9) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.RemoveEnableCache) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TBinlog) field1Length() int { - l := 0 - if p.IsSetCommitSeq() { - l += bthrift.Binary.FieldBeginLength("commit_seq", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.CommitSeq) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TBinlog) field2Length() int { - l := 0 - if p.IsSetTimestamp() { - l += bthrift.Binary.FieldBeginLength("timestamp", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.Timestamp) - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l -} - -func (p *TBinlog) field3Length() int { - l := 0 - if p.IsSetType() { - l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 3) - l += bthrift.Binary.I32Length(int32(*p.Type)) - - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return l + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBinlog) field4Length() int { - l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.DbId) +func (p *FrontendServiceDescribeTablesResult) FastReadField0(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + tmp := NewTDescribeTablesResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + p.Success = tmp + return offset, nil } -func (p *TBinlog) field5Length() int { - l := 0 - if p.IsSetTableIds() { - l += bthrift.Binary.FieldBeginLength("table_ids", thrift.LIST, 5) - l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TableIds)) - var tmpV int64 - l += bthrift.Binary.I64Length(int64(tmpV)) 
* len(p.TableIds) - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l +// for compatibility +func (p *FrontendServiceDescribeTablesResult) FastWrite(buf []byte) int { + return 0 } -func (p *TBinlog) field6Length() int { - l := 0 - if p.IsSetData() { - l += bthrift.Binary.FieldBeginLength("data", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.Data) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceDescribeTablesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTables_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TBinlog) field7Length() int { +func (p *FrontendServiceDescribeTablesResult) BLength() int { l := 0 - if p.IsSetBelong() { - l += bthrift.Binary.FieldBeginLength("belong", thrift.I64, 7) - l += bthrift.Binary.I64Length(*p.Belong) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("describeTables_result") + if p != nil { + l += p.field0Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TBinlog) field8Length() int { - l := 0 - if p.IsSetTableRef() { - l += bthrift.Binary.FieldBeginLength("table_ref", thrift.I64, 8) - l += bthrift.Binary.I64Length(*p.TableRef) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceDescribeTablesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TBinlog) field9Length() int { +func (p *FrontendServiceDescribeTablesResult) field0Length() int { l := 0 - if p.IsSetRemoveEnableCache() { - l += bthrift.Binary.FieldBeginLength("remove_enable_cache", thrift.BOOL, 9) - l += bthrift.Binary.BoolLength(*p.RemoveEnableCache) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetBinlogResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceShowVariablesArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -34508,62 +56943,6 @@ func (p *TGetBinlogResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - 
case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -34590,7 +56969,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -34599,245 +56978,326 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceShowVariablesArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTShowVariableRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp + p.Params = tmp return offset, nil } -func (p *TGetBinlogResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 +// for compatibility +func (p *FrontendServiceShowVariablesArgs) FastWrite(buf []byte) int { + return 0 +} - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.NextCommitSeq = &v +func (p *FrontendServiceShowVariablesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showVariables_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} +func (p *FrontendServiceShowVariablesArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("showVariables_args") + if p != nil { + l += p.field1Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TGetBinlogResult_) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceShowVariablesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *FrontendServiceShowVariablesArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *FrontendServiceShowVariablesResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - 
p.Binlogs = make([]*TBinlog, 0, size) - for i := 0; i < size; i++ { - _elem := NewTBinlog() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - p.Binlogs = append(p.Binlogs, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } } - return offset, nil -} - -func (p *TGetBinlogResult_) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FeVersion = &v - + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogResult_) FastReadField5(buf []byte) (int, error) { +func (p *FrontendServiceShowVariablesResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTShowVariableResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FeMetaVersion = &v - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetBinlogResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceShowVariablesResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowVariablesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showVariables_result") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], 
binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetBinlogResult_) BLength() int { +func (p *FrontendServiceShowVariablesResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetBinlogResult") + l += bthrift.Binary.StructBeginLength("showVariables_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowVariablesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetNextCommitSeq() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "next_commit_seq", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.NextCommitSeq) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceShowVariablesResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TGetBinlogResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBinlogs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlogs", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Binlogs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceReportExecStatusArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} -func (p *TGetBinlogResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFeVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_version", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FeVersion) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return offset + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogResult_) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportExecStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetFeMetaVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_meta_version", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.FeMetaVersion) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTReportExecStatusParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Params = tmp + return offset, nil } -func (p *TGetBinlogResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l +// for compatibility +func (p *FrontendServiceReportExecStatusArgs) FastWrite(buf []byte) int { + return 0 } -func (p *TGetBinlogResult_) field2Length() int { - l := 0 - if p.IsSetNextCommitSeq() { - l += bthrift.Binary.FieldBeginLength("next_commit_seq", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.NextCommitSeq) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceReportExecStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportExecStatus_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TGetBinlogResult_) field3Length() int { +func (p *FrontendServiceReportExecStatusArgs) BLength() int { l := 0 - if p.IsSetBinlogs() { - l += bthrift.Binary.FieldBeginLength("binlogs", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Binlogs)) - for _, v := range p.Binlogs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("reportExecStatus_args") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() 
return l } -func (p *TGetBinlogResult_) field4Length() int { - l := 0 - if p.IsSetFeVersion() { - l += bthrift.Binary.FieldBeginLength("fe_version", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.FeVersion) - - l += bthrift.Binary.FieldEndLength() - } - return l +func (p *FrontendServiceReportExecStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TGetBinlogResult_) field5Length() int { +func (p *FrontendServiceReportExecStatusArgs) field1Length() int { l := 0 - if p.IsSetFeMetaVersion() { - l += bthrift.Binary.FieldBeginLength("fe_meta_version", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.FeMetaVersion) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TGetTabletReplicaInfosRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReportExecStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetTabletIds bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -34854,14 +57314,13 @@ func (p *TGetTabletReplicaInfosRequest) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField1(buf[offset:]) + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTabletIds = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -34889,113 +57348,82 @@ func (p *TGetTabletReplicaInfosRequest) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetTabletIds { - fieldId = 1 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTabletReplicaInfosRequest[fieldId])) } -func (p *TGetTabletReplicaInfosRequest) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceReportExecStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.TabletIds = 
make([]int64, 0, size) - for i := 0; i < size; i++ { - var _elem int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.TabletIds = append(p.TabletIds, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := NewTReportExecStatusResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetTabletReplicaInfosRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceReportExecStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetTabletReplicaInfosRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportExecStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTabletReplicaInfosRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportExecStatus_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetTabletReplicaInfosRequest) BLength() int { +func (p *FrontendServiceReportExecStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetTabletReplicaInfosRequest") + l += bthrift.Binary.StructBeginLength("reportExecStatus_result") if p != nil { - l += p.field1Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetTabletReplicaInfosRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportExecStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) - var length int - for _, v := range p.TabletIds { - length++ - offset += bthrift.Binary.WriteI64(buf[offset:], v) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TGetTabletReplicaInfosRequest) field1Length() int { +func (p *FrontendServiceReportExecStatusResult) field0Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds)) - var tmpV int64 - l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds) - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TGetTabletReplicaInfosResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFinishTaskArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -35031,34 
+57459,6 @@ func (p *TGetTabletReplicaInfosResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -35085,7 +57485,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTabletReplicaInfosResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -35094,208 +57494,194 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetTabletReplicaInfosResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceFinishTaskArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := masterservice.NewTFinishTaskRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp + p.Request = tmp return offset, nil } -func (p *TGetTabletReplicaInfosResult_) FastReadField2(buf []byte) (int, error) { +// for compatibility +func (p *FrontendServiceFinishTaskArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *FrontendServiceFinishTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "finishTask_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err +func (p *FrontendServiceFinishTaskArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("finishTask_args") + if p != nil { + l += p.field1Length() } - p.TabletReplicaInfos = make(map[int64][]*types.TReplicaInfo, size) - for i := 0; i < size; i++ { - var _key int64 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} - _key = v +func (p *FrontendServiceFinishTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} - } +func (p *FrontendServiceFinishTaskArgs) field1Length() int 
{ + l := 0 + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *FrontendServiceFinishTaskResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l if err != nil { - return offset, err + goto ReadFieldBeginError } - _val := make([]*types.TReplicaInfo, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTReplicaInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - - _val = append(_val, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - p.TabletReplicaInfos[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetTabletReplicaInfosResult_) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceFinishTaskResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := masterservice.NewTMasterResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Token = &v - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetTabletReplicaInfosResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceFinishTaskResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetTabletReplicaInfosResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFinishTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], 
"TGetTabletReplicaInfosResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "finishTask_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetTabletReplicaInfosResult_) BLength() int { +func (p *FrontendServiceFinishTaskResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetTabletReplicaInfosResult") + l += bthrift.Binary.StructBeginLength("finishTask_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetTabletReplicaInfosResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetTabletReplicaInfosResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTabletReplicaInfos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_replica_infos", thrift.MAP, 2) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.LIST, 0) - var length int - for k, v := range p.TabletReplicaInfos { - length++ - - offset += bthrift.Binary.WriteI64(buf[offset:], k) - - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range v { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.LIST, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetTabletReplicaInfosResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFinishTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetTabletReplicaInfosResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetTabletReplicaInfosResult_) field2Length() int { - l := 0 - if p.IsSetTabletReplicaInfos() { - l += bthrift.Binary.FieldBeginLength("tablet_replica_infos", thrift.MAP, 2) - l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.LIST, len(p.TabletReplicaInfos)) - for k, v 
:= range p.TabletReplicaInfos { - - l += bthrift.Binary.I64Length(k) - - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) - for _, v := range v { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetTabletReplicaInfosResult_) field3Length() int { +func (p *FrontendServiceFinishTaskResult) field0Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetSnapshotRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReportArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -35316,122 +57702,10 @@ func (p *TGetSnapshotRequest) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField9(buf[offset:]) + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ 
-35469,7 +57743,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -35478,367 +57752,63 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetSnapshotRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.User = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Passwd = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Table = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LabelName = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.SnapshotName = &v - - } - return offset, nil -} - -func (p *TGetSnapshotRequest) FastReadField9(buf []byte) (int, error) { +func (p *FrontendServiceReportArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := masterservice.NewTReportRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TSnapshotType(v) - p.SnapshotType = &tmp - } + p.Request = tmp return offset, nil } // for compatibility -func (p *TGetSnapshotRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceReportArgs) FastWrite(buf []byte) int { return 0 } -func (p *TGetSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p 
*FrontendServiceReportArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetSnapshotRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "report_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetSnapshotRequest) BLength() int { +func (p *FrontendServiceReportArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetSnapshotRequest") + l += bthrift.Binary.StructBeginLength("report_args") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetSnapshotRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetToken() { - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLabelName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label_name", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LabelName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSnapshotName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_name", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SnapshotName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetSnapshotRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSnapshotType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_type", thrift.I32, 9) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.SnapshotType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TGetSnapshotRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field3Length() int { - l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field4Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field5Length() int { - l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.Table) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field6Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field7Length() int { - l := 0 - if p.IsSetLabelName() { - l += bthrift.Binary.FieldBeginLength("label_name", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.LabelName) - - l += bthrift.Binary.FieldEndLength() - 
} - return l -} - -func (p *TGetSnapshotRequest) field8Length() int { - l := 0 - if p.IsSetSnapshotName() { - l += bthrift.Binary.FieldBeginLength("snapshot_name", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(*p.SnapshotName) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetSnapshotRequest) field9Length() int { +func (p *FrontendServiceReportArgs) field1Length() int { l := 0 - if p.IsSetSnapshotType() { - l += bthrift.Binary.FieldBeginLength("snapshot_type", thrift.I32, 9) - l += bthrift.Binary.I32Length(int32(*p.SnapshotType)) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TGetSnapshotResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReportResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -35860,37 +57830,9 @@ func (p *TGetSnapshotResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -35928,7 +57870,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetSnapshotResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -35937,143 +57879,144 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetSnapshotResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceReportResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := masterservice.NewTMasterResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp - return offset, nil -} - -func (p *TGetSnapshotResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Meta = []byte(v) - - } - return offset, nil -} - -func (p *TGetSnapshotResult_) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.JobInfo = []byte(v) - - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetSnapshotResult_) FastWrite(buf []byte) 
int { +func (p *FrontendServiceReportResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetSnapshotResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetSnapshotResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "report_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetSnapshotResult_) BLength() int { +func (p *FrontendServiceReportResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetSnapshotResult") + l += bthrift.Binary.StructBeginLength("report_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetSnapshotResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetSnapshotResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetMeta() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta", thrift.STRING, 2) - offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Meta)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceReportResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TGetSnapshotResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetJobInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_info", thrift.STRING, 3) - offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.JobInfo)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceFetchResourceArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} -func (p *TGetSnapshotResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetSnapshotResult_) field2Length() int { - l := 0 - if p.IsSetMeta() { - l += bthrift.Binary.FieldBeginLength("meta", thrift.STRING, 2) - l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Meta)) +// for compatibility +func (p *FrontendServiceFetchResourceArgs) FastWrite(buf []byte) int { + return 0 +} - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceFetchResourceArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchResource_args") + if p != nil { } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TGetSnapshotResult_) field3Length() int { +func (p *FrontendServiceFetchResourceArgs) BLength() int { l := 0 - if p.IsSetJobInfo() { - l += bthrift.Binary.FieldBeginLength("job_info", thrift.STRING, 3) - l += bthrift.Binary.BinaryLengthNocopy([]byte(p.JobInfo)) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("fetchResource_args") + if p != nil { } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TTableRef) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchResourceResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -36095,23 +58038,9 @@ func (p *TTableRef) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -36149,7 +58078,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableRef[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchResourceResult[fieldId]), err) SkipFieldError: return offset, 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -36158,106 +58087,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableRef) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Table = &v - - } - return offset, nil -} - -func (p *TTableRef) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceFetchResourceResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := masterservice.NewTFetchResourceResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.AliasName = &v - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TTableRef) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchResourceResult) FastWrite(buf []byte) int { return 0 } -func (p *TTableRef) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchResourceResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableRef") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchResource_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTableRef) BLength() int { +func (p *FrontendServiceFetchResourceResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTableRef") + l += bthrift.Binary.StructBeginLength("fetchResource_result") if p != nil { - l += p.field1Length() - l += p.field3Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTableRef) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TTableRef) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchResourceResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAliasName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "alias_name", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AliasName) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableRef) field1Length() int { - l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Table) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TTableRef) field3Length() int { +func (p 
*FrontendServiceFetchResourceResult) field0Length() int { l := 0 - if p.IsSetAliasName() { - l += bthrift.Binary.FieldBeginLength("alias_name", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.AliasName) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRestoreSnapshotRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceForwardArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -36280,7 +58170,7 @@ func (p *TRestoreSnapshotRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -36293,160 +58183,6 @@ func (p *TRestoreSnapshotRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField10(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if 
err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -36473,7 +58209,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -36482,547 +58218,579 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - - } - return offset, nil -} - -func (p *TRestoreSnapshotRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.User = &v - - } - return offset, nil -} - -func (p *TRestoreSnapshotRequest) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceForwardArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTMasterOpRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Passwd = &v - } + p.Params = tmp return offset, nil } -func (p *TRestoreSnapshotRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Db = &v - - } - return offset, nil +// for compatibility +func (p *FrontendServiceForwardArgs) FastWrite(buf []byte) int { + return 0 } -func (p *TRestoreSnapshotRequest) FastReadField5(buf []byte) (int, error) { +func (p *FrontendServiceForwardArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Table = &v - + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "forward_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return offset, nil + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TRestoreSnapshotRequest) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v - +func (p *FrontendServiceForwardArgs) BLength() int { + l := 0 + l += 
bthrift.Binary.StructBeginLength("forward_args") + if p != nil { + l += p.field1Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TRestoreSnapshotRequest) FastReadField7(buf []byte) (int, error) { +func (p *FrontendServiceForwardArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LabelName = &v - - } - return offset, nil + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TRestoreSnapshotRequest) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.RepoName = &v - - } - return offset, nil +func (p *FrontendServiceForwardArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TRestoreSnapshotRequest) FastReadField9(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *FrontendServiceForwardResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err + goto ReadStructBeginError } - p.TableRefs = make([]*TTableRef, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTableRef() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.TableRefs = append(p.TableRefs, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - } - return offset, nil -} - -func (p *TRestoreSnapshotRequest) FastReadField10(buf []byte) (int, error) { - offset := 0 - - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Properties = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - + if err != nil { + goto ReadFieldBeginError } - - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l - - _val = v - + if err != nil { + goto SkipFieldError + } } - p.Properties[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TRestoreSnapshotRequest) FastReadField11(buf []byte) (int, 
error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l - - p.Meta = []byte(v) - + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) FastReadField12(buf []byte) (int, error) { +func (p *FrontendServiceForwardResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + tmp := NewTMasterOpResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.JobInfo = []byte(v) - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TRestoreSnapshotRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceForwardResult) FastWrite(buf []byte) int { return 0 } -func (p *TRestoreSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceForwardResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRestoreSnapshotRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "forward_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRestoreSnapshotRequest) BLength() int { +func (p *FrontendServiceForwardResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TRestoreSnapshotRequest") + l += bthrift.Binary.StructBeginLength("forward_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += 
p.field11Length() - l += p.field12Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TRestoreSnapshotRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceForwardResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRestoreSnapshotRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceForwardResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TRestoreSnapshotRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPasswd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "passwd", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Passwd) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceListTableStatusArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return offset -} -func (p *TRestoreSnapshotRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDb() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Db) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return offset + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTGetTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Params = tmp + return offset, nil } -func (p *TRestoreSnapshotRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 6) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +// for compatibility +func (p *FrontendServiceListTableStatusArgs) FastWrite(buf []byte) int { + return 0 } -func (p *TRestoreSnapshotRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLabelName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "label_name", thrift.STRING, 7) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.LabelName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableStatus_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRestoreSnapshotRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRepoName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "repo_name", thrift.STRING, 8) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RepoName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *FrontendServiceListTableStatusArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("listTableStatus_args") + if p != nil { + l += p.field1Length() } - return offset + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TRestoreSnapshotRequest) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableRefs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_refs", thrift.LIST, 9) - listBeginOffset := offset - 
offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.TableRefs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TRestoreSnapshotRequest) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetProperties() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 10) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) - var length int - for k, v := range p.Properties { - length++ +func (p *FrontendServiceListTableStatusArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) +func (p *FrontendServiceListTableStatusResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset -} - -func (p *TRestoreSnapshotRequest) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetMeta() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta", thrift.STRING, 11) - offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Meta)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableStatusResult[fieldId]), 
err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - if p.IsSetJobInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_info", thrift.STRING, 12) - offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.JobInfo)) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTListTableStatusResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Success = tmp + return offset, nil } -func (p *TRestoreSnapshotRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l +// for compatibility +func (p *FrontendServiceListTableStatusResult) FastWrite(buf []byte) int { + return 0 } -func (p *TRestoreSnapshotRequest) field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceListTableStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableStatus_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TRestoreSnapshotRequest) field3Length() int { +func (p *FrontendServiceListTableStatusResult) BLength() int { l := 0 - if p.IsSetPasswd() { - l += bthrift.Binary.FieldBeginLength("passwd", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Passwd) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("listTableStatus_result") + if p != nil { + l += p.field0Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TRestoreSnapshotRequest) field4Length() int { - l := 0 - if p.IsSetDb() { - l += bthrift.Binary.FieldBeginLength("db", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Db) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceListTableStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TRestoreSnapshotRequest) field5Length() int { +func (p *FrontendServiceListTableStatusResult) field0Length() int { l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.Table) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", 
thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRestoreSnapshotRequest) field6Length() int { - l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 6) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceListTableMetadataNameIdsArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return l -} -func (p *TRestoreSnapshotRequest) field7Length() int { - l := 0 - if p.IsSetLabelName() { - l += bthrift.Binary.FieldBeginLength("label_name", thrift.STRING, 7) - l += bthrift.Binary.StringLengthNocopy(*p.LabelName) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l -} - -func (p *TRestoreSnapshotRequest) field8Length() int { - l := 0 - if p.IsSetRepoName() { - l += bthrift.Binary.FieldBeginLength("repo_name", thrift.STRING, 8) - l += bthrift.Binary.StringLengthNocopy(*p.RepoName) - - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return l -} -func (p *TRestoreSnapshotRequest) field9Length() int { - l := 0 - if p.IsSetTableRefs() { - l += bthrift.Binary.FieldBeginLength("table_refs", thrift.LIST, 9) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TableRefs)) - for _, v := range p.TableRefs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotRequest) field10Length() int { - l := 0 - if p.IsSetProperties() { - l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 10) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) - for k, v := range p.Properties { +func (p 
*FrontendServiceListTableMetadataNameIdsArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.StringLengthNocopy(k) + tmp := NewTGetTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Params = tmp + return offset, nil +} - l += bthrift.Binary.StringLengthNocopy(v) +// for compatibility +func (p *FrontendServiceListTableMetadataNameIdsArgs) FastWrite(buf []byte) int { + return 0 +} - } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *FrontendServiceListTableMetadataNameIdsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableMetadataNameIds_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TRestoreSnapshotRequest) field11Length() int { +func (p *FrontendServiceListTableMetadataNameIdsArgs) BLength() int { l := 0 - if p.IsSetMeta() { - l += bthrift.Binary.FieldBeginLength("meta", thrift.STRING, 11) - l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Meta)) - - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("listTableMetadataNameIds_args") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TRestoreSnapshotRequest) field12Length() int { - l := 0 - if p.IsSetJobInfo() { - l += bthrift.Binary.FieldBeginLength("job_info", thrift.STRING, 12) - l += bthrift.Binary.BinaryLengthNocopy([]byte(p.JobInfo)) +func (p *FrontendServiceListTableMetadataNameIdsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} - l += bthrift.Binary.FieldEndLength() - } +func (p *FrontendServiceListTableMetadataNameIdsArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TRestoreSnapshotResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListTableMetadataNameIdsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37044,9 +58812,9 @@ func (p *TRestoreSnapshotResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -37084,7 +58852,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRestoreSnapshotResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ 
-37093,67 +58861,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TRestoreSnapshotResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceListTableMetadataNameIdsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTListTableMetadataNameIdsResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp + p.Success = tmp return offset, nil } // for compatibility -func (p *TRestoreSnapshotResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceListTableMetadataNameIdsResult) FastWrite(buf []byte) int { return 0 } -func (p *TRestoreSnapshotResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableMetadataNameIdsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRestoreSnapshotResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableMetadataNameIds_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TRestoreSnapshotResult_) BLength() int { +func (p *FrontendServiceListTableMetadataNameIdsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TRestoreSnapshotResult") + l += bthrift.Binary.StructBeginLength("listTableMetadataNameIds_result") if p != nil { - l += p.field1Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TRestoreSnapshotResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTableMetadataNameIdsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TRestoreSnapshotResult_) field1Length() int { +func (p *FrontendServiceListTableMetadataNameIdsResult) field0Length() int { l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetMasterTokenRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListTablePrivilegeStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37176,7 +58944,7 @@ func (p *TGetMasterTokenRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -37189,34 +58957,6 @@ func (p *TGetMasterTokenRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = 
p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -37243,7 +58983,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -37252,143 +58992,63 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetMasterTokenRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Cluster = &v - - } - return offset, nil -} - -func (p *TGetMasterTokenRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.User = &v - - } - return offset, nil -} - -func (p *TGetMasterTokenRequest) FastReadField3(buf []byte) (int, error) { +func (p *FrontendServiceListTablePrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTGetTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Password = &v - } + p.Params = tmp return offset, nil } // for compatibility -func (p *TGetMasterTokenRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceListTablePrivilegeStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *TGetMasterTokenRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTablePrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMasterTokenRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTablePrivilegeStatus_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetMasterTokenRequest) BLength() int { +func (p *FrontendServiceListTablePrivilegeStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetMasterTokenRequest") + l += bthrift.Binary.StructBeginLength("listTablePrivilegeStatus_args") if p != nil { l += p.field1Length() - l += p.field2Length() - l += 
p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetMasterTokenRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCluster() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetMasterTokenRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetUser() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "user", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.User) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetMasterTokenRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTablePrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPassword() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "password", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Password) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TGetMasterTokenRequest) field1Length() int { - l := 0 - if p.IsSetCluster() { - l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Cluster) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetMasterTokenRequest) field2Length() int { - l := 0 - if p.IsSetUser() { - l += bthrift.Binary.FieldBeginLength("user", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.User) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetMasterTokenRequest) field3Length() int { +func (p *FrontendServiceListTablePrivilegeStatusArgs) field1Length() int { l := 0 - if p.IsSetPassword() { - l += bthrift.Binary.FieldBeginLength("password", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Password) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TGetMasterTokenResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListTablePrivilegeStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37410,23 +59070,9 @@ func (p *TGetMasterTokenResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -37464,7 +59110,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), 
err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetMasterTokenResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -37473,104 +59119,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetMasterTokenResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceListTablePrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTListPrivilegesResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp - return offset, nil -} - -func (p *TGetMasterTokenResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Token = &v - - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TGetMasterTokenResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceListTablePrivilegeStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *TGetMasterTokenResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTablePrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetMasterTokenResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTablePrivilegeStatus_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGetMasterTokenResult_) BLength() int { +func (p *FrontendServiceListTablePrivilegeStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetMasterTokenResult") + l += bthrift.Binary.StructBeginLength("listTablePrivilegeStatus_result") if p != nil { - l += p.field1Length() - l += p.field2Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetMasterTokenResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetMasterTokenResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListTablePrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetToken() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + 
offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGetMasterTokenResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetMasterTokenResult_) field2Length() int { +func (p *FrontendServiceListTablePrivilegeStatusResult) field0Length() int { l := 0 - if p.IsSetToken() { - l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Token) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TGetBinlogLagResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37606,20 +59215,6 @@ func (p *TGetBinlogLagResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -37646,7 +59241,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetBinlogLagResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -37655,42 +59250,28 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGetBinlogLagResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTGetTablesParams() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp - return offset, nil -} - -func (p *TGetBinlogLagResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Lag = &v - - } + p.Params = tmp return offset, nil } // for compatibility -func (p *TGetBinlogLagResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *TGetBinlogLagResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetBinlogLagResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listSchemaPrivilegeStatus_args") if p != nil { - offset += 
p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -37698,61 +59279,34 @@ func (p *TGetBinlogLagResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift. return offset } -func (p *TGetBinlogLagResult_) BLength() int { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGetBinlogLagResult") + l += bthrift.Binary.StructBeginLength("listSchemaPrivilegeStatus_args") if p != nil { l += p.field1Length() - l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGetBinlogLagResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TGetBinlogLagResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLag() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lag", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Lag) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TGetBinlogLagResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TGetBinlogLagResult_) field2Length() int { +func (p *FrontendServiceListSchemaPrivilegeStatusArgs) field1Length() int { l := 0 - if p.IsSetLag() { - l += bthrift.Binary.FieldBeginLength("lag", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.Lag) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TUpdateFollowerStatsCacheRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37774,23 +59328,9 @@ func (p *TUpdateFollowerStatsCacheRequest) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField2(buf[offset:]) + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -37828,7 +59368,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TUpdateFollowerStatsCacheRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -37837,131 +59377,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TUpdateFollowerStatsCacheRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Key = &v - - } - return offset, nil -} - -func (p *TUpdateFollowerStatsCacheRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.StatsRows = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.StatsRows = append(p.StatsRows, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := NewTListPrivilegesResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.Success = tmp return offset, nil } // for compatibility -func (p *TUpdateFollowerStatsCacheRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *TUpdateFollowerStatsCacheRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUpdateFollowerStatsCacheRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listSchemaPrivilegeStatus_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TUpdateFollowerStatsCacheRequest) BLength() int { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TUpdateFollowerStatsCacheRequest") + l += bthrift.Binary.StructBeginLength("listSchemaPrivilegeStatus_result") if p != nil { - l += p.field1Length() - l += p.field2Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TUpdateFollowerStatsCacheRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetKey() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "key", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Key) - + if p.IsSetSuccess() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TUpdateFollowerStatsCacheRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "statsRows", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.StatsRows { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TUpdateFollowerStatsCacheRequest) field1Length() int { +func (p *FrontendServiceListSchemaPrivilegeStatusResult) field0Length() int { l := 0 - if p.IsSetKey() { - l += bthrift.Binary.FieldBeginLength("key", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Key) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TUpdateFollowerStatsCacheRequest) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("statsRows", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.StatsRows)) - for _, v := range p.StatsRows { - l += bthrift.Binary.StringLengthNocopy(v) - - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TAutoIncrementRangeRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListUserPrivilegeStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -37984,7 +59460,7 @@ func (p *TAutoIncrementRangeRequest) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -37997,62 +59473,6 @@ func (p *TAutoIncrementRangeRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -38079,7 +59499,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -38088,217 +59508,63 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAutoIncrementRangeRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbId = &v - - } - return offset, nil -} - -func (p *TAutoIncrementRangeRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableId = &v - - } - return offset, nil -} - -func (p *TAutoIncrementRangeRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ColumnId = &v - - } - return offset, nil -} - -func (p *TAutoIncrementRangeRequest) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Length = &v - - } - return offset, nil -} - -func (p *TAutoIncrementRangeRequest) FastReadField5(buf []byte) (int, error) { +func (p *FrontendServiceListUserPrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTGetTablesParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.LowerBound = &v - } + p.Params = tmp return offset, nil } // for compatibility -func (p *TAutoIncrementRangeRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceListUserPrivilegeStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *TAutoIncrementRangeRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListUserPrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAutoIncrementRangeRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listUserPrivilegeStatus_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAutoIncrementRangeRequest) BLength() int { +func (p *FrontendServiceListUserPrivilegeStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAutoIncrementRangeRequest") + l += bthrift.Binary.StructBeginLength("listUserPrivilegeStatus_args") if p != nil { l += p.field1Length() - l += p.field2Length() - l += 
p.field3Length() - l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAutoIncrementRangeRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetColumnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_id", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ColumnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLength() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "length", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Length) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListUserPrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLowerBound() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lower_bound", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LowerBound) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAutoIncrementRangeRequest) field1Length() int { - l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.DbId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeRequest) field2Length() int { - l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.TableId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeRequest) field3Length() int { - l := 0 - if p.IsSetColumnId() { - l += bthrift.Binary.FieldBeginLength("column_id", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.ColumnId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeRequest) field4Length() int { - l := 0 - if p.IsSetLength() { - l += bthrift.Binary.FieldBeginLength("length", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.Length) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeRequest) field5Length() int { +func (p *FrontendServiceListUserPrivilegeStatusArgs) field1Length() int { l := 0 - if p.IsSetLowerBound() { - l 
+= bthrift.Binary.FieldBeginLength("lower_bound", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.LowerBound) - - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) + l += p.Params.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TAutoIncrementRangeResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceListUserPrivilegeStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -38320,37 +59586,9 @@ func (p *TAutoIncrementRangeResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -38388,7 +59626,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAutoIncrementRangeResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -38397,141 +59635,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TAutoIncrementRangeResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceListUserPrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTListPrivilegesResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp - return offset, nil -} - -func (p *TAutoIncrementRangeResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Start = &v - - } - return offset, nil -} - -func (p *TAutoIncrementRangeResult_) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Length = &v - - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TAutoIncrementRangeResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceListUserPrivilegeStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *TAutoIncrementRangeResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListUserPrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TAutoIncrementRangeResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listUserPrivilegeStatus_result") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAutoIncrementRangeResult_) BLength() int { +func (p *FrontendServiceListUserPrivilegeStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAutoIncrementRangeResult") + l += bthrift.Binary.StructBeginLength("listUserPrivilegeStatus_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAutoIncrementRangeResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStart() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "start", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Start) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAutoIncrementRangeResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceListUserPrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLength() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "length", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Length) - + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAutoIncrementRangeResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeResult_) field2Length() int { - l := 0 - if p.IsSetStart() { - l += bthrift.Binary.FieldBeginLength("start", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.Start) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAutoIncrementRangeResult_) field3Length() int { +func (p *FrontendServiceListUserPrivilegeStatusResult) field0Length() int { l := 0 - if p.IsSetLength() { - l += bthrift.Binary.FieldBeginLength("length", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.Length) - + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TCreatePartitionRequest) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdateExportTaskStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int 
@@ -38548,56 +59712,14 @@ func (p *TCreatePartitionRequest) FastRead(buf []byte) (int, error) { offset += l if err != nil { goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField4(buf[offset:]) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -38635,7 +59757,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionRequest[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -38644,231 +59766,63 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionRequest) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TxnId = &v - - } - return offset, nil -} - -func (p *TCreatePartitionRequest) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbId = &v - - } - return offset, nil -} - -func (p *TCreatePartitionRequest) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableId = &v - - } - return offset, nil -} - -func (p *TCreatePartitionRequest) FastReadField4(buf []byte) (int, error) { +func (p *FrontendServiceUpdateExportTaskStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PartitionValues = make([][]*exprs.TStringLiteral, 0, size) - for i := 0; i < size; i++ { - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - _elem := make([]*exprs.TStringLiteral, 0, size) - for i := 0; i < size; i++ { - _elem1 := exprs.NewTStringLiteral() - if l, err := 
_elem1.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - _elem = append(_elem, _elem1) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.PartitionValues = append(p.PartitionValues, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + tmp := NewTUpdateExportTaskStatusRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.Request = tmp return offset, nil } // for compatibility -func (p *TCreatePartitionRequest) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdateExportTaskStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *TCreatePartitionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateExportTaskStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreatePartitionRequest") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateExportTaskStatus_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCreatePartitionRequest) BLength() int { +func (p *FrontendServiceUpdateExportTaskStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCreatePartitionRequest") + l += bthrift.Binary.StructBeginLength("updateExportTaskStatus_args") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TCreatePartitionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateExportTaskStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartitionValues() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitionValues", thrift.LIST, 4) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) - var length int - for _, 
v := range p.PartitionValues { - length++ - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range v { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TCreatePartitionRequest) field1Length() int { - l := 0 - if p.IsSetTxnId() { - l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.TxnId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionRequest) field2Length() int { - l := 0 - if p.IsSetDbId() { - l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(*p.DbId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionRequest) field3Length() int { - l := 0 - if p.IsSetTableId() { - l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.TableId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionRequest) field4Length() int { +func (p *FrontendServiceUpdateExportTaskStatusArgs) field1Length() int { l := 0 - if p.IsSetPartitionValues() { - l += bthrift.Binary.FieldBeginLength("partitionValues", thrift.LIST, 4) - l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.PartitionValues)) - for _, v := range p.PartitionValues { - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) - for _, v := range v { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TCreatePartitionResult_) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdateExportTaskStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -38890,51 +59844,9 @@ func (p *TCreatePartitionResult_) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField4(buf[offset:]) + l, err = 
p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -38972,7 +59884,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCreatePartitionResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -38981,250 +59893,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TCreatePartitionResult_) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceUpdateExportTaskStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTFeResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp - return offset, nil -} - -func (p *TCreatePartitionResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Partitions = make([]*descriptors.TOlapTablePartition, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTablePartition() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Partitions = append(p.Partitions, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TCreatePartitionResult_) FastReadField3(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Tablets = make([]*descriptors.TTabletLocation, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTTabletLocation() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Tablets = append(p.Tablets, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TCreatePartitionResult_) FastReadField4(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.Nodes = make([]*descriptors.TNodeInfo, 0, size) - for i := 0; i < size; i++ { - _elem := descriptors.NewTNodeInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.Nodes = append(p.Nodes, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + p.Success = tmp return offset, nil } // for compatibility -func (p *TCreatePartitionResult_) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdateExportTaskStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *TCreatePartitionResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateExportTaskStatusResult) FastWriteNocopy(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreatePartitionResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateExportTaskStatus_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCreatePartitionResult_) BLength() int { +func (p *FrontendServiceUpdateExportTaskStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCreatePartitionResult") + l += bthrift.Binary.StructBeginLength("updateExportTaskStatus_result") if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TCreatePartitionResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPartitions() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Partitions { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTablets() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Tablets { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TCreatePartitionResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateExportTaskStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNodes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nodes", thrift.LIST, 4) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Nodes { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetSuccess() { + offset 
+= bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TCreatePartitionResult_) field1Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionResult_) field2Length() int { - l := 0 - if p.IsSetPartitions() { - l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions)) - for _, v := range p.Partitions { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionResult_) field3Length() int { - l := 0 - if p.IsSetTablets() { - l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tablets)) - for _, v := range p.Tablets { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCreatePartitionResult_) field4Length() int { +func (p *FrontendServiceUpdateExportTaskStatusResult) field0Length() int { l := 0 - if p.IsSetNodes() { - l += bthrift.Binary.FieldBeginLength("nodes", thrift.LIST, 4) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Nodes)) - for _, v := range p.Nodes { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *FrontendServiceGetDbNamesArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnBeginArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39286,7 +60015,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39295,27 +60024,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetDbNamesArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnBeginArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetDbsParams() + tmp := NewTLoadTxnBeginRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceGetDbNamesArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnBeginArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetDbNamesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnBeginArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "getDbNames_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnBegin_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -39324,9 +60053,9 @@ func (p *FrontendServiceGetDbNamesArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceGetDbNamesArgs) BLength() int { +func (p *FrontendServiceLoadTxnBeginArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getDbNames_args") + l += bthrift.Binary.StructBeginLength("loadTxnBegin_args") if p != nil { l += p.field1Length() } @@ -39335,23 +60064,23 @@ func (p *FrontendServiceGetDbNamesArgs) BLength() int { return l } -func (p *FrontendServiceGetDbNamesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnBeginArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceGetDbNamesArgs) field1Length() int { +func (p *FrontendServiceLoadTxnBeginArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceGetDbNamesResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnBeginResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39413,7 +60142,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetDbNamesResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39422,10 +60151,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetDbNamesResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnBeginResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetDbsResult_() + tmp := NewTLoadTxnBeginResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -39436,13 +60165,13 @@ func (p *FrontendServiceGetDbNamesResult) FastReadField0(buf []byte) (int, error } // for compatibility -func (p *FrontendServiceGetDbNamesResult) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnBeginResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetDbNamesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnBeginResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "getDbNames_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnBegin_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -39451,9 +60180,9 @@ func (p *FrontendServiceGetDbNamesResult) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *FrontendServiceGetDbNamesResult) BLength() int { +func (p *FrontendServiceLoadTxnBeginResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getDbNames_result") + l += bthrift.Binary.StructBeginLength("loadTxnBegin_result") if p != nil { l += p.field0Length() } @@ -39462,7 +60191,7 @@ func (p *FrontendServiceGetDbNamesResult) BLength() int { return l } -func (p *FrontendServiceGetDbNamesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnBeginResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -39472,7 +60201,7 @@ func (p *FrontendServiceGetDbNamesResult) fastWriteField0(buf []byte, binaryWrit return offset } -func (p *FrontendServiceGetDbNamesResult) field0Length() int { +func (p *FrontendServiceLoadTxnBeginResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -39482,7 +60211,7 @@ func (p *FrontendServiceGetDbNamesResult) field0Length() int { return l } -func (p *FrontendServiceGetTableNamesArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnPreCommitArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39544,7 +60273,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39553,27 +60282,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnPreCommitArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTLoadTxnCommitRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceGetTableNamesArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnPreCommitArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetTableNamesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnPreCommitArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTableNames_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnPreCommit_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) 
} @@ -39582,9 +60311,9 @@ func (p *FrontendServiceGetTableNamesArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceGetTableNamesArgs) BLength() int { +func (p *FrontendServiceLoadTxnPreCommitArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getTableNames_args") + l += bthrift.Binary.StructBeginLength("loadTxnPreCommit_args") if p != nil { l += p.field1Length() } @@ -39593,23 +60322,23 @@ func (p *FrontendServiceGetTableNamesArgs) BLength() int { return l } -func (p *FrontendServiceGetTableNamesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnPreCommitArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceGetTableNamesArgs) field1Length() int { +func (p *FrontendServiceLoadTxnPreCommitArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceGetTableNamesResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnPreCommitResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39671,7 +60400,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTableNamesResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39680,10 +60409,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTableNamesResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnPreCommitResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesResult_() + tmp := NewTLoadTxnCommitResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -39694,13 +60423,13 @@ func (p *FrontendServiceGetTableNamesResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceGetTableNamesResult) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnPreCommitResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetTableNamesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnPreCommitResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTableNames_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnPreCommit_result") if p != nil { 
offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -39709,9 +60438,9 @@ func (p *FrontendServiceGetTableNamesResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceGetTableNamesResult) BLength() int { +func (p *FrontendServiceLoadTxnPreCommitResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getTableNames_result") + l += bthrift.Binary.StructBeginLength("loadTxnPreCommit_result") if p != nil { l += p.field0Length() } @@ -39720,7 +60449,7 @@ func (p *FrontendServiceGetTableNamesResult) BLength() int { return l } -func (p *FrontendServiceGetTableNamesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnPreCommitResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -39730,7 +60459,7 @@ func (p *FrontendServiceGetTableNamesResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceGetTableNamesResult) field0Length() int { +func (p *FrontendServiceLoadTxnPreCommitResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -39740,7 +60469,7 @@ func (p *FrontendServiceGetTableNamesResult) field0Length() int { return l } -func (p *FrontendServiceDescribeTableArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxn2PCArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39802,7 +60531,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39811,27 +60540,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxn2PCArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTDescribeTableParams() + tmp := NewTLoadTxn2PCRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceDescribeTableArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxn2PCArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceDescribeTableArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxn2PCArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTable_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxn2PC_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -39840,9 +60569,9 @@ func (p *FrontendServiceDescribeTableArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p 
*FrontendServiceDescribeTableArgs) BLength() int { +func (p *FrontendServiceLoadTxn2PCArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("describeTable_args") + l += bthrift.Binary.StructBeginLength("loadTxn2PC_args") if p != nil { l += p.field1Length() } @@ -39851,23 +60580,23 @@ func (p *FrontendServiceDescribeTableArgs) BLength() int { return l } -func (p *FrontendServiceDescribeTableArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxn2PCArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceDescribeTableArgs) field1Length() int { +func (p *FrontendServiceLoadTxn2PCArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceDescribeTableResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxn2PCResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -39929,7 +60658,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTableResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -39938,10 +60667,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTableResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxn2PCResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTDescribeTableResult_() + tmp := NewTLoadTxn2PCResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -39952,13 +60681,13 @@ func (p *FrontendServiceDescribeTableResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceDescribeTableResult) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxn2PCResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceDescribeTableResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxn2PCResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTable_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxn2PC_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -39967,9 +60696,9 @@ func (p *FrontendServiceDescribeTableResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p 
*FrontendServiceDescribeTableResult) BLength() int { +func (p *FrontendServiceLoadTxn2PCResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("describeTable_result") + l += bthrift.Binary.StructBeginLength("loadTxn2PC_result") if p != nil { l += p.field0Length() } @@ -39978,7 +60707,7 @@ func (p *FrontendServiceDescribeTableResult) BLength() int { return l } -func (p *FrontendServiceDescribeTableResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxn2PCResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -39988,7 +60717,7 @@ func (p *FrontendServiceDescribeTableResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceDescribeTableResult) field0Length() int { +func (p *FrontendServiceLoadTxn2PCResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -39998,7 +60727,7 @@ func (p *FrontendServiceDescribeTableResult) field0Length() int { return l } -func (p *FrontendServiceDescribeTablesArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnCommitArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40060,7 +60789,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40069,27 +60798,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnCommitArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTDescribeTablesParams() + tmp := NewTLoadTxnCommitRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceDescribeTablesArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnCommitArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceDescribeTablesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnCommitArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTables_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnCommit_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -40098,9 +60827,9 @@ func (p *FrontendServiceDescribeTablesArgs) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *FrontendServiceDescribeTablesArgs) BLength() int { +func (p *FrontendServiceLoadTxnCommitArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("describeTables_args") + l += 
bthrift.Binary.StructBeginLength("loadTxnCommit_args") if p != nil { l += p.field1Length() } @@ -40109,23 +60838,23 @@ func (p *FrontendServiceDescribeTablesArgs) BLength() int { return l } -func (p *FrontendServiceDescribeTablesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnCommitArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceDescribeTablesArgs) field1Length() int { +func (p *FrontendServiceLoadTxnCommitArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceDescribeTablesResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnCommitResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40187,7 +60916,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDescribeTablesResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40196,10 +60925,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceDescribeTablesResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnCommitResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTDescribeTablesResult_() + tmp := NewTLoadTxnCommitResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -40210,13 +60939,13 @@ func (p *FrontendServiceDescribeTablesResult) FastReadField0(buf []byte) (int, e } // for compatibility -func (p *FrontendServiceDescribeTablesResult) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnCommitResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceDescribeTablesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnCommitResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "describeTables_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnCommit_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -40225,9 +60954,9 @@ func (p *FrontendServiceDescribeTablesResult) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceDescribeTablesResult) BLength() int { +func (p *FrontendServiceLoadTxnCommitResult) BLength() int { l := 0 - l += 
bthrift.Binary.StructBeginLength("describeTables_result") + l += bthrift.Binary.StructBeginLength("loadTxnCommit_result") if p != nil { l += p.field0Length() } @@ -40236,7 +60965,7 @@ func (p *FrontendServiceDescribeTablesResult) BLength() int { return l } -func (p *FrontendServiceDescribeTablesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnCommitResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -40246,7 +60975,7 @@ func (p *FrontendServiceDescribeTablesResult) fastWriteField0(buf []byte, binary return offset } -func (p *FrontendServiceDescribeTablesResult) field0Length() int { +func (p *FrontendServiceLoadTxnCommitResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -40256,7 +60985,7 @@ func (p *FrontendServiceDescribeTablesResult) field0Length() int { return l } -func (p *FrontendServiceShowVariablesArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnRollbackArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40318,7 +61047,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40327,27 +61056,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnRollbackArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTShowVariableRequest() + tmp := NewTLoadTxnRollbackRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceShowVariablesArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnRollbackArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceShowVariablesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnRollbackArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showVariables_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnRollback_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -40356,9 +61085,9 @@ func (p *FrontendServiceShowVariablesArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceShowVariablesArgs) BLength() int { +func (p *FrontendServiceLoadTxnRollbackArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("showVariables_args") + l += bthrift.Binary.StructBeginLength("loadTxnRollback_args") if p != nil { l += p.field1Length() } @@ -40367,23 +61096,23 @@ func (p 
*FrontendServiceShowVariablesArgs) BLength() int { return l } -func (p *FrontendServiceShowVariablesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnRollbackArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceShowVariablesArgs) field1Length() int { +func (p *FrontendServiceLoadTxnRollbackArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceShowVariablesResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnRollbackResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40445,7 +61174,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowVariablesResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40454,10 +61183,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceShowVariablesResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceLoadTxnRollbackResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTShowVariableResult_() + tmp := NewTLoadTxnRollbackResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -40468,13 +61197,13 @@ func (p *FrontendServiceShowVariablesResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceShowVariablesResult) FastWrite(buf []byte) int { +func (p *FrontendServiceLoadTxnRollbackResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceShowVariablesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnRollbackResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showVariables_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnRollback_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -40483,9 +61212,9 @@ func (p *FrontendServiceShowVariablesResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceShowVariablesResult) BLength() int { +func (p *FrontendServiceLoadTxnRollbackResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("showVariables_result") + l += bthrift.Binary.StructBeginLength("loadTxnRollback_result") if p != nil { 
l += p.field0Length() } @@ -40494,7 +61223,7 @@ func (p *FrontendServiceShowVariablesResult) BLength() int { return l } -func (p *FrontendServiceShowVariablesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceLoadTxnRollbackResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -40504,7 +61233,7 @@ func (p *FrontendServiceShowVariablesResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceShowVariablesResult) field0Length() int { +func (p *FrontendServiceLoadTxnRollbackResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -40514,7 +61243,7 @@ func (p *FrontendServiceShowVariablesResult) field0Length() int { return l } -func (p *FrontendServiceReportExecStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceBeginTxnArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40576,7 +61305,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40585,27 +61314,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportExecStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceBeginTxnArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTReportExecStatusParams() + tmp := NewTBeginTxnRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceReportExecStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceBeginTxnArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceReportExecStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceBeginTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportExecStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "beginTxn_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -40614,9 +61343,9 @@ func (p *FrontendServiceReportExecStatusArgs) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceReportExecStatusArgs) BLength() int { +func (p *FrontendServiceBeginTxnArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("reportExecStatus_args") + l += bthrift.Binary.StructBeginLength("beginTxn_args") if p != nil { l += p.field1Length() } @@ -40625,23 +61354,23 @@ func (p *FrontendServiceReportExecStatusArgs) BLength() int { return l } -func (p *FrontendServiceReportExecStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) 
int { +func (p *FrontendServiceBeginTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceReportExecStatusArgs) field1Length() int { +func (p *FrontendServiceBeginTxnArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceReportExecStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceBeginTxnResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40703,7 +61432,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportExecStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40712,10 +61441,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportExecStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceBeginTxnResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTReportExecStatusResult_() + tmp := NewTBeginTxnResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -40726,13 +61455,13 @@ func (p *FrontendServiceReportExecStatusResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceReportExecStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceBeginTxnResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceReportExecStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceBeginTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportExecStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "beginTxn_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -40741,9 +61470,9 @@ func (p *FrontendServiceReportExecStatusResult) FastWriteNocopy(buf []byte, bina return offset } -func (p *FrontendServiceReportExecStatusResult) BLength() int { +func (p *FrontendServiceBeginTxnResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("reportExecStatus_result") + l += bthrift.Binary.StructBeginLength("beginTxn_result") if p != nil { l += p.field0Length() } @@ -40752,7 +61481,7 @@ func (p *FrontendServiceReportExecStatusResult) BLength() int { return l } -func (p *FrontendServiceReportExecStatusResult) fastWriteField0(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *FrontendServiceBeginTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -40762,7 +61491,7 @@ func (p *FrontendServiceReportExecStatusResult) fastWriteField0(buf []byte, bina return offset } -func (p *FrontendServiceReportExecStatusResult) field0Length() int { +func (p *FrontendServiceBeginTxnResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -40772,7 +61501,7 @@ func (p *FrontendServiceReportExecStatusResult) field0Length() int { return l } -func (p *FrontendServiceFinishTaskArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCommitTxnArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40834,7 +61563,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40843,10 +61572,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceCommitTxnArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := masterservice.NewTFinishTaskRequest() + tmp := NewTCommitTxnRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -40857,13 +61586,13 @@ func (p *FrontendServiceFinishTaskArgs) FastReadField1(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceFinishTaskArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceCommitTxnArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFinishTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCommitTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "finishTask_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "commitTxn_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -40872,9 +61601,9 @@ func (p *FrontendServiceFinishTaskArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceFinishTaskArgs) BLength() int { +func (p *FrontendServiceCommitTxnArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("finishTask_args") + l += bthrift.Binary.StructBeginLength("commitTxn_args") if p != nil { l += p.field1Length() } @@ -40883,7 +61612,7 @@ func (p *FrontendServiceFinishTaskArgs) BLength() int { return l } -func (p *FrontendServiceFinishTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCommitTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += 
p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -40891,7 +61620,7 @@ func (p *FrontendServiceFinishTaskArgs) fastWriteField1(buf []byte, binaryWriter return offset } -func (p *FrontendServiceFinishTaskArgs) field1Length() int { +func (p *FrontendServiceCommitTxnArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -40899,7 +61628,7 @@ func (p *FrontendServiceFinishTaskArgs) field1Length() int { return l } -func (p *FrontendServiceFinishTaskResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCommitTxnResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -40961,7 +61690,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFinishTaskResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -40970,10 +61699,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFinishTaskResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceCommitTxnResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := masterservice.NewTMasterResult_() + tmp := NewTCommitTxnResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -40984,13 +61713,13 @@ func (p *FrontendServiceFinishTaskResult) FastReadField0(buf []byte) (int, error } // for compatibility -func (p *FrontendServiceFinishTaskResult) FastWrite(buf []byte) int { +func (p *FrontendServiceCommitTxnResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFinishTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCommitTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "finishTask_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "commitTxn_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -40999,9 +61728,9 @@ func (p *FrontendServiceFinishTaskResult) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *FrontendServiceFinishTaskResult) BLength() int { +func (p *FrontendServiceCommitTxnResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("finishTask_result") + l += bthrift.Binary.StructBeginLength("commitTxn_result") if p != nil { l += p.field0Length() } @@ -41010,7 +61739,7 @@ func (p *FrontendServiceFinishTaskResult) BLength() int { return l } -func (p *FrontendServiceFinishTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCommitTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -41020,7 +61749,7 @@ func (p *FrontendServiceFinishTaskResult) fastWriteField0(buf []byte, binaryWrit return offset } -func (p *FrontendServiceFinishTaskResult) 
field0Length() int { +func (p *FrontendServiceCommitTxnResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -41030,7 +61759,7 @@ func (p *FrontendServiceFinishTaskResult) field0Length() int { return l } -func (p *FrontendServiceReportArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceRollbackTxnArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41092,7 +61821,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41101,10 +61830,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceRollbackTxnArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := masterservice.NewTReportRequest() + tmp := NewTRollbackTxnRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -41115,13 +61844,13 @@ func (p *FrontendServiceReportArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceReportArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceRollbackTxnArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceReportArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRollbackTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "report_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "rollbackTxn_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -41130,9 +61859,9 @@ func (p *FrontendServiceReportArgs) FastWriteNocopy(buf []byte, binaryWriter bth return offset } -func (p *FrontendServiceReportArgs) BLength() int { +func (p *FrontendServiceRollbackTxnArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("report_args") + l += bthrift.Binary.StructBeginLength("rollbackTxn_args") if p != nil { l += p.field1Length() } @@ -41141,7 +61870,7 @@ func (p *FrontendServiceReportArgs) BLength() int { return l } -func (p *FrontendServiceReportArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRollbackTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -41149,7 +61878,7 @@ func (p *FrontendServiceReportArgs) fastWriteField1(buf []byte, binaryWriter bth return offset } -func (p *FrontendServiceReportArgs) field1Length() int { +func (p *FrontendServiceRollbackTxnArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -41157,7 +61886,7 @@ func (p *FrontendServiceReportArgs) 
field1Length() int { return l } -func (p *FrontendServiceReportResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceRollbackTxnResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41219,7 +61948,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41228,10 +61957,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceReportResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceRollbackTxnResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := masterservice.NewTMasterResult_() + tmp := NewTRollbackTxnResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -41242,13 +61971,13 @@ func (p *FrontendServiceReportResult) FastReadField0(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceReportResult) FastWrite(buf []byte) int { +func (p *FrontendServiceRollbackTxnResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceReportResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRollbackTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "report_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "rollbackTxn_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -41257,9 +61986,9 @@ func (p *FrontendServiceReportResult) FastWriteNocopy(buf []byte, binaryWriter b return offset } -func (p *FrontendServiceReportResult) BLength() int { +func (p *FrontendServiceRollbackTxnResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("report_result") + l += bthrift.Binary.StructBeginLength("rollbackTxn_result") if p != nil { l += p.field0Length() } @@ -41268,7 +61997,7 @@ func (p *FrontendServiceReportResult) BLength() int { return l } -func (p *FrontendServiceReportResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRollbackTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -41278,7 +62007,7 @@ func (p *FrontendServiceReportResult) fastWriteField0(buf []byte, binaryWriter b return offset } -func (p *FrontendServiceReportResult) field0Length() int { +func (p *FrontendServiceRollbackTxnResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -41288,7 +62017,7 @@ func (p *FrontendServiceReportResult) field0Length() int { return l } -func (p *FrontendServiceFetchResourceArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41309,10 
+62038,27 @@ func (p *FrontendServiceFetchResourceArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -41332,41 +62078,73 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } +func (p *FrontendServiceGetBinlogArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTGetBinlogRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil +} + // for compatibility -func (p *FrontendServiceFetchResourceArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBinlogArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFetchResourceArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchResource_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlog_args") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *FrontendServiceFetchResourceArgs) BLength() int { +func (p *FrontendServiceGetBinlogArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("fetchResource_args") + l += bthrift.Binary.StructBeginLength("getBinlog_args") if p != nil { + l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *FrontendServiceFetchResourceResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceGetBinlogArgs) field1Length() int { + l := 0 + l += 
bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *FrontendServiceGetBinlogResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41428,7 +62206,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchResourceResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41437,10 +62215,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchResourceResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := masterservice.NewTFetchResourceResult_() + tmp := NewTGetBinlogResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -41451,13 +62229,13 @@ func (p *FrontendServiceFetchResourceResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceFetchResourceResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBinlogResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFetchResourceResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchResource_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlog_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -41466,9 +62244,9 @@ func (p *FrontendServiceFetchResourceResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceFetchResourceResult) BLength() int { +func (p *FrontendServiceGetBinlogResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("fetchResource_result") + l += bthrift.Binary.StructBeginLength("getBinlog_result") if p != nil { l += p.field0Length() } @@ -41477,7 +62255,7 @@ func (p *FrontendServiceFetchResourceResult) BLength() int { return l } -func (p *FrontendServiceFetchResourceResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -41487,7 +62265,7 @@ func (p *FrontendServiceFetchResourceResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceFetchResourceResult) field0Length() int { +func (p *FrontendServiceGetBinlogResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -41497,7 +62275,7 @@ func (p *FrontendServiceFetchResourceResult) field0Length() int { return l } -func (p *FrontendServiceForwardArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetSnapshotArgs) 
FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41559,7 +62337,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41568,27 +62346,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceForwardArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetSnapshotArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTMasterOpRequest() + tmp := NewTGetSnapshotRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceForwardArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetSnapshotArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceForwardArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "forward_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getSnapshot_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -41597,9 +62375,9 @@ func (p *FrontendServiceForwardArgs) FastWriteNocopy(buf []byte, binaryWriter bt return offset } -func (p *FrontendServiceForwardArgs) BLength() int { +func (p *FrontendServiceGetSnapshotArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("forward_args") + l += bthrift.Binary.StructBeginLength("getSnapshot_args") if p != nil { l += p.field1Length() } @@ -41608,23 +62386,23 @@ func (p *FrontendServiceForwardArgs) BLength() int { return l } -func (p *FrontendServiceForwardArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceForwardArgs) field1Length() int { +func (p *FrontendServiceGetSnapshotArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceForwardResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetSnapshotResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41686,7 +62464,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceForwardResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41695,10 +62473,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceForwardResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetSnapshotResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTMasterOpResult_() + tmp := NewTGetSnapshotResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -41709,13 +62487,13 @@ func (p *FrontendServiceForwardResult) FastReadField0(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceForwardResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetSnapshotResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceForwardResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "forward_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getSnapshot_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -41724,9 +62502,9 @@ func (p *FrontendServiceForwardResult) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceForwardResult) BLength() int { +func (p *FrontendServiceGetSnapshotResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("forward_result") + l += bthrift.Binary.StructBeginLength("getSnapshot_result") if p != nil { l += p.field0Length() } @@ -41735,7 +62513,7 @@ func (p *FrontendServiceForwardResult) BLength() int { return l } -func (p *FrontendServiceForwardResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -41745,7 +62523,7 @@ func (p *FrontendServiceForwardResult) fastWriteField0(buf []byte, binaryWriter return offset } -func (p *FrontendServiceForwardResult) field0Length() int { +func (p *FrontendServiceGetSnapshotResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -41755,7 +62533,7 @@ func (p *FrontendServiceForwardResult) field0Length() int { return l } -func (p *FrontendServiceListTableStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceRestoreSnapshotArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41817,7 +62595,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_FrontendServiceListTableStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41826,27 +62604,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceRestoreSnapshotArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTRestoreSnapshotRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceListTableStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceRestoreSnapshotArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTableStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRestoreSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "restoreSnapshot_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -41855,9 +62633,9 @@ func (p *FrontendServiceListTableStatusArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceListTableStatusArgs) BLength() int { +func (p *FrontendServiceRestoreSnapshotArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTableStatus_args") + l += bthrift.Binary.StructBeginLength("restoreSnapshot_args") if p != nil { l += p.field1Length() } @@ -41866,23 +62644,23 @@ func (p *FrontendServiceListTableStatusArgs) BLength() int { return l } -func (p *FrontendServiceListTableStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRestoreSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceListTableStatusArgs) field1Length() int { +func (p *FrontendServiceRestoreSnapshotArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceListTableStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceRestoreSnapshotResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -41944,7 +62722,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, 
fieldId, fieldIDToName_FrontendServiceListTableStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -41953,10 +62731,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceRestoreSnapshotResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTListTableStatusResult_() + tmp := NewTRestoreSnapshotResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -41967,13 +62745,13 @@ func (p *FrontendServiceListTableStatusResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceListTableStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceRestoreSnapshotResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTableStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRestoreSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "restoreSnapshot_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -41982,9 +62760,9 @@ func (p *FrontendServiceListTableStatusResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *FrontendServiceListTableStatusResult) BLength() int { +func (p *FrontendServiceRestoreSnapshotResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTableStatus_result") + l += bthrift.Binary.StructBeginLength("restoreSnapshot_result") if p != nil { l += p.field0Length() } @@ -41993,7 +62771,7 @@ func (p *FrontendServiceListTableStatusResult) BLength() int { return l } -func (p *FrontendServiceListTableStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceRestoreSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -42003,7 +62781,7 @@ func (p *FrontendServiceListTableStatusResult) fastWriteField0(buf []byte, binar return offset } -func (p *FrontendServiceListTableStatusResult) field0Length() int { +func (p *FrontendServiceRestoreSnapshotResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -42013,7 +62791,7 @@ func (p *FrontendServiceListTableStatusResult) field0Length() int { return l } -func (p *FrontendServiceListTableMetadataNameIdsArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceWaitingTxnStatusArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42075,7 +62853,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsArgs[fieldId]), err) + return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42084,27 +62862,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceWaitingTxnStatusArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTWaitingTxnStatusRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceListTableMetadataNameIdsArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceWaitingTxnStatusArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTableMetadataNameIdsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceWaitingTxnStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableMetadataNameIds_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "waitingTxnStatus_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -42113,9 +62891,9 @@ func (p *FrontendServiceListTableMetadataNameIdsArgs) FastWriteNocopy(buf []byte return offset } -func (p *FrontendServiceListTableMetadataNameIdsArgs) BLength() int { +func (p *FrontendServiceWaitingTxnStatusArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTableMetadataNameIds_args") + l += bthrift.Binary.StructBeginLength("waitingTxnStatus_args") if p != nil { l += p.field1Length() } @@ -42124,23 +62902,23 @@ func (p *FrontendServiceListTableMetadataNameIdsArgs) BLength() int { return l } -func (p *FrontendServiceListTableMetadataNameIdsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceWaitingTxnStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceListTableMetadataNameIdsArgs) field1Length() int { +func (p *FrontendServiceWaitingTxnStatusArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceListTableMetadataNameIdsResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceWaitingTxnStatusResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42202,7 +62980,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read 
field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTableMetadataNameIdsResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42211,10 +62989,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTableMetadataNameIdsResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceWaitingTxnStatusResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTListTableMetadataNameIdsResult_() + tmp := NewTWaitingTxnStatusResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -42225,13 +63003,13 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) FastReadField0(buf []byt } // for compatibility -func (p *FrontendServiceListTableMetadataNameIdsResult) FastWrite(buf []byte) int { +func (p *FrontendServiceWaitingTxnStatusResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTableMetadataNameIdsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceWaitingTxnStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTableMetadataNameIds_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "waitingTxnStatus_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -42240,9 +63018,9 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) FastWriteNocopy(buf []by return offset } -func (p *FrontendServiceListTableMetadataNameIdsResult) BLength() int { +func (p *FrontendServiceWaitingTxnStatusResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTableMetadataNameIds_result") + l += bthrift.Binary.StructBeginLength("waitingTxnStatus_result") if p != nil { l += p.field0Length() } @@ -42251,7 +63029,7 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) BLength() int { return l } -func (p *FrontendServiceListTableMetadataNameIdsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceWaitingTxnStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -42261,7 +63039,7 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) fastWriteField0(buf []by return offset } -func (p *FrontendServiceListTableMetadataNameIdsResult) field0Length() int { +func (p *FrontendServiceWaitingTxnStatusResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -42271,7 +63049,7 @@ func (p *FrontendServiceListTableMetadataNameIdsResult) field0Length() int { return l } -func (p *FrontendServiceListTablePrivilegeStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadPutArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42333,7 +63111,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42342,27 +63120,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadPutArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTStreamLoadPutRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceListTablePrivilegeStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceStreamLoadPutArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTablePrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadPutArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTablePrivilegeStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadPut_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -42371,9 +63149,9 @@ func (p *FrontendServiceListTablePrivilegeStatusArgs) FastWriteNocopy(buf []byte return offset } -func (p *FrontendServiceListTablePrivilegeStatusArgs) BLength() int { +func (p *FrontendServiceStreamLoadPutArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTablePrivilegeStatus_args") + l += bthrift.Binary.StructBeginLength("streamLoadPut_args") if p != nil { l += p.field1Length() } @@ -42382,23 +63160,23 @@ func (p *FrontendServiceListTablePrivilegeStatusArgs) BLength() int { return l } -func (p *FrontendServiceListTablePrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadPutArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceListTablePrivilegeStatusArgs) field1Length() int { +func (p *FrontendServiceStreamLoadPutArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceListTablePrivilegeStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadPutResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42460,7 +63238,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListTablePrivilegeStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42469,10 +63247,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListTablePrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadPutResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTListPrivilegesResult_() + tmp := NewTStreamLoadPutResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -42483,13 +63261,13 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) FastReadField0(buf []byt } // for compatibility -func (p *FrontendServiceListTablePrivilegeStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceStreamLoadPutResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListTablePrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadPutResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listTablePrivilegeStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadPut_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -42498,9 +63276,9 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) FastWriteNocopy(buf []by return offset } -func (p *FrontendServiceListTablePrivilegeStatusResult) BLength() int { +func (p *FrontendServiceStreamLoadPutResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listTablePrivilegeStatus_result") + l += bthrift.Binary.StructBeginLength("streamLoadPut_result") if p != nil { l += p.field0Length() } @@ -42509,7 +63287,7 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) BLength() int { return l } -func (p *FrontendServiceListTablePrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadPutResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -42519,7 +63297,7 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) fastWriteField0(buf []by return offset } -func (p *FrontendServiceListTablePrivilegeStatusResult) field0Length() int { +func (p *FrontendServiceStreamLoadPutResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -42529,7 +63307,7 @@ func (p *FrontendServiceListTablePrivilegeStatusResult) field0Length() int { return l } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42591,7 +63369,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42600,27 +63378,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTStreamLoadPutRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listSchemaPrivilegeStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadMultiTablePut_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -42629,9 +63407,9 @@ func (p *FrontendServiceListSchemaPrivilegeStatusArgs) FastWriteNocopy(buf []byt return offset } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) BLength() int { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listSchemaPrivilegeStatus_args") + l += bthrift.Binary.StructBeginLength("streamLoadMultiTablePut_args") if p != nil { l += p.field1Length() } @@ -42640,23 +63418,23 @@ func (p *FrontendServiceListSchemaPrivilegeStatusArgs) BLength() int { return l } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceListSchemaPrivilegeStatusArgs) field1Length() int { +func (p *FrontendServiceStreamLoadMultiTablePutArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastRead(buf []byte) (int, error) { +func (p 
*FrontendServiceStreamLoadMultiTablePutResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42718,7 +63496,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListSchemaPrivilegeStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42727,10 +63505,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceStreamLoadMultiTablePutResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTListPrivilegesResult_() + tmp := NewTStreamLoadMultiTablePutResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -42741,13 +63519,13 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastReadField0(buf []by } // for compatibility -func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceStreamLoadMultiTablePutResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadMultiTablePutResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listSchemaPrivilegeStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadMultiTablePut_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -42756,9 +63534,9 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) FastWriteNocopy(buf []b return offset } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) BLength() int { +func (p *FrontendServiceStreamLoadMultiTablePutResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listSchemaPrivilegeStatus_result") + l += bthrift.Binary.StructBeginLength("streamLoadMultiTablePut_result") if p != nil { l += p.field0Length() } @@ -42767,7 +63545,7 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) BLength() int { return l } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceStreamLoadMultiTablePutResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -42777,7 +63555,7 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) fastWriteField0(buf []b return offset } -func (p *FrontendServiceListSchemaPrivilegeStatusResult) field0Length() int { +func (p *FrontendServiceStreamLoadMultiTablePutResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -42787,7 +63565,7 @@ func (p *FrontendServiceListSchemaPrivilegeStatusResult) field0Length() int { 
return l } -func (p *FrontendServiceListUserPrivilegeStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceSnapshotLoaderReportArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42849,7 +63627,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42858,27 +63636,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceSnapshotLoaderReportArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTablesParams() + tmp := NewTSnapshotLoaderReportRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Params = tmp + p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceListUserPrivilegeStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceSnapshotLoaderReportArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListUserPrivilegeStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSnapshotLoaderReportArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listUserPrivilegeStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "snapshotLoaderReport_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -42887,9 +63665,9 @@ func (p *FrontendServiceListUserPrivilegeStatusArgs) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceListUserPrivilegeStatusArgs) BLength() int { +func (p *FrontendServiceSnapshotLoaderReportArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listUserPrivilegeStatus_args") + l += bthrift.Binary.StructBeginLength("snapshotLoaderReport_args") if p != nil { l += p.field1Length() } @@ -42898,23 +63676,23 @@ func (p *FrontendServiceListUserPrivilegeStatusArgs) BLength() int { return l } -func (p *FrontendServiceListUserPrivilegeStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSnapshotLoaderReportArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceListUserPrivilegeStatusArgs) field1Length() int { +func (p *FrontendServiceSnapshotLoaderReportArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1) - l += 
p.Params.BLength() + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceListUserPrivilegeStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceSnapshotLoaderReportResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -42976,7 +63754,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceListUserPrivilegeStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -42985,10 +63763,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceListUserPrivilegeStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceSnapshotLoaderReportResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTListPrivilegesResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -42999,13 +63777,13 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) FastReadField0(buf []byte } // for compatibility -func (p *FrontendServiceListUserPrivilegeStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceSnapshotLoaderReportResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceListUserPrivilegeStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSnapshotLoaderReportResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "listUserPrivilegeStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "snapshotLoaderReport_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -43014,9 +63792,9 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) FastWriteNocopy(buf []byt return offset } -func (p *FrontendServiceListUserPrivilegeStatusResult) BLength() int { +func (p *FrontendServiceSnapshotLoaderReportResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("listUserPrivilegeStatus_result") + l += bthrift.Binary.StructBeginLength("snapshotLoaderReport_result") if p != nil { l += p.field0Length() } @@ -43025,7 +63803,7 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) BLength() int { return l } -func (p *FrontendServiceListUserPrivilegeStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSnapshotLoaderReportResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -43035,7 +63813,7 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) fastWriteField0(buf []byt return offset } -func (p *FrontendServiceListUserPrivilegeStatusResult) field0Length() int { +func (p *FrontendServiceSnapshotLoaderReportResult) field0Length() int { l := 0 
if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -43045,7 +63823,7 @@ func (p *FrontendServiceListUserPrivilegeStatusResult) field0Length() int { return l } -func (p *FrontendServiceUpdateExportTaskStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServicePingArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43107,7 +63885,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43116,10 +63894,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServicePingArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTUpdateExportTaskStatusRequest() + tmp := NewTFrontendPingFrontendRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43130,13 +63908,13 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) FastReadField1(buf []byte) ( } // for compatibility -func (p *FrontendServiceUpdateExportTaskStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServicePingArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceUpdateExportTaskStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServicePingArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateExportTaskStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ping_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -43145,9 +63923,9 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceUpdateExportTaskStatusArgs) BLength() int { +func (p *FrontendServicePingArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("updateExportTaskStatus_args") + l += bthrift.Binary.StructBeginLength("ping_args") if p != nil { l += p.field1Length() } @@ -43156,7 +63934,7 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) BLength() int { return l } -func (p *FrontendServiceUpdateExportTaskStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServicePingArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -43164,7 +63942,7 @@ func (p *FrontendServiceUpdateExportTaskStatusArgs) fastWriteField1(buf []byte, return offset } -func (p *FrontendServiceUpdateExportTaskStatusArgs) field1Length() int { +func (p *FrontendServicePingArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -43172,7 +63950,7 
@@ func (p *FrontendServiceUpdateExportTaskStatusArgs) field1Length() int { return l } -func (p *FrontendServiceUpdateExportTaskStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServicePingResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43234,7 +64012,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateExportTaskStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43243,10 +64021,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateExportTaskStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServicePingResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTFeResult_() + tmp := NewTFrontendPingFrontendResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43257,13 +64035,13 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) FastReadField0(buf []byte) } // for compatibility -func (p *FrontendServiceUpdateExportTaskStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServicePingResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceUpdateExportTaskStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServicePingResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateExportTaskStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ping_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -43272,9 +64050,9 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) FastWriteNocopy(buf []byte return offset } -func (p *FrontendServiceUpdateExportTaskStatusResult) BLength() int { +func (p *FrontendServicePingResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("updateExportTaskStatus_result") + l += bthrift.Binary.StructBeginLength("ping_result") if p != nil { l += p.field0Length() } @@ -43283,7 +64061,7 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) BLength() int { return l } -func (p *FrontendServiceUpdateExportTaskStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServicePingResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -43293,7 +64071,7 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) fastWriteField0(buf []byte return offset } -func (p *FrontendServiceUpdateExportTaskStatusResult) field0Length() int { +func (p *FrontendServicePingResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -43303,7 +64081,7 @@ func (p *FrontendServiceUpdateExportTaskStatusResult) field0Length() int { return l } -func (p *FrontendServiceLoadTxnBeginArgs) 
FastRead(buf []byte) (int, error) { +func (p *FrontendServiceInitExternalCtlMetaArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43365,7 +64143,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43374,10 +64152,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceInitExternalCtlMetaArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnBeginRequest() + tmp := NewTInitExternalCtlMetaRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43388,13 +64166,13 @@ func (p *FrontendServiceLoadTxnBeginArgs) FastReadField1(buf []byte) (int, error } // for compatibility -func (p *FrontendServiceLoadTxnBeginArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceInitExternalCtlMetaArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnBeginArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInitExternalCtlMetaArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnBegin_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "initExternalCtlMeta_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -43403,9 +64181,9 @@ func (p *FrontendServiceLoadTxnBeginArgs) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *FrontendServiceLoadTxnBeginArgs) BLength() int { +func (p *FrontendServiceInitExternalCtlMetaArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnBegin_args") + l += bthrift.Binary.StructBeginLength("initExternalCtlMeta_args") if p != nil { l += p.field1Length() } @@ -43414,7 +64192,7 @@ func (p *FrontendServiceLoadTxnBeginArgs) BLength() int { return l } -func (p *FrontendServiceLoadTxnBeginArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInitExternalCtlMetaArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -43422,7 +64200,7 @@ func (p *FrontendServiceLoadTxnBeginArgs) fastWriteField1(buf []byte, binaryWrit return offset } -func (p *FrontendServiceLoadTxnBeginArgs) field1Length() int { +func (p *FrontendServiceInitExternalCtlMetaArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -43430,7 +64208,7 @@ func (p *FrontendServiceLoadTxnBeginArgs) field1Length() int { return l } -func (p *FrontendServiceLoadTxnBeginResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceInitExternalCtlMetaResult) 
FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43492,7 +64270,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnBeginResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43501,10 +64279,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnBeginResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceInitExternalCtlMetaResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnBeginResult_() + tmp := NewTInitExternalCtlMetaResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43515,13 +64293,13 @@ func (p *FrontendServiceLoadTxnBeginResult) FastReadField0(buf []byte) (int, err } // for compatibility -func (p *FrontendServiceLoadTxnBeginResult) FastWrite(buf []byte) int { +func (p *FrontendServiceInitExternalCtlMetaResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnBeginResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInitExternalCtlMetaResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnBegin_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "initExternalCtlMeta_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -43530,9 +64308,9 @@ func (p *FrontendServiceLoadTxnBeginResult) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *FrontendServiceLoadTxnBeginResult) BLength() int { +func (p *FrontendServiceInitExternalCtlMetaResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnBegin_result") + l += bthrift.Binary.StructBeginLength("initExternalCtlMeta_result") if p != nil { l += p.field0Length() } @@ -43541,7 +64319,7 @@ func (p *FrontendServiceLoadTxnBeginResult) BLength() int { return l } -func (p *FrontendServiceLoadTxnBeginResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInitExternalCtlMetaResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -43551,7 +64329,7 @@ func (p *FrontendServiceLoadTxnBeginResult) fastWriteField0(buf []byte, binaryWr return offset } -func (p *FrontendServiceLoadTxnBeginResult) field0Length() int { +func (p *FrontendServiceInitExternalCtlMetaResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -43561,7 +64339,7 @@ func (p *FrontendServiceLoadTxnBeginResult) field0Length() int { return l } -func (p *FrontendServiceLoadTxnPreCommitArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchSchemaTableDataArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43623,7 
+64401,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43632,10 +64410,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceFetchSchemaTableDataArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnCommitRequest() + tmp := NewTFetchSchemaTableDataRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43646,13 +64424,13 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) FastReadField1(buf []byte) (int, e } // for compatibility -func (p *FrontendServiceLoadTxnPreCommitArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchSchemaTableDataArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnPreCommitArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSchemaTableDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnPreCommit_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSchemaTableData_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -43661,9 +64439,9 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceLoadTxnPreCommitArgs) BLength() int { +func (p *FrontendServiceFetchSchemaTableDataArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnPreCommit_args") + l += bthrift.Binary.StructBeginLength("fetchSchemaTableData_args") if p != nil { l += p.field1Length() } @@ -43672,7 +64450,7 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) BLength() int { return l } -func (p *FrontendServiceLoadTxnPreCommitArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSchemaTableDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -43680,7 +64458,7 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) fastWriteField1(buf []byte, binary return offset } -func (p *FrontendServiceLoadTxnPreCommitArgs) field1Length() int { +func (p *FrontendServiceFetchSchemaTableDataArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -43688,7 +64466,7 @@ func (p *FrontendServiceLoadTxnPreCommitArgs) field1Length() int { return l } -func (p *FrontendServiceLoadTxnPreCommitResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchSchemaTableDataResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43750,7 +64528,7 @@ 
ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnPreCommitResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43759,10 +64537,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnPreCommitResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceFetchSchemaTableDataResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnCommitResult_() + tmp := NewTFetchSchemaTableDataResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -43773,13 +64551,13 @@ func (p *FrontendServiceLoadTxnPreCommitResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceLoadTxnPreCommitResult) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchSchemaTableDataResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnPreCommitResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSchemaTableDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnPreCommit_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSchemaTableData_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -43788,9 +64566,9 @@ func (p *FrontendServiceLoadTxnPreCommitResult) FastWriteNocopy(buf []byte, bina return offset } -func (p *FrontendServiceLoadTxnPreCommitResult) BLength() int { +func (p *FrontendServiceFetchSchemaTableDataResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnPreCommit_result") + l += bthrift.Binary.StructBeginLength("fetchSchemaTableData_result") if p != nil { l += p.field0Length() } @@ -43799,7 +64577,7 @@ func (p *FrontendServiceLoadTxnPreCommitResult) BLength() int { return l } -func (p *FrontendServiceLoadTxnPreCommitResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSchemaTableDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -43809,7 +64587,7 @@ func (p *FrontendServiceLoadTxnPreCommitResult) fastWriteField0(buf []byte, bina return offset } -func (p *FrontendServiceLoadTxnPreCommitResult) field0Length() int { +func (p *FrontendServiceFetchSchemaTableDataResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -43819,7 +64597,84 @@ func (p *FrontendServiceLoadTxnPreCommitResult) field0Length() int { return l } -func (p *FrontendServiceLoadTxn2PCArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceAcquireTokenArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = 
bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +// for compatibility +func (p *FrontendServiceAcquireTokenArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *FrontendServiceAcquireTokenArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "acquireToken_args") + if p != nil { + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceAcquireTokenArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("acquireToken_args") + if p != nil { + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *FrontendServiceAcquireTokenResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43841,9 +64696,9 @@ func (p *FrontendServiceLoadTxn2PCArgs) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 1: + case 0: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) + l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -43881,7 +64736,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAcquireTokenResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -43890,63 +64745,67 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceAcquireTokenResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxn2PCRequest() + tmp := NewTMySqlLoadAcquireTokenResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Request = tmp 
+ p.Success = tmp return offset, nil } // for compatibility -func (p *FrontendServiceLoadTxn2PCArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceAcquireTokenResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxn2PCArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAcquireTokenResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxn2PC_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "acquireToken_result") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField0(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *FrontendServiceLoadTxn2PCArgs) BLength() int { +func (p *FrontendServiceAcquireTokenResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxn2PC_args") + l += bthrift.Binary.StructBeginLength("acquireToken_result") if p != nil { - l += p.field1Length() + l += p.field0Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *FrontendServiceLoadTxn2PCArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAcquireTokenResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) - offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *FrontendServiceLoadTxn2PCArgs) field1Length() int { +func (p *FrontendServiceAcquireTokenResult) field0Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) - l += p.Request.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *FrontendServiceLoadTxn2PCResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCheckTokenArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -43968,9 +64827,9 @@ func (p *FrontendServiceLoadTxn2PCResult) FastRead(buf []byte) (int, error) { break } switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField0(buf[offset:]) + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -44008,7 +64867,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxn2PCResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckTokenArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) ReadFieldEndError: @@ -44017,158 +64876,28 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxn2PCResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceCheckTokenArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxn2PCResult_() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - } - p.Success = tmp - return offset, nil -} - -// for compatibility -func (p *FrontendServiceLoadTxn2PCResult) FastWrite(buf []byte) int { - return 0 -} - -func (p *FrontendServiceLoadTxn2PCResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxn2PC_result") - if p != nil { - offset += p.fastWriteField0(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *FrontendServiceLoadTxn2PCResult) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("loadTxn2PC_result") - if p != nil { - l += p.field0Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *FrontendServiceLoadTxn2PCResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) - offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *FrontendServiceLoadTxn2PCResult) field0Length() int { - l := 0 - if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) - l += p.Success.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *FrontendServiceLoadTxnCommitArgs) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_FrontendServiceLoadTxnCommitArgs[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *FrontendServiceLoadTxnCommitArgs) FastReadField1(buf []byte) (int, error) { - offset := 0 + p.Token = v - tmp := NewTLoadTxnCommitRequest() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.Request = tmp return offset, nil } // for compatibility -func (p *FrontendServiceLoadTxnCommitArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceCheckTokenArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnCommitArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckTokenArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnCommit_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkToken_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -44177,9 +64906,9 @@ func (p *FrontendServiceLoadTxnCommitArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceLoadTxnCommitArgs) BLength() int { +func (p *FrontendServiceCheckTokenArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnCommit_args") + l += bthrift.Binary.StructBeginLength("checkToken_args") if p != nil { l += p.field1Length() } @@ -44188,23 +64917,25 @@ func (p *FrontendServiceLoadTxnCommitArgs) BLength() int { return l } -func (p *FrontendServiceLoadTxnCommitArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckTokenArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) - offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Token) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *FrontendServiceLoadTxnCommitArgs) field1Length() int { +func (p *FrontendServiceCheckTokenArgs) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) - l += p.Request.BLength() + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.Token) + l += bthrift.Binary.FieldEndLength() return l } -func (p *FrontendServiceLoadTxnCommitResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCheckTokenResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44227,7 +64958,7 @@ func (p *FrontendServiceLoadTxnCommitResult) FastRead(buf []byte) (int, error) { } switch fieldId { case 0: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField0(buf[offset:]) offset += l if err != nil { @@ -44266,7 +64997,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnCommitResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckTokenResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44275,27 +65006,27 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnCommitResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceCheckTokenResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnCommitResult_() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Success = &v + } - p.Success = tmp return offset, nil } // for compatibility -func (p *FrontendServiceLoadTxnCommitResult) FastWrite(buf []byte) int { +func (p *FrontendServiceCheckTokenResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnCommitResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckTokenResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnCommit_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkToken_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -44304,9 +65035,9 @@ func (p *FrontendServiceLoadTxnCommitResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceLoadTxnCommitResult) BLength() int { +func (p *FrontendServiceCheckTokenResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnCommit_result") + l += bthrift.Binary.StructBeginLength("checkToken_result") if p != nil { l += p.field0Length() } @@ -44315,27 +65046,29 @@ func (p *FrontendServiceLoadTxnCommitResult) BLength() int { return l } -func (p *FrontendServiceLoadTxnCommitResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckTokenResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) - offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.BOOL, 0) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.Success) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *FrontendServiceLoadTxnCommitResult) field0Length() int { +func (p *FrontendServiceCheckTokenResult) field0Length() int { l := 0 if p.IsSetSuccess() { - l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) - l += p.Success.BLength() + l += bthrift.Binary.FieldBeginLength("success", thrift.BOOL, 0) + l += bthrift.Binary.BoolLength(*p.Success) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *FrontendServiceLoadTxnRollbackArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44397,7 +65130,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44406,10 +65139,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnRollbackRequest() + tmp := NewTConfirmUnusedRemoteFilesRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -44420,13 +65153,13 @@ func (p *FrontendServiceLoadTxnRollbackArgs) FastReadField1(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceLoadTxnRollbackArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnRollbackArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnRollback_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "confirmUnusedRemoteFiles_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -44435,9 +65168,9 @@ func (p *FrontendServiceLoadTxnRollbackArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceLoadTxnRollbackArgs) BLength() int { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnRollback_args") + l += bthrift.Binary.StructBeginLength("confirmUnusedRemoteFiles_args") if p != nil { l += p.field1Length() } @@ -44446,7 +65179,7 @@ func (p *FrontendServiceLoadTxnRollbackArgs) BLength() int { return l } -func (p *FrontendServiceLoadTxnRollbackArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -44454,7 +65187,7 @@ func (p *FrontendServiceLoadTxnRollbackArgs) fastWriteField1(buf []byte, binaryW return offset } -func (p *FrontendServiceLoadTxnRollbackArgs) field1Length() int { +func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -44462,7 +65195,7 @@ func (p *FrontendServiceLoadTxnRollbackArgs) field1Length() int { return l } -func (p *FrontendServiceLoadTxnRollbackResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44524,7 +65257,7 @@ ReadStructBeginError: ReadFieldBeginError: return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceLoadTxnRollbackResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44533,10 +65266,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceLoadTxnRollbackResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTLoadTxnRollbackResult_() + tmp := NewTConfirmUnusedRemoteFilesResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -44547,13 +65280,13 @@ func (p *FrontendServiceLoadTxnRollbackResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceLoadTxnRollbackResult) FastWrite(buf []byte) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceLoadTxnRollbackResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "loadTxnRollback_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "confirmUnusedRemoteFiles_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -44562,9 +65295,9 @@ func (p *FrontendServiceLoadTxnRollbackResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *FrontendServiceLoadTxnRollbackResult) BLength() int { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("loadTxnRollback_result") + l += bthrift.Binary.StructBeginLength("confirmUnusedRemoteFiles_result") if p != nil { l += p.field0Length() } @@ -44573,7 +65306,7 @@ func (p *FrontendServiceLoadTxnRollbackResult) BLength() int { return l } -func (p *FrontendServiceLoadTxnRollbackResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -44583,7 +65316,7 @@ func (p *FrontendServiceLoadTxnRollbackResult) fastWriteField0(buf []byte, binar return offset } -func (p *FrontendServiceLoadTxnRollbackResult) field0Length() int { +func (p *FrontendServiceConfirmUnusedRemoteFilesResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -44593,7 +65326,7 @@ func (p *FrontendServiceLoadTxnRollbackResult) field0Length() int { return l } -func (p *FrontendServiceBeginTxnArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCheckAuthArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44655,7 +65388,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44664,10 +65397,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceCheckAuthArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTBeginTxnRequest() + tmp := NewTCheckAuthRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -44678,13 +65411,13 @@ func (p *FrontendServiceBeginTxnArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceBeginTxnArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceCheckAuthArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceBeginTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckAuthArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "beginTxn_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkAuth_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -44693,9 +65426,9 @@ func (p *FrontendServiceBeginTxnArgs) FastWriteNocopy(buf []byte, binaryWriter b return offset } -func (p *FrontendServiceBeginTxnArgs) BLength() int { +func (p *FrontendServiceCheckAuthArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("beginTxn_args") + l += bthrift.Binary.StructBeginLength("checkAuth_args") if p != nil { l += p.field1Length() } @@ -44704,7 +65437,7 @@ func (p *FrontendServiceBeginTxnArgs) BLength() int { return l } -func (p *FrontendServiceBeginTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckAuthArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -44712,7 +65445,7 @@ func (p *FrontendServiceBeginTxnArgs) fastWriteField1(buf []byte, binaryWriter b return offset } -func (p *FrontendServiceBeginTxnArgs) field1Length() int { +func (p *FrontendServiceCheckAuthArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -44720,7 +65453,7 @@ func (p *FrontendServiceBeginTxnArgs) field1Length() int { return l } -func (p *FrontendServiceBeginTxnResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCheckAuthResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44782,7 +65515,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceBeginTxnResult[fieldId]), 
err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44791,10 +65524,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceBeginTxnResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceCheckAuthResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTBeginTxnResult_() + tmp := NewTCheckAuthResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -44805,13 +65538,13 @@ func (p *FrontendServiceBeginTxnResult) FastReadField0(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceBeginTxnResult) FastWrite(buf []byte) int { +func (p *FrontendServiceCheckAuthResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceBeginTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckAuthResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "beginTxn_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkAuth_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -44820,9 +65553,9 @@ func (p *FrontendServiceBeginTxnResult) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceBeginTxnResult) BLength() int { +func (p *FrontendServiceCheckAuthResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("beginTxn_result") + l += bthrift.Binary.StructBeginLength("checkAuth_result") if p != nil { l += p.field0Length() } @@ -44831,7 +65564,7 @@ func (p *FrontendServiceBeginTxnResult) BLength() int { return l } -func (p *FrontendServiceBeginTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCheckAuthResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -44841,7 +65574,7 @@ func (p *FrontendServiceBeginTxnResult) fastWriteField0(buf []byte, binaryWriter return offset } -func (p *FrontendServiceBeginTxnResult) field0Length() int { +func (p *FrontendServiceCheckAuthResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -44851,7 +65584,7 @@ func (p *FrontendServiceBeginTxnResult) field0Length() int { return l } -func (p *FrontendServiceCommitTxnArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetQueryStatsArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -44913,7 +65646,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d 
error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -44922,10 +65655,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCommitTxnArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetQueryStatsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTCommitTxnRequest() + tmp := NewTGetQueryStatsRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -44936,13 +65669,13 @@ func (p *FrontendServiceCommitTxnArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceCommitTxnArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetQueryStatsArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCommitTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetQueryStatsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "commitTxn_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getQueryStats_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -44951,9 +65684,9 @@ func (p *FrontendServiceCommitTxnArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceCommitTxnArgs) BLength() int { +func (p *FrontendServiceGetQueryStatsArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("commitTxn_args") + l += bthrift.Binary.StructBeginLength("getQueryStats_args") if p != nil { l += p.field1Length() } @@ -44962,7 +65695,7 @@ func (p *FrontendServiceCommitTxnArgs) BLength() int { return l } -func (p *FrontendServiceCommitTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetQueryStatsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -44970,7 +65703,7 @@ func (p *FrontendServiceCommitTxnArgs) fastWriteField1(buf []byte, binaryWriter return offset } -func (p *FrontendServiceCommitTxnArgs) field1Length() int { +func (p *FrontendServiceGetQueryStatsArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -44978,7 +65711,7 @@ func (p *FrontendServiceCommitTxnArgs) field1Length() int { return l } -func (p *FrontendServiceCommitTxnResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetQueryStatsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45040,7 +65773,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCommitTxnResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45049,10 +65782,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) 
} -func (p *FrontendServiceCommitTxnResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetQueryStatsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTCommitTxnResult_() + tmp := NewTQueryStatsResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45063,13 +65796,13 @@ func (p *FrontendServiceCommitTxnResult) FastReadField0(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceCommitTxnResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetQueryStatsResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCommitTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetQueryStatsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "commitTxn_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getQueryStats_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -45078,9 +65811,9 @@ func (p *FrontendServiceCommitTxnResult) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *FrontendServiceCommitTxnResult) BLength() int { +func (p *FrontendServiceGetQueryStatsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("commitTxn_result") + l += bthrift.Binary.StructBeginLength("getQueryStats_result") if p != nil { l += p.field0Length() } @@ -45089,7 +65822,7 @@ func (p *FrontendServiceCommitTxnResult) BLength() int { return l } -func (p *FrontendServiceCommitTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetQueryStatsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -45099,7 +65832,7 @@ func (p *FrontendServiceCommitTxnResult) fastWriteField0(buf []byte, binaryWrite return offset } -func (p *FrontendServiceCommitTxnResult) field0Length() int { +func (p *FrontendServiceGetQueryStatsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -45109,7 +65842,7 @@ func (p *FrontendServiceCommitTxnResult) field0Length() int { return l } -func (p *FrontendServiceRollbackTxnArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45171,7 +65904,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45180,10 +65913,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetTabletReplicaInfosArgs) FastReadField1(buf []byte) (int, error) { offset := 
0 - tmp := NewTRollbackTxnRequest() + tmp := NewTGetTabletReplicaInfosRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45194,13 +65927,13 @@ func (p *FrontendServiceRollbackTxnArgs) FastReadField1(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceRollbackTxnArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetTabletReplicaInfosArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceRollbackTxnArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTabletReplicaInfosArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "rollbackTxn_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTabletReplicaInfos_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -45209,9 +65942,9 @@ func (p *FrontendServiceRollbackTxnArgs) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *FrontendServiceRollbackTxnArgs) BLength() int { +func (p *FrontendServiceGetTabletReplicaInfosArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("rollbackTxn_args") + l += bthrift.Binary.StructBeginLength("getTabletReplicaInfos_args") if p != nil { l += p.field1Length() } @@ -45220,7 +65953,7 @@ func (p *FrontendServiceRollbackTxnArgs) BLength() int { return l } -func (p *FrontendServiceRollbackTxnArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTabletReplicaInfosArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -45228,7 +65961,7 @@ func (p *FrontendServiceRollbackTxnArgs) fastWriteField1(buf []byte, binaryWrite return offset } -func (p *FrontendServiceRollbackTxnArgs) field1Length() int { +func (p *FrontendServiceGetTabletReplicaInfosArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -45236,7 +65969,7 @@ func (p *FrontendServiceRollbackTxnArgs) field1Length() int { return l } -func (p *FrontendServiceRollbackTxnResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetTabletReplicaInfosResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45298,7 +66031,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRollbackTxnResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45307,10 +66040,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRollbackTxnResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetTabletReplicaInfosResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTRollbackTxnResult_() + tmp := 
NewTGetTabletReplicaInfosResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45321,13 +66054,13 @@ func (p *FrontendServiceRollbackTxnResult) FastReadField0(buf []byte) (int, erro } // for compatibility -func (p *FrontendServiceRollbackTxnResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetTabletReplicaInfosResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceRollbackTxnResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTabletReplicaInfosResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "rollbackTxn_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTabletReplicaInfos_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -45336,9 +66069,9 @@ func (p *FrontendServiceRollbackTxnResult) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceRollbackTxnResult) BLength() int { +func (p *FrontendServiceGetTabletReplicaInfosResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("rollbackTxn_result") + l += bthrift.Binary.StructBeginLength("getTabletReplicaInfos_result") if p != nil { l += p.field0Length() } @@ -45347,7 +66080,7 @@ func (p *FrontendServiceRollbackTxnResult) BLength() int { return l } -func (p *FrontendServiceRollbackTxnResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetTabletReplicaInfosResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -45357,7 +66090,7 @@ func (p *FrontendServiceRollbackTxnResult) fastWriteField0(buf []byte, binaryWri return offset } -func (p *FrontendServiceRollbackTxnResult) field0Length() int { +func (p *FrontendServiceGetTabletReplicaInfosResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -45367,7 +66100,7 @@ func (p *FrontendServiceRollbackTxnResult) field0Length() int { return l } -func (p *FrontendServiceGetBinlogArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45429,7 +66162,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlStoredProcedureArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45438,10 +66171,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetBinlogRequest() + tmp := NewTAddPlsqlStoredProcedureRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return 
offset, err } else { @@ -45452,13 +66185,13 @@ func (p *FrontendServiceGetBinlogArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceGetBinlogArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlog_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addPlsqlStoredProcedure_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -45467,9 +66200,9 @@ func (p *FrontendServiceGetBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceGetBinlogArgs) BLength() int { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getBinlog_args") + l += bthrift.Binary.StructBeginLength("addPlsqlStoredProcedure_args") if p != nil { l += p.field1Length() } @@ -45478,7 +66211,7 @@ func (p *FrontendServiceGetBinlogArgs) BLength() int { return l } -func (p *FrontendServiceGetBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -45486,7 +66219,7 @@ func (p *FrontendServiceGetBinlogArgs) fastWriteField1(buf []byte, binaryWriter return offset } -func (p *FrontendServiceGetBinlogArgs) field1Length() int { +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -45494,7 +66227,7 @@ func (p *FrontendServiceGetBinlogArgs) field1Length() int { return l } -func (p *FrontendServiceGetBinlogResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45556,7 +66289,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlStoredProcedureResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45565,10 +66298,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetBinlogResult_() + tmp := NewTPlsqlStoredProcedureResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45579,13 +66312,13 @@ func (p 
*FrontendServiceGetBinlogResult) FastReadField0(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceGetBinlogResult) FastWrite(buf []byte) int { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlog_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addPlsqlStoredProcedure_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -45594,9 +66327,9 @@ func (p *FrontendServiceGetBinlogResult) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *FrontendServiceGetBinlogResult) BLength() int { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getBinlog_result") + l += bthrift.Binary.StructBeginLength("addPlsqlStoredProcedure_result") if p != nil { l += p.field0Length() } @@ -45605,7 +66338,7 @@ func (p *FrontendServiceGetBinlogResult) BLength() int { return l } -func (p *FrontendServiceGetBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -45615,7 +66348,7 @@ func (p *FrontendServiceGetBinlogResult) fastWriteField0(buf []byte, binaryWrite return offset } -func (p *FrontendServiceGetBinlogResult) field0Length() int { +func (p *FrontendServiceAddPlsqlStoredProcedureResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -45625,7 +66358,7 @@ func (p *FrontendServiceGetBinlogResult) field0Length() int { return l } -func (p *FrontendServiceGetSnapshotArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45687,7 +66420,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlStoredProcedureArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45696,10 +66429,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetSnapshotRequest() + tmp := NewTDropPlsqlStoredProcedureRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45710,13 +66443,13 @@ func (p *FrontendServiceGetSnapshotArgs) FastReadField1(buf []byte) (int, error) } // for 
compatibility -func (p *FrontendServiceGetSnapshotArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getSnapshot_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "dropPlsqlStoredProcedure_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -45725,9 +66458,9 @@ func (p *FrontendServiceGetSnapshotArgs) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *FrontendServiceGetSnapshotArgs) BLength() int { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getSnapshot_args") + l += bthrift.Binary.StructBeginLength("dropPlsqlStoredProcedure_args") if p != nil { l += p.field1Length() } @@ -45736,7 +66469,7 @@ func (p *FrontendServiceGetSnapshotArgs) BLength() int { return l } -func (p *FrontendServiceGetSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -45744,7 +66477,7 @@ func (p *FrontendServiceGetSnapshotArgs) fastWriteField1(buf []byte, binaryWrite return offset } -func (p *FrontendServiceGetSnapshotArgs) field1Length() int { +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -45752,7 +66485,7 @@ func (p *FrontendServiceGetSnapshotArgs) field1Length() int { return l } -func (p *FrontendServiceGetSnapshotResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45814,7 +66547,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetSnapshotResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlStoredProcedureResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45823,10 +66556,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetSnapshotResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetSnapshotResult_() + tmp := NewTPlsqlStoredProcedureResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45837,13 +66570,13 @@ func (p *FrontendServiceGetSnapshotResult) FastReadField0(buf []byte) (int, erro } // for compatibility -func (p 
*FrontendServiceGetSnapshotResult) FastWrite(buf []byte) int { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getSnapshot_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "dropPlsqlStoredProcedure_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -45852,9 +66585,9 @@ func (p *FrontendServiceGetSnapshotResult) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceGetSnapshotResult) BLength() int { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getSnapshot_result") + l += bthrift.Binary.StructBeginLength("dropPlsqlStoredProcedure_result") if p != nil { l += p.field0Length() } @@ -45863,7 +66596,7 @@ func (p *FrontendServiceGetSnapshotResult) BLength() int { return l } -func (p *FrontendServiceGetSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -45873,7 +66606,7 @@ func (p *FrontendServiceGetSnapshotResult) fastWriteField0(buf []byte, binaryWri return offset } -func (p *FrontendServiceGetSnapshotResult) field0Length() int { +func (p *FrontendServiceDropPlsqlStoredProcedureResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -45883,7 +66616,7 @@ func (p *FrontendServiceGetSnapshotResult) field0Length() int { return l } -func (p *FrontendServiceRestoreSnapshotArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlPackageArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -45945,7 +66678,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlPackageArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -45954,10 +66687,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlPackageArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTRestoreSnapshotRequest() + tmp := NewTAddPlsqlPackageRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -45968,13 +66701,13 @@ func (p *FrontendServiceRestoreSnapshotArgs) FastReadField1(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceRestoreSnapshotArgs) FastWrite(buf []byte) int { +func (p 
*FrontendServiceAddPlsqlPackageArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceRestoreSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlPackageArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "restoreSnapshot_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addPlsqlPackage_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -45983,9 +66716,9 @@ func (p *FrontendServiceRestoreSnapshotArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceRestoreSnapshotArgs) BLength() int { +func (p *FrontendServiceAddPlsqlPackageArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("restoreSnapshot_args") + l += bthrift.Binary.StructBeginLength("addPlsqlPackage_args") if p != nil { l += p.field1Length() } @@ -45994,7 +66727,7 @@ func (p *FrontendServiceRestoreSnapshotArgs) BLength() int { return l } -func (p *FrontendServiceRestoreSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlPackageArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -46002,7 +66735,7 @@ func (p *FrontendServiceRestoreSnapshotArgs) fastWriteField1(buf []byte, binaryW return offset } -func (p *FrontendServiceRestoreSnapshotArgs) field1Length() int { +func (p *FrontendServiceAddPlsqlPackageArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -46010,7 +66743,7 @@ func (p *FrontendServiceRestoreSnapshotArgs) field1Length() int { return l } -func (p *FrontendServiceRestoreSnapshotResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlPackageResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46072,7 +66805,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceRestoreSnapshotResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddPlsqlPackageResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46081,10 +66814,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceRestoreSnapshotResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceAddPlsqlPackageResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTRestoreSnapshotResult_() + tmp := NewTPlsqlPackageResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46095,13 +66828,13 @@ func (p *FrontendServiceRestoreSnapshotResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceRestoreSnapshotResult) FastWrite(buf []byte) int { +func (p *FrontendServiceAddPlsqlPackageResult) FastWrite(buf []byte) int { return 0 } -func (p 
*FrontendServiceRestoreSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlPackageResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "restoreSnapshot_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addPlsqlPackage_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -46110,9 +66843,9 @@ func (p *FrontendServiceRestoreSnapshotResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *FrontendServiceRestoreSnapshotResult) BLength() int { +func (p *FrontendServiceAddPlsqlPackageResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("restoreSnapshot_result") + l += bthrift.Binary.StructBeginLength("addPlsqlPackage_result") if p != nil { l += p.field0Length() } @@ -46121,7 +66854,7 @@ func (p *FrontendServiceRestoreSnapshotResult) BLength() int { return l } -func (p *FrontendServiceRestoreSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceAddPlsqlPackageResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -46131,7 +66864,7 @@ func (p *FrontendServiceRestoreSnapshotResult) fastWriteField0(buf []byte, binar return offset } -func (p *FrontendServiceRestoreSnapshotResult) field0Length() int { +func (p *FrontendServiceAddPlsqlPackageResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -46141,7 +66874,7 @@ func (p *FrontendServiceRestoreSnapshotResult) field0Length() int { return l } -func (p *FrontendServiceWaitingTxnStatusArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlPackageArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46203,7 +66936,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlPackageArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46212,10 +66945,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlPackageArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTWaitingTxnStatusRequest() + tmp := NewTDropPlsqlPackageRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46226,13 +66959,13 @@ func (p *FrontendServiceWaitingTxnStatusArgs) FastReadField1(buf []byte) (int, e } // for compatibility -func (p *FrontendServiceWaitingTxnStatusArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceDropPlsqlPackageArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceWaitingTxnStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int 
{ +func (p *FrontendServiceDropPlsqlPackageArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "waitingTxnStatus_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "dropPlsqlPackage_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -46241,9 +66974,9 @@ func (p *FrontendServiceWaitingTxnStatusArgs) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceWaitingTxnStatusArgs) BLength() int { +func (p *FrontendServiceDropPlsqlPackageArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("waitingTxnStatus_args") + l += bthrift.Binary.StructBeginLength("dropPlsqlPackage_args") if p != nil { l += p.field1Length() } @@ -46252,7 +66985,7 @@ func (p *FrontendServiceWaitingTxnStatusArgs) BLength() int { return l } -func (p *FrontendServiceWaitingTxnStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlPackageArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -46260,7 +66993,7 @@ func (p *FrontendServiceWaitingTxnStatusArgs) fastWriteField1(buf []byte, binary return offset } -func (p *FrontendServiceWaitingTxnStatusArgs) field1Length() int { +func (p *FrontendServiceDropPlsqlPackageArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -46268,7 +67001,7 @@ func (p *FrontendServiceWaitingTxnStatusArgs) field1Length() int { return l } -func (p *FrontendServiceWaitingTxnStatusResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlPackageResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46330,7 +67063,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceWaitingTxnStatusResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceDropPlsqlPackageResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46339,10 +67072,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceWaitingTxnStatusResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceDropPlsqlPackageResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTWaitingTxnStatusResult_() + tmp := NewTPlsqlPackageResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46353,13 +67086,13 @@ func (p *FrontendServiceWaitingTxnStatusResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceWaitingTxnStatusResult) FastWrite(buf []byte) int { +func (p *FrontendServiceDropPlsqlPackageResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceWaitingTxnStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlPackageResult) 
FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "waitingTxnStatus_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "dropPlsqlPackage_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -46368,9 +67101,9 @@ func (p *FrontendServiceWaitingTxnStatusResult) FastWriteNocopy(buf []byte, bina return offset } -func (p *FrontendServiceWaitingTxnStatusResult) BLength() int { +func (p *FrontendServiceDropPlsqlPackageResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("waitingTxnStatus_result") + l += bthrift.Binary.StructBeginLength("dropPlsqlPackage_result") if p != nil { l += p.field0Length() } @@ -46379,7 +67112,7 @@ func (p *FrontendServiceWaitingTxnStatusResult) BLength() int { return l } -func (p *FrontendServiceWaitingTxnStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceDropPlsqlPackageResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -46389,7 +67122,7 @@ func (p *FrontendServiceWaitingTxnStatusResult) fastWriteField0(buf []byte, bina return offset } -func (p *FrontendServiceWaitingTxnStatusResult) field0Length() int { +func (p *FrontendServiceDropPlsqlPackageResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -46399,7 +67132,7 @@ func (p *FrontendServiceWaitingTxnStatusResult) field0Length() int { return l } -func (p *FrontendServiceStreamLoadPutArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetMasterTokenArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46461,7 +67194,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46470,10 +67203,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetMasterTokenArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTStreamLoadPutRequest() + tmp := NewTGetMasterTokenRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46484,13 +67217,13 @@ func (p *FrontendServiceStreamLoadPutArgs) FastReadField1(buf []byte) (int, erro } // for compatibility -func (p *FrontendServiceStreamLoadPutArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetMasterTokenArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceStreamLoadPutArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMasterTokenArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadPut_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getMasterToken_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -46499,9 +67232,9 @@ func (p *FrontendServiceStreamLoadPutArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceStreamLoadPutArgs) BLength() int { +func (p *FrontendServiceGetMasterTokenArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("streamLoadPut_args") + l += bthrift.Binary.StructBeginLength("getMasterToken_args") if p != nil { l += p.field1Length() } @@ -46510,7 +67243,7 @@ func (p *FrontendServiceStreamLoadPutArgs) BLength() int { return l } -func (p *FrontendServiceStreamLoadPutArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMasterTokenArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -46518,7 +67251,7 @@ func (p *FrontendServiceStreamLoadPutArgs) fastWriteField1(buf []byte, binaryWri return offset } -func (p *FrontendServiceStreamLoadPutArgs) field1Length() int { +func (p *FrontendServiceGetMasterTokenArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -46526,7 +67259,7 @@ func (p *FrontendServiceStreamLoadPutArgs) field1Length() int { return l } -func (p *FrontendServiceStreamLoadPutResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetMasterTokenResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46588,7 +67321,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadPutResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46597,10 +67330,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadPutResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetMasterTokenResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTStreamLoadPutResult_() + tmp := NewTGetMasterTokenResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46611,13 +67344,13 @@ func (p *FrontendServiceStreamLoadPutResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceStreamLoadPutResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetMasterTokenResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceStreamLoadPutResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMasterTokenResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadPut_result") + offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "getMasterToken_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -46626,9 +67359,9 @@ func (p *FrontendServiceStreamLoadPutResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceStreamLoadPutResult) BLength() int { +func (p *FrontendServiceGetMasterTokenResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("streamLoadPut_result") + l += bthrift.Binary.StructBeginLength("getMasterToken_result") if p != nil { l += p.field0Length() } @@ -46637,7 +67370,7 @@ func (p *FrontendServiceStreamLoadPutResult) BLength() int { return l } -func (p *FrontendServiceStreamLoadPutResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMasterTokenResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -46647,7 +67380,7 @@ func (p *FrontendServiceStreamLoadPutResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceStreamLoadPutResult) field0Length() int { +func (p *FrontendServiceGetMasterTokenResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -46657,7 +67390,7 @@ func (p *FrontendServiceStreamLoadPutResult) field0Length() int { return l } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogLagArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46719,7 +67452,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46728,10 +67461,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogLagArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTStreamLoadPutRequest() + tmp := NewTGetBinlogLagRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46742,13 +67475,13 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastReadField1(buf []byte) } // for compatibility -func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBinlogLagArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogLagArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadMultiTablePut_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlogLag_args") if p != nil { offset += 
p.fastWriteField1(buf[offset:], binaryWriter) } @@ -46757,9 +67490,9 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) BLength() int { +func (p *FrontendServiceGetBinlogLagArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("streamLoadMultiTablePut_args") + l += bthrift.Binary.StructBeginLength("getBinlogLag_args") if p != nil { l += p.field1Length() } @@ -46768,7 +67501,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) BLength() int { return l } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogLagArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -46776,7 +67509,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) fastWriteField1(buf []byte, return offset } -func (p *FrontendServiceStreamLoadMultiTablePutArgs) field1Length() int { +func (p *FrontendServiceGetBinlogLagArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -46784,7 +67517,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutArgs) field1Length() int { return l } -func (p *FrontendServiceStreamLoadMultiTablePutResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogLagResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46846,7 +67579,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceStreamLoadMultiTablePutResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46855,10 +67588,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceStreamLoadMultiTablePutResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetBinlogLagResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTStreamLoadMultiTablePutResult_() + tmp := NewTGetBinlogLagResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -46869,13 +67602,13 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) FastReadField0(buf []byte } // for compatibility -func (p *FrontendServiceStreamLoadMultiTablePutResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBinlogLagResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceStreamLoadMultiTablePutResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogLagResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "streamLoadMultiTablePut_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlogLag_result") if p != nil { offset += 
p.fastWriteField0(buf[offset:], binaryWriter) } @@ -46884,9 +67617,9 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) FastWriteNocopy(buf []byt return offset } -func (p *FrontendServiceStreamLoadMultiTablePutResult) BLength() int { +func (p *FrontendServiceGetBinlogLagResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("streamLoadMultiTablePut_result") + l += bthrift.Binary.StructBeginLength("getBinlogLag_result") if p != nil { l += p.field0Length() } @@ -46895,7 +67628,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) BLength() int { return l } -func (p *FrontendServiceStreamLoadMultiTablePutResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBinlogLagResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -46905,7 +67638,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) fastWriteField0(buf []byt return offset } -func (p *FrontendServiceStreamLoadMultiTablePutResult) field0Length() int { +func (p *FrontendServiceGetBinlogLagResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -46915,7 +67648,7 @@ func (p *FrontendServiceStreamLoadMultiTablePutResult) field0Length() int { return l } -func (p *FrontendServiceSnapshotLoaderReportArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdateStatsCacheArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -46977,7 +67710,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -46986,10 +67719,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceUpdateStatsCacheArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTSnapshotLoaderReportRequest() + tmp := NewTUpdateFollowerStatsCacheRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47000,13 +67733,13 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) FastReadField1(buf []byte) (in } // for compatibility -func (p *FrontendServiceSnapshotLoaderReportArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdateStatsCacheArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceSnapshotLoaderReportArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateStatsCacheArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "snapshotLoaderReport_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateStatsCache_args") if p != nil { offset += p.fastWriteField1(buf[offset:], 
binaryWriter) } @@ -47015,9 +67748,9 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) FastWriteNocopy(buf []byte, bi return offset } -func (p *FrontendServiceSnapshotLoaderReportArgs) BLength() int { +func (p *FrontendServiceUpdateStatsCacheArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("snapshotLoaderReport_args") + l += bthrift.Binary.StructBeginLength("updateStatsCache_args") if p != nil { l += p.field1Length() } @@ -47026,7 +67759,7 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) BLength() int { return l } -func (p *FrontendServiceSnapshotLoaderReportArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateStatsCacheArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -47034,7 +67767,7 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) fastWriteField1(buf []byte, bi return offset } -func (p *FrontendServiceSnapshotLoaderReportArgs) field1Length() int { +func (p *FrontendServiceUpdateStatsCacheArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -47042,7 +67775,7 @@ func (p *FrontendServiceSnapshotLoaderReportArgs) field1Length() int { return l } -func (p *FrontendServiceSnapshotLoaderReportResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdateStatsCacheResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47104,7 +67837,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSnapshotLoaderReportResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47113,7 +67846,7 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceSnapshotLoaderReportResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceUpdateStatsCacheResult) FastReadField0(buf []byte) (int, error) { offset := 0 tmp := status.NewTStatus() @@ -47127,13 +67860,13 @@ func (p *FrontendServiceSnapshotLoaderReportResult) FastReadField0(buf []byte) ( } // for compatibility -func (p *FrontendServiceSnapshotLoaderReportResult) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdateStatsCacheResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceSnapshotLoaderReportResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateStatsCacheResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "snapshotLoaderReport_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateStatsCache_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -47142,9 +67875,9 @@ func (p *FrontendServiceSnapshotLoaderReportResult) FastWriteNocopy(buf []byte, return offset } 
-func (p *FrontendServiceSnapshotLoaderReportResult) BLength() int { +func (p *FrontendServiceUpdateStatsCacheResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("snapshotLoaderReport_result") + l += bthrift.Binary.StructBeginLength("updateStatsCache_result") if p != nil { l += p.field0Length() } @@ -47153,7 +67886,7 @@ func (p *FrontendServiceSnapshotLoaderReportResult) BLength() int { return l } -func (p *FrontendServiceSnapshotLoaderReportResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdateStatsCacheResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -47163,7 +67896,7 @@ func (p *FrontendServiceSnapshotLoaderReportResult) fastWriteField0(buf []byte, return offset } -func (p *FrontendServiceSnapshotLoaderReportResult) field0Length() int { +func (p *FrontendServiceUpdateStatsCacheResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -47173,7 +67906,7 @@ func (p *FrontendServiceSnapshotLoaderReportResult) field0Length() int { return l } -func (p *FrontendServicePingArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47235,7 +67968,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47244,10 +67977,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServicePingArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetAutoIncrementRangeArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTFrontendPingFrontendRequest() + tmp := NewTAutoIncrementRangeRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47258,13 +67991,13 @@ func (p *FrontendServicePingArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServicePingArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetAutoIncrementRangeArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServicePingArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetAutoIncrementRangeArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ping_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getAutoIncrementRange_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -47273,9 +68006,9 @@ func (p *FrontendServicePingArgs) FastWriteNocopy(buf []byte, binaryWriter bthri return offset } -func (p *FrontendServicePingArgs) BLength() int { +func (p *FrontendServiceGetAutoIncrementRangeArgs) BLength() int 
{ l := 0 - l += bthrift.Binary.StructBeginLength("ping_args") + l += bthrift.Binary.StructBeginLength("getAutoIncrementRange_args") if p != nil { l += p.field1Length() } @@ -47284,7 +68017,7 @@ func (p *FrontendServicePingArgs) BLength() int { return l } -func (p *FrontendServicePingArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetAutoIncrementRangeArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -47292,7 +68025,7 @@ func (p *FrontendServicePingArgs) fastWriteField1(buf []byte, binaryWriter bthri return offset } -func (p *FrontendServicePingArgs) field1Length() int { +func (p *FrontendServiceGetAutoIncrementRangeArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -47300,7 +68033,7 @@ func (p *FrontendServicePingArgs) field1Length() int { return l } -func (p *FrontendServicePingResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetAutoIncrementRangeResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47362,7 +68095,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServicePingResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47371,10 +68104,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServicePingResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetAutoIncrementRangeResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTFrontendPingFrontendResult_() + tmp := NewTAutoIncrementRangeResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47385,13 +68118,13 @@ func (p *FrontendServicePingResult) FastReadField0(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServicePingResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetAutoIncrementRangeResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServicePingResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetAutoIncrementRangeResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ping_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getAutoIncrementRange_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -47400,9 +68133,9 @@ func (p *FrontendServicePingResult) FastWriteNocopy(buf []byte, binaryWriter bth return offset } -func (p *FrontendServicePingResult) BLength() int { +func (p *FrontendServiceGetAutoIncrementRangeResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("ping_result") + l += bthrift.Binary.StructBeginLength("getAutoIncrementRange_result") 
if p != nil { l += p.field0Length() } @@ -47411,7 +68144,7 @@ func (p *FrontendServicePingResult) BLength() int { return l } -func (p *FrontendServicePingResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetAutoIncrementRangeResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -47421,7 +68154,7 @@ func (p *FrontendServicePingResult) fastWriteField0(buf []byte, binaryWriter bth return offset } -func (p *FrontendServicePingResult) field0Length() int { +func (p *FrontendServiceGetAutoIncrementRangeResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -47431,7 +68164,7 @@ func (p *FrontendServicePingResult) field0Length() int { return l } -func (p *FrontendServiceAddColumnsArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCreatePartitionArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47493,7 +68226,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddColumnsArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47502,10 +68235,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceCreatePartitionArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTAddColumnsRequest() + tmp := NewTCreatePartitionRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47516,13 +68249,13 @@ func (p *FrontendServiceAddColumnsArgs) FastReadField1(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceAddColumnsArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceCreatePartitionArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceAddColumnsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCreatePartitionArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addColumns_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "createPartition_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -47531,9 +68264,9 @@ func (p *FrontendServiceAddColumnsArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceAddColumnsArgs) BLength() int { +func (p *FrontendServiceCreatePartitionArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("addColumns_args") + l += bthrift.Binary.StructBeginLength("createPartition_args") if p != nil { l += p.field1Length() } @@ -47542,7 +68275,7 @@ func (p *FrontendServiceAddColumnsArgs) BLength() int { return l } -func (p *FrontendServiceAddColumnsArgs) fastWriteField1(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCreatePartitionArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -47550,7 +68283,7 @@ func (p *FrontendServiceAddColumnsArgs) fastWriteField1(buf []byte, binaryWriter return offset } -func (p *FrontendServiceAddColumnsArgs) field1Length() int { +func (p *FrontendServiceCreatePartitionArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -47558,7 +68291,7 @@ func (p *FrontendServiceAddColumnsArgs) field1Length() int { return l } -func (p *FrontendServiceAddColumnsResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceCreatePartitionResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47620,7 +68353,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAddColumnsResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47629,10 +68362,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAddColumnsResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceCreatePartitionResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTAddColumnsResult_() + tmp := NewTCreatePartitionResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47643,13 +68376,13 @@ func (p *FrontendServiceAddColumnsResult) FastReadField0(buf []byte) (int, error } // for compatibility -func (p *FrontendServiceAddColumnsResult) FastWrite(buf []byte) int { +func (p *FrontendServiceCreatePartitionResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceAddColumnsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCreatePartitionResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "addColumns_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "createPartition_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -47658,9 +68391,9 @@ func (p *FrontendServiceAddColumnsResult) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *FrontendServiceAddColumnsResult) BLength() int { +func (p *FrontendServiceCreatePartitionResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("addColumns_result") + l += bthrift.Binary.StructBeginLength("createPartition_result") if p != nil { l += p.field0Length() } @@ -47669,7 +68402,7 @@ func (p *FrontendServiceAddColumnsResult) BLength() int { return l } -func (p *FrontendServiceAddColumnsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceCreatePartitionResult) fastWriteField0(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -47679,7 +68412,7 @@ func (p *FrontendServiceAddColumnsResult) fastWriteField0(buf []byte, binaryWrit return offset } -func (p *FrontendServiceAddColumnsResult) field0Length() int { +func (p *FrontendServiceCreatePartitionResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -47689,7 +68422,7 @@ func (p *FrontendServiceAddColumnsResult) field0Length() int { return l } -func (p *FrontendServiceInitExternalCtlMetaArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReplacePartitionArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47751,7 +68484,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReplacePartitionArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47760,10 +68493,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceReplacePartitionArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTInitExternalCtlMetaRequest() + tmp := NewTReplacePartitionRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47774,13 +68507,13 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) FastReadField1(buf []byte) (int } // for compatibility -func (p *FrontendServiceInitExternalCtlMetaArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceReplacePartitionArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceInitExternalCtlMetaArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReplacePartitionArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "initExternalCtlMeta_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "replacePartition_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -47789,9 +68522,9 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) FastWriteNocopy(buf []byte, bin return offset } -func (p *FrontendServiceInitExternalCtlMetaArgs) BLength() int { +func (p *FrontendServiceReplacePartitionArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("initExternalCtlMeta_args") + l += bthrift.Binary.StructBeginLength("replacePartition_args") if p != nil { l += p.field1Length() } @@ -47800,7 +68533,7 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) BLength() int { return l } -func (p *FrontendServiceInitExternalCtlMetaArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReplacePartitionArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -47808,7 +68541,7 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) fastWriteField1(buf []byte, bin return offset } -func (p *FrontendServiceInitExternalCtlMetaArgs) field1Length() int { +func (p *FrontendServiceReplacePartitionArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -47816,7 +68549,7 @@ func (p *FrontendServiceInitExternalCtlMetaArgs) field1Length() int { return l } -func (p *FrontendServiceInitExternalCtlMetaResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReplacePartitionResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -47878,7 +68611,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInitExternalCtlMetaResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReplacePartitionResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -47887,10 +68620,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceInitExternalCtlMetaResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceReplacePartitionResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTInitExternalCtlMetaResult_() + tmp := NewTReplacePartitionResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -47901,13 +68634,13 @@ func (p *FrontendServiceInitExternalCtlMetaResult) FastReadField0(buf []byte) (i } // for compatibility -func (p *FrontendServiceInitExternalCtlMetaResult) FastWrite(buf []byte) int { +func (p *FrontendServiceReplacePartitionResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceInitExternalCtlMetaResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReplacePartitionResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "initExternalCtlMeta_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "replacePartition_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -47916,9 +68649,9 @@ func (p *FrontendServiceInitExternalCtlMetaResult) FastWriteNocopy(buf []byte, b return offset } -func (p *FrontendServiceInitExternalCtlMetaResult) BLength() int { +func (p *FrontendServiceReplacePartitionResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("initExternalCtlMeta_result") + l += bthrift.Binary.StructBeginLength("replacePartition_result") if p != nil { l += p.field0Length() } @@ -47927,7 +68660,7 @@ func (p *FrontendServiceInitExternalCtlMetaResult) BLength() int { return l } -func (p *FrontendServiceInitExternalCtlMetaResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReplacePartitionResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if 
p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -47937,7 +68670,7 @@ func (p *FrontendServiceInitExternalCtlMetaResult) fastWriteField0(buf []byte, b return offset } -func (p *FrontendServiceInitExternalCtlMetaResult) field0Length() int { +func (p *FrontendServiceReplacePartitionResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -47947,7 +68680,7 @@ func (p *FrontendServiceInitExternalCtlMetaResult) field0Length() int { return l } -func (p *FrontendServiceFetchSchemaTableDataArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetMetaArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48009,7 +68742,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMetaArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48018,10 +68751,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetMetaArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTFetchSchemaTableDataRequest() + tmp := NewTGetMetaRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48032,13 +68765,13 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) FastReadField1(buf []byte) (in } // for compatibility -func (p *FrontendServiceFetchSchemaTableDataArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetMetaArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFetchSchemaTableDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMetaArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSchemaTableData_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getMeta_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -48047,9 +68780,9 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) FastWriteNocopy(buf []byte, bi return offset } -func (p *FrontendServiceFetchSchemaTableDataArgs) BLength() int { +func (p *FrontendServiceGetMetaArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("fetchSchemaTableData_args") + l += bthrift.Binary.StructBeginLength("getMeta_args") if p != nil { l += p.field1Length() } @@ -48058,7 +68791,7 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) BLength() int { return l } -func (p *FrontendServiceFetchSchemaTableDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMetaArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ 
-48066,7 +68799,7 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) fastWriteField1(buf []byte, bi return offset } -func (p *FrontendServiceFetchSchemaTableDataArgs) field1Length() int { +func (p *FrontendServiceGetMetaArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -48074,7 +68807,7 @@ func (p *FrontendServiceFetchSchemaTableDataArgs) field1Length() int { return l } -func (p *FrontendServiceFetchSchemaTableDataResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetMetaResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48136,7 +68869,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSchemaTableDataResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMetaResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48145,10 +68878,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceFetchSchemaTableDataResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetMetaResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTFetchSchemaTableDataResult_() + tmp := NewTGetMetaResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48159,13 +68892,13 @@ func (p *FrontendServiceFetchSchemaTableDataResult) FastReadField0(buf []byte) ( } // for compatibility -func (p *FrontendServiceFetchSchemaTableDataResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetMetaResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceFetchSchemaTableDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMetaResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSchemaTableData_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getMeta_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -48174,9 +68907,9 @@ func (p *FrontendServiceFetchSchemaTableDataResult) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceFetchSchemaTableDataResult) BLength() int { +func (p *FrontendServiceGetMetaResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("fetchSchemaTableData_result") + l += bthrift.Binary.StructBeginLength("getMeta_result") if p != nil { l += p.field0Length() } @@ -48185,7 +68918,7 @@ func (p *FrontendServiceFetchSchemaTableDataResult) BLength() int { return l } -func (p *FrontendServiceFetchSchemaTableDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetMetaResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -48195,7 +68928,7 @@ func (p *FrontendServiceFetchSchemaTableDataResult) fastWriteField0(buf []byte, return offset } -func (p 
*FrontendServiceFetchSchemaTableDataResult) field0Length() int { +func (p *FrontendServiceGetMetaResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -48205,7 +68938,7 @@ func (p *FrontendServiceFetchSchemaTableDataResult) field0Length() int { return l } -func (p *FrontendServiceAcquireTokenArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBackendMetaArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48226,10 +68959,27 @@ func (p *FrontendServiceAcquireTokenArgs) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -48249,41 +68999,73 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBackendMetaArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } +func (p *FrontendServiceGetBackendMetaArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTGetBackendMetaRequest() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Request = tmp + return offset, nil +} + // for compatibility -func (p *FrontendServiceAcquireTokenArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBackendMetaArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceAcquireTokenArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBackendMetaArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "acquireToken_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBackendMeta_args") if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *FrontendServiceAcquireTokenArgs) BLength() int { +func (p *FrontendServiceGetBackendMetaArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("acquireToken_args") + l += bthrift.Binary.StructBeginLength("getBackendMeta_args") if p != nil { + l += p.field1Length() } l += bthrift.Binary.FieldStopLength() 
l += bthrift.Binary.StructEndLength() return l } -func (p *FrontendServiceAcquireTokenResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetBackendMetaArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) + offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *FrontendServiceGetBackendMetaArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) + l += p.Request.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *FrontendServiceGetBackendMetaResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48345,7 +69127,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceAcquireTokenResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBackendMetaResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48354,10 +69136,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceAcquireTokenResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetBackendMetaResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTMySqlLoadAcquireTokenResult_() + tmp := NewTGetBackendMetaResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48368,13 +69150,13 @@ func (p *FrontendServiceAcquireTokenResult) FastReadField0(buf []byte) (int, err } // for compatibility -func (p *FrontendServiceAcquireTokenResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetBackendMetaResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceAcquireTokenResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBackendMetaResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "acquireToken_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBackendMeta_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -48383,9 +69165,9 @@ func (p *FrontendServiceAcquireTokenResult) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *FrontendServiceAcquireTokenResult) BLength() int { +func (p *FrontendServiceGetBackendMetaResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("acquireToken_result") + l += bthrift.Binary.StructBeginLength("getBackendMeta_result") if p != nil { l += p.field0Length() } @@ -48394,7 +69176,7 @@ func (p *FrontendServiceAcquireTokenResult) BLength() int { return l } -func (p *FrontendServiceAcquireTokenResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetBackendMetaResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -48404,7 +69186,7 @@ func (p *FrontendServiceAcquireTokenResult) fastWriteField0(buf []byte, binaryWr return offset } -func (p *FrontendServiceAcquireTokenResult) field0Length() int { +func (p *FrontendServiceGetBackendMetaResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -48414,7 +69196,7 @@ func (p *FrontendServiceAcquireTokenResult) field0Length() int { return l } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetColumnInfoArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48476,7 +69258,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetColumnInfoArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48485,10 +69267,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceGetColumnInfoArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTConfirmUnusedRemoteFilesRequest() + tmp := NewTGetColumnInfoRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48499,13 +69281,13 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastReadField1(buf []byte) } // for compatibility -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceGetColumnInfoArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetColumnInfoArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "confirmUnusedRemoteFiles_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getColumnInfo_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -48514,9 +69296,9 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) FastWriteNocopy(buf []byte return offset } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) BLength() int { +func (p *FrontendServiceGetColumnInfoArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("confirmUnusedRemoteFiles_args") + l += bthrift.Binary.StructBeginLength("getColumnInfo_args") if p != nil { l += p.field1Length() } @@ -48525,7 +69307,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) BLength() int { return l } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetColumnInfoArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += 
p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -48533,7 +69315,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) fastWriteField1(buf []byte return offset } -func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) field1Length() int { +func (p *FrontendServiceGetColumnInfoArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -48541,7 +69323,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) field1Length() int { return l } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceGetColumnInfoResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48603,7 +69385,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceConfirmUnusedRemoteFilesResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetColumnInfoResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48612,10 +69394,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceGetColumnInfoResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTConfirmUnusedRemoteFilesResult_() + tmp := NewTGetColumnInfoResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48626,13 +69408,13 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastReadField0(buf []byt } // for compatibility -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastWrite(buf []byte) int { +func (p *FrontendServiceGetColumnInfoResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetColumnInfoResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "confirmUnusedRemoteFiles_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getColumnInfo_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -48641,9 +69423,9 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) FastWriteNocopy(buf []by return offset } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) BLength() int { +func (p *FrontendServiceGetColumnInfoResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("confirmUnusedRemoteFiles_result") + l += bthrift.Binary.StructBeginLength("getColumnInfo_result") if p != nil { l += p.field0Length() } @@ -48652,7 +69434,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) BLength() int { return l } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceGetColumnInfoResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -48662,7 +69444,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) fastWriteField0(buf []by return offset } -func (p *FrontendServiceConfirmUnusedRemoteFilesResult) field0Length() int { +func (p *FrontendServiceGetColumnInfoResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -48672,7 +69454,7 @@ func (p *FrontendServiceConfirmUnusedRemoteFilesResult) field0Length() int { return l } -func (p *FrontendServiceCheckAuthArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceInvalidateStatsCacheArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48734,7 +69516,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInvalidateStatsCacheArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48743,10 +69525,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceInvalidateStatsCacheArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTCheckAuthRequest() + tmp := NewTInvalidateFollowerStatsCacheRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48757,13 +69539,13 @@ func (p *FrontendServiceCheckAuthArgs) FastReadField1(buf []byte) (int, error) { } // for compatibility -func (p *FrontendServiceCheckAuthArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceInvalidateStatsCacheArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCheckAuthArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInvalidateStatsCacheArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkAuth_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "invalidateStatsCache_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -48772,9 +69554,9 @@ func (p *FrontendServiceCheckAuthArgs) FastWriteNocopy(buf []byte, binaryWriter return offset } -func (p *FrontendServiceCheckAuthArgs) BLength() int { +func (p *FrontendServiceInvalidateStatsCacheArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("checkAuth_args") + l += bthrift.Binary.StructBeginLength("invalidateStatsCache_args") if p != nil { l += p.field1Length() } @@ -48783,7 +69565,7 @@ func (p *FrontendServiceCheckAuthArgs) BLength() int { return l } -func (p *FrontendServiceCheckAuthArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInvalidateStatsCacheArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -48791,7 
+69573,7 @@ func (p *FrontendServiceCheckAuthArgs) fastWriteField1(buf []byte, binaryWriter return offset } -func (p *FrontendServiceCheckAuthArgs) field1Length() int { +func (p *FrontendServiceInvalidateStatsCacheArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -48799,7 +69581,7 @@ func (p *FrontendServiceCheckAuthArgs) field1Length() int { return l } -func (p *FrontendServiceCheckAuthResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceInvalidateStatsCacheResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48861,7 +69643,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCheckAuthResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceInvalidateStatsCacheResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -48870,10 +69652,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCheckAuthResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceInvalidateStatsCacheResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTCheckAuthResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -48884,13 +69666,13 @@ func (p *FrontendServiceCheckAuthResult) FastReadField0(buf []byte) (int, error) } // for compatibility -func (p *FrontendServiceCheckAuthResult) FastWrite(buf []byte) int { +func (p *FrontendServiceInvalidateStatsCacheResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCheckAuthResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInvalidateStatsCacheResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "checkAuth_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "invalidateStatsCache_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -48899,9 +69681,9 @@ func (p *FrontendServiceCheckAuthResult) FastWriteNocopy(buf []byte, binaryWrite return offset } -func (p *FrontendServiceCheckAuthResult) BLength() int { +func (p *FrontendServiceInvalidateStatsCacheResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("checkAuth_result") + l += bthrift.Binary.StructBeginLength("invalidateStatsCache_result") if p != nil { l += p.field0Length() } @@ -48910,7 +69692,7 @@ func (p *FrontendServiceCheckAuthResult) BLength() int { return l } -func (p *FrontendServiceCheckAuthResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceInvalidateStatsCacheResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -48920,7 +69702,7 @@ func (p *FrontendServiceCheckAuthResult) fastWriteField0(buf []byte, binaryWrite return offset } -func (p 
*FrontendServiceCheckAuthResult) field0Length() int { +func (p *FrontendServiceInvalidateStatsCacheResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -48930,7 +69712,7 @@ func (p *FrontendServiceCheckAuthResult) field0Length() int { return l } -func (p *FrontendServiceGetQueryStatsArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceShowProcessListArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -48992,7 +69774,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowProcessListArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49001,10 +69783,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceShowProcessListArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetQueryStatsRequest() + tmp := NewTShowProcessListRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49015,13 +69797,13 @@ func (p *FrontendServiceGetQueryStatsArgs) FastReadField1(buf []byte) (int, erro } // for compatibility -func (p *FrontendServiceGetQueryStatsArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceShowProcessListArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetQueryStatsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowProcessListArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getQueryStats_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showProcessList_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -49030,9 +69812,9 @@ func (p *FrontendServiceGetQueryStatsArgs) FastWriteNocopy(buf []byte, binaryWri return offset } -func (p *FrontendServiceGetQueryStatsArgs) BLength() int { +func (p *FrontendServiceShowProcessListArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getQueryStats_args") + l += bthrift.Binary.StructBeginLength("showProcessList_args") if p != nil { l += p.field1Length() } @@ -49041,7 +69823,7 @@ func (p *FrontendServiceGetQueryStatsArgs) BLength() int { return l } -func (p *FrontendServiceGetQueryStatsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowProcessListArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -49049,7 +69831,7 @@ func (p *FrontendServiceGetQueryStatsArgs) fastWriteField1(buf []byte, binaryWri return offset } -func (p *FrontendServiceGetQueryStatsArgs) field1Length() int { +func (p *FrontendServiceShowProcessListArgs) field1Length() int { 
l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -49057,7 +69839,7 @@ func (p *FrontendServiceGetQueryStatsArgs) field1Length() int { return l } -func (p *FrontendServiceGetQueryStatsResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceShowProcessListResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49119,7 +69901,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetQueryStatsResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowProcessListResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49128,10 +69910,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetQueryStatsResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceShowProcessListResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTQueryStatsResult_() + tmp := NewTShowProcessListResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49142,13 +69924,13 @@ func (p *FrontendServiceGetQueryStatsResult) FastReadField0(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceGetQueryStatsResult) FastWrite(buf []byte) int { +func (p *FrontendServiceShowProcessListResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetQueryStatsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowProcessListResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getQueryStats_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showProcessList_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -49157,9 +69939,9 @@ func (p *FrontendServiceGetQueryStatsResult) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceGetQueryStatsResult) BLength() int { +func (p *FrontendServiceShowProcessListResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getQueryStats_result") + l += bthrift.Binary.StructBeginLength("showProcessList_result") if p != nil { l += p.field0Length() } @@ -49168,7 +69950,7 @@ func (p *FrontendServiceGetQueryStatsResult) BLength() int { return l } -func (p *FrontendServiceGetQueryStatsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowProcessListResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -49178,7 +69960,7 @@ func (p *FrontendServiceGetQueryStatsResult) fastWriteField0(buf []byte, binaryW return offset } -func (p *FrontendServiceGetQueryStatsResult) field0Length() int { +func (p *FrontendServiceShowProcessListResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -49188,7 +69970,7 @@ func (p 
*FrontendServiceGetQueryStatsResult) field0Length() int { return l } -func (p *FrontendServiceGetTabletReplicaInfosArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReportCommitTxnResultArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49250,7 +70032,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportCommitTxnResultArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49259,10 +70041,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceReportCommitTxnResultArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTabletReplicaInfosRequest() + tmp := NewTReportCommitTxnResultRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49273,13 +70055,13 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) FastReadField1(buf []byte) (i } // for compatibility -func (p *FrontendServiceGetTabletReplicaInfosArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceReportCommitTxnResultArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetTabletReplicaInfosArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportCommitTxnResultArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTabletReplicaInfos_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportCommitTxnResult_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -49288,9 +70070,9 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) FastWriteNocopy(buf []byte, b return offset } -func (p *FrontendServiceGetTabletReplicaInfosArgs) BLength() int { +func (p *FrontendServiceReportCommitTxnResultArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getTabletReplicaInfos_args") + l += bthrift.Binary.StructBeginLength("reportCommitTxnResult_args") if p != nil { l += p.field1Length() } @@ -49299,7 +70081,7 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) BLength() int { return l } -func (p *FrontendServiceGetTabletReplicaInfosArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportCommitTxnResultArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -49307,7 +70089,7 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) fastWriteField1(buf []byte, b return offset } -func (p *FrontendServiceGetTabletReplicaInfosArgs) field1Length() int { +func (p *FrontendServiceReportCommitTxnResultArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += 
p.Request.BLength() @@ -49315,7 +70097,7 @@ func (p *FrontendServiceGetTabletReplicaInfosArgs) field1Length() int { return l } -func (p *FrontendServiceGetTabletReplicaInfosResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceReportCommitTxnResultResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49377,7 +70159,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetTabletReplicaInfosResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceReportCommitTxnResultResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49386,10 +70168,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetTabletReplicaInfosResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceReportCommitTxnResultResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetTabletReplicaInfosResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49400,13 +70182,13 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) FastReadField0(buf []byte) } // for compatibility -func (p *FrontendServiceGetTabletReplicaInfosResult) FastWrite(buf []byte) int { +func (p *FrontendServiceReportCommitTxnResultResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetTabletReplicaInfosResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportCommitTxnResultResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getTabletReplicaInfos_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "reportCommitTxnResult_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -49415,9 +70197,9 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceGetTabletReplicaInfosResult) BLength() int { +func (p *FrontendServiceReportCommitTxnResultResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getTabletReplicaInfos_result") + l += bthrift.Binary.StructBeginLength("reportCommitTxnResult_result") if p != nil { l += p.field0Length() } @@ -49426,7 +70208,7 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) BLength() int { return l } -func (p *FrontendServiceGetTabletReplicaInfosResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceReportCommitTxnResultResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -49436,7 +70218,7 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) fastWriteField0(buf []byte, return offset } -func (p *FrontendServiceGetTabletReplicaInfosResult) field0Length() int { +func (p *FrontendServiceReportCommitTxnResultResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += 
bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -49446,7 +70228,7 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) field0Length() int { return l } -func (p *FrontendServiceGetMasterTokenArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceShowUserArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49508,7 +70290,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowUserArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49517,10 +70299,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceShowUserArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetMasterTokenRequest() + tmp := NewTShowUserRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49531,13 +70313,13 @@ func (p *FrontendServiceGetMasterTokenArgs) FastReadField1(buf []byte) (int, err } // for compatibility -func (p *FrontendServiceGetMasterTokenArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceShowUserArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetMasterTokenArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowUserArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getMasterToken_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showUser_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -49546,9 +70328,9 @@ func (p *FrontendServiceGetMasterTokenArgs) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *FrontendServiceGetMasterTokenArgs) BLength() int { +func (p *FrontendServiceShowUserArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getMasterToken_args") + l += bthrift.Binary.StructBeginLength("showUser_args") if p != nil { l += p.field1Length() } @@ -49557,7 +70339,7 @@ func (p *FrontendServiceGetMasterTokenArgs) BLength() int { return l } -func (p *FrontendServiceGetMasterTokenArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowUserArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -49565,7 +70347,7 @@ func (p *FrontendServiceGetMasterTokenArgs) fastWriteField1(buf []byte, binaryWr return offset } -func (p *FrontendServiceGetMasterTokenArgs) field1Length() int { +func (p *FrontendServiceShowUserArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -49573,7 +70355,7 @@ func (p *FrontendServiceGetMasterTokenArgs) field1Length() int { return l } -func (p 
*FrontendServiceGetMasterTokenResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceShowUserResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49635,7 +70417,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetMasterTokenResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceShowUserResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49644,10 +70426,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetMasterTokenResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceShowUserResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetMasterTokenResult_() + tmp := NewTShowUserResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49658,13 +70440,13 @@ func (p *FrontendServiceGetMasterTokenResult) FastReadField0(buf []byte) (int, e } // for compatibility -func (p *FrontendServiceGetMasterTokenResult) FastWrite(buf []byte) int { +func (p *FrontendServiceShowUserResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetMasterTokenResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowUserResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getMasterToken_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "showUser_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -49673,9 +70455,9 @@ func (p *FrontendServiceGetMasterTokenResult) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceGetMasterTokenResult) BLength() int { +func (p *FrontendServiceShowUserResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getMasterToken_result") + l += bthrift.Binary.StructBeginLength("showUser_result") if p != nil { l += p.field0Length() } @@ -49684,7 +70466,7 @@ func (p *FrontendServiceGetMasterTokenResult) BLength() int { return l } -func (p *FrontendServiceGetMasterTokenResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceShowUserResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -49694,7 +70476,7 @@ func (p *FrontendServiceGetMasterTokenResult) fastWriteField0(buf []byte, binary return offset } -func (p *FrontendServiceGetMasterTokenResult) field0Length() int { +func (p *FrontendServiceShowUserResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -49704,7 +70486,7 @@ func (p *FrontendServiceGetMasterTokenResult) field0Length() int { return l } -func (p *FrontendServiceGetBinlogLagArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceSyncQueryColumnsArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l 
int @@ -49766,7 +70548,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSyncQueryColumnsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49775,10 +70557,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceSyncQueryColumnsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTGetBinlogLagRequest() + tmp := NewTSyncQueryColumns() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49789,13 +70571,13 @@ func (p *FrontendServiceGetBinlogLagArgs) FastReadField1(buf []byte) (int, error } // for compatibility -func (p *FrontendServiceGetBinlogLagArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceSyncQueryColumnsArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetBinlogLagArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSyncQueryColumnsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlogLag_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "syncQueryColumns_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -49804,9 +70586,9 @@ func (p *FrontendServiceGetBinlogLagArgs) FastWriteNocopy(buf []byte, binaryWrit return offset } -func (p *FrontendServiceGetBinlogLagArgs) BLength() int { +func (p *FrontendServiceSyncQueryColumnsArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getBinlogLag_args") + l += bthrift.Binary.StructBeginLength("syncQueryColumns_args") if p != nil { l += p.field1Length() } @@ -49815,7 +70597,7 @@ func (p *FrontendServiceGetBinlogLagArgs) BLength() int { return l } -func (p *FrontendServiceGetBinlogLagArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSyncQueryColumnsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -49823,7 +70605,7 @@ func (p *FrontendServiceGetBinlogLagArgs) fastWriteField1(buf []byte, binaryWrit return offset } -func (p *FrontendServiceGetBinlogLagArgs) field1Length() int { +func (p *FrontendServiceSyncQueryColumnsArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -49831,7 +70613,7 @@ func (p *FrontendServiceGetBinlogLagArgs) field1Length() int { return l } -func (p *FrontendServiceGetBinlogLagResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceSyncQueryColumnsResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -49893,7 +70675,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read 
field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetBinlogLagResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceSyncQueryColumnsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -49902,10 +70684,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetBinlogLagResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceSyncQueryColumnsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTGetBinlogLagResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -49916,13 +70698,13 @@ func (p *FrontendServiceGetBinlogLagResult) FastReadField0(buf []byte) (int, err } // for compatibility -func (p *FrontendServiceGetBinlogLagResult) FastWrite(buf []byte) int { +func (p *FrontendServiceSyncQueryColumnsResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetBinlogLagResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSyncQueryColumnsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getBinlogLag_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "syncQueryColumns_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -49931,9 +70713,9 @@ func (p *FrontendServiceGetBinlogLagResult) FastWriteNocopy(buf []byte, binaryWr return offset } -func (p *FrontendServiceGetBinlogLagResult) BLength() int { +func (p *FrontendServiceSyncQueryColumnsResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getBinlogLag_result") + l += bthrift.Binary.StructBeginLength("syncQueryColumns_result") if p != nil { l += p.field0Length() } @@ -49942,7 +70724,7 @@ func (p *FrontendServiceGetBinlogLagResult) BLength() int { return l } -func (p *FrontendServiceGetBinlogLagResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceSyncQueryColumnsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -49952,7 +70734,7 @@ func (p *FrontendServiceGetBinlogLagResult) fastWriteField0(buf []byte, binaryWr return offset } -func (p *FrontendServiceGetBinlogLagResult) field0Length() int { +func (p *FrontendServiceSyncQueryColumnsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -49962,7 +70744,7 @@ func (p *FrontendServiceGetBinlogLagResult) field0Length() int { return l } -func (p *FrontendServiceUpdateStatsCacheArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchSplitBatchArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50024,7 +70806,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: 
", p, fieldId, fieldIDToName_FrontendServiceUpdateStatsCacheArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSplitBatchArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50033,10 +70815,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceFetchSplitBatchArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTUpdateFollowerStatsCacheRequest() + tmp := NewTFetchSplitBatchRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50047,13 +70829,13 @@ func (p *FrontendServiceUpdateStatsCacheArgs) FastReadField1(buf []byte) (int, e } // for compatibility -func (p *FrontendServiceUpdateStatsCacheArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchSplitBatchArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceUpdateStatsCacheArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSplitBatchArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateStatsCache_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSplitBatch_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -50062,9 +70844,9 @@ func (p *FrontendServiceUpdateStatsCacheArgs) FastWriteNocopy(buf []byte, binary return offset } -func (p *FrontendServiceUpdateStatsCacheArgs) BLength() int { +func (p *FrontendServiceFetchSplitBatchArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("updateStatsCache_args") + l += bthrift.Binary.StructBeginLength("fetchSplitBatch_args") if p != nil { l += p.field1Length() } @@ -50073,7 +70855,7 @@ func (p *FrontendServiceUpdateStatsCacheArgs) BLength() int { return l } -func (p *FrontendServiceUpdateStatsCacheArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSplitBatchArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -50081,7 +70863,7 @@ func (p *FrontendServiceUpdateStatsCacheArgs) fastWriteField1(buf []byte, binary return offset } -func (p *FrontendServiceUpdateStatsCacheArgs) field1Length() int { +func (p *FrontendServiceFetchSplitBatchArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -50089,7 +70871,7 @@ func (p *FrontendServiceUpdateStatsCacheArgs) field1Length() int { return l } -func (p *FrontendServiceUpdateStatsCacheResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchSplitBatchResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50151,7 +70933,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_FrontendServiceUpdateStatsCacheResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchSplitBatchResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50160,10 +70942,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceUpdateStatsCacheResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceFetchSplitBatchResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := NewTFetchSplitBatchResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50174,13 +70956,13 @@ func (p *FrontendServiceUpdateStatsCacheResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceUpdateStatsCacheResult) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchSplitBatchResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceUpdateStatsCacheResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSplitBatchResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updateStatsCache_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchSplitBatch_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -50189,9 +70971,9 @@ func (p *FrontendServiceUpdateStatsCacheResult) FastWriteNocopy(buf []byte, bina return offset } -func (p *FrontendServiceUpdateStatsCacheResult) BLength() int { +func (p *FrontendServiceFetchSplitBatchResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("updateStatsCache_result") + l += bthrift.Binary.StructBeginLength("fetchSplitBatch_result") if p != nil { l += p.field0Length() } @@ -50200,7 +70982,7 @@ func (p *FrontendServiceUpdateStatsCacheResult) BLength() int { return l } -func (p *FrontendServiceUpdateStatsCacheResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchSplitBatchResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -50210,7 +70992,7 @@ func (p *FrontendServiceUpdateStatsCacheResult) fastWriteField0(buf []byte, bina return offset } -func (p *FrontendServiceUpdateStatsCacheResult) field0Length() int { +func (p *FrontendServiceFetchSplitBatchResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -50220,7 +71002,7 @@ func (p *FrontendServiceUpdateStatsCacheResult) field0Length() int { return l } -func (p *FrontendServiceGetAutoIncrementRangeArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50282,7 +71064,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeArgs[fieldId]), err) + return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdatePartitionStatsCacheArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50291,10 +71073,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTAutoIncrementRangeRequest() + tmp := NewTUpdateFollowerPartitionStatsCacheRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50305,13 +71087,13 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) FastReadField1(buf []byte) (i } // for compatibility -func (p *FrontendServiceGetAutoIncrementRangeArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetAutoIncrementRangeArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getAutoIncrementRange_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updatePartitionStatsCache_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -50320,9 +71102,9 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) FastWriteNocopy(buf []byte, b return offset } -func (p *FrontendServiceGetAutoIncrementRangeArgs) BLength() int { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getAutoIncrementRange_args") + l += bthrift.Binary.StructBeginLength("updatePartitionStatsCache_args") if p != nil { l += p.field1Length() } @@ -50331,7 +71113,7 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) BLength() int { return l } -func (p *FrontendServiceGetAutoIncrementRangeArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -50339,7 +71121,7 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) fastWriteField1(buf []byte, b return offset } -func (p *FrontendServiceGetAutoIncrementRangeArgs) field1Length() int { +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -50347,7 +71129,7 @@ func (p *FrontendServiceGetAutoIncrementRangeArgs) field1Length() int { return l } -func (p *FrontendServiceGetAutoIncrementRangeResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50409,7 +71191,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' 
error: ", p, fieldId, fieldIDToName_FrontendServiceGetAutoIncrementRangeResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceUpdatePartitionStatsCacheResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50418,10 +71200,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceGetAutoIncrementRangeResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTAutoIncrementRangeResult_() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50432,13 +71214,13 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) FastReadField0(buf []byte) } // for compatibility -func (p *FrontendServiceGetAutoIncrementRangeResult) FastWrite(buf []byte) int { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceGetAutoIncrementRangeResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "getAutoIncrementRange_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "updatePartitionStatsCache_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -50447,9 +71229,9 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) FastWriteNocopy(buf []byte, return offset } -func (p *FrontendServiceGetAutoIncrementRangeResult) BLength() int { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("getAutoIncrementRange_result") + l += bthrift.Binary.StructBeginLength("updatePartitionStatsCache_result") if p != nil { l += p.field0Length() } @@ -50458,7 +71240,7 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) BLength() int { return l } -func (p *FrontendServiceGetAutoIncrementRangeResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -50468,7 +71250,7 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) fastWriteField0(buf []byte, return offset } -func (p *FrontendServiceGetAutoIncrementRangeResult) field0Length() int { +func (p *FrontendServiceUpdatePartitionStatsCacheResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -50478,7 +71260,7 @@ func (p *FrontendServiceGetAutoIncrementRangeResult) field0Length() int { return l } -func (p *FrontendServiceCreatePartitionArgs) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchRunningQueriesArgs) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50540,7 +71322,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchRunningQueriesArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50549,10 +71331,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionArgs) FastReadField1(buf []byte) (int, error) { +func (p *FrontendServiceFetchRunningQueriesArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTCreatePartitionRequest() + tmp := NewTFetchRunningQueriesRequest() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50563,13 +71345,13 @@ func (p *FrontendServiceCreatePartitionArgs) FastReadField1(buf []byte) (int, er } // for compatibility -func (p *FrontendServiceCreatePartitionArgs) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchRunningQueriesArgs) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCreatePartitionArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchRunningQueriesArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "createPartition_args") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchRunningQueries_args") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) } @@ -50578,9 +71360,9 @@ func (p *FrontendServiceCreatePartitionArgs) FastWriteNocopy(buf []byte, binaryW return offset } -func (p *FrontendServiceCreatePartitionArgs) BLength() int { +func (p *FrontendServiceFetchRunningQueriesArgs) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("createPartition_args") + l += bthrift.Binary.StructBeginLength("fetchRunningQueries_args") if p != nil { l += p.field1Length() } @@ -50589,7 +71371,7 @@ func (p *FrontendServiceCreatePartitionArgs) BLength() int { return l } -func (p *FrontendServiceCreatePartitionArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchRunningQueriesArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1) offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter) @@ -50597,7 +71379,7 @@ func (p *FrontendServiceCreatePartitionArgs) fastWriteField1(buf []byte, binaryW return offset } -func (p *FrontendServiceCreatePartitionArgs) field1Length() int { +func (p *FrontendServiceFetchRunningQueriesArgs) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1) l += p.Request.BLength() @@ -50605,7 +71387,7 @@ func (p *FrontendServiceCreatePartitionArgs) field1Length() int { return l } -func (p *FrontendServiceCreatePartitionResult) FastRead(buf []byte) (int, error) { +func (p *FrontendServiceFetchRunningQueriesResult) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -50667,7 +71449,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d 
'%s' error: ", p, fieldId, fieldIDToName_FrontendServiceCreatePartitionResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_FrontendServiceFetchRunningQueriesResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -50676,10 +71458,10 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *FrontendServiceCreatePartitionResult) FastReadField0(buf []byte) (int, error) { +func (p *FrontendServiceFetchRunningQueriesResult) FastReadField0(buf []byte) (int, error) { offset := 0 - tmp := NewTCreatePartitionResult_() + tmp := NewTFetchRunningQueriesResult_() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -50690,13 +71472,13 @@ func (p *FrontendServiceCreatePartitionResult) FastReadField0(buf []byte) (int, } // for compatibility -func (p *FrontendServiceCreatePartitionResult) FastWrite(buf []byte) int { +func (p *FrontendServiceFetchRunningQueriesResult) FastWrite(buf []byte) int { return 0 } -func (p *FrontendServiceCreatePartitionResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchRunningQueriesResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "createPartition_result") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "fetchRunningQueries_result") if p != nil { offset += p.fastWriteField0(buf[offset:], binaryWriter) } @@ -50705,9 +71487,9 @@ func (p *FrontendServiceCreatePartitionResult) FastWriteNocopy(buf []byte, binar return offset } -func (p *FrontendServiceCreatePartitionResult) BLength() int { +func (p *FrontendServiceFetchRunningQueriesResult) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("createPartition_result") + l += bthrift.Binary.StructBeginLength("fetchRunningQueries_result") if p != nil { l += p.field0Length() } @@ -50716,7 +71498,7 @@ func (p *FrontendServiceCreatePartitionResult) BLength() int { return l } -func (p *FrontendServiceCreatePartitionResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *FrontendServiceFetchRunningQueriesResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetSuccess() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) @@ -50726,7 +71508,7 @@ func (p *FrontendServiceCreatePartitionResult) fastWriteField0(buf []byte, binar return offset } -func (p *FrontendServiceCreatePartitionResult) field0Length() int { +func (p *FrontendServiceFetchRunningQueriesResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) @@ -50992,14 +71774,6 @@ func (p *FrontendServicePingResult) GetResult() interface{} { return p.Success } -func (p *FrontendServiceAddColumnsArgs) GetFirstArgument() interface{} { - return p.Request -} - -func (p *FrontendServiceAddColumnsResult) GetResult() interface{} { - return p.Success -} - func (p *FrontendServiceInitExternalCtlMetaArgs) GetFirstArgument() interface{} { return p.Request } @@ -51024,6 +71798,14 @@ func (p *FrontendServiceAcquireTokenResult) GetResult() interface{} { return p.Success } +func (p *FrontendServiceCheckTokenArgs) GetFirstArgument() interface{} { + return p.Token +} + +func (p 
*FrontendServiceCheckTokenResult) GetResult() interface{} { + return p.Success +} + func (p *FrontendServiceConfirmUnusedRemoteFilesArgs) GetFirstArgument() interface{} { return p.Request } @@ -51056,6 +71838,38 @@ func (p *FrontendServiceGetTabletReplicaInfosResult) GetResult() interface{} { return p.Success } +func (p *FrontendServiceAddPlsqlStoredProcedureArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceAddPlsqlStoredProcedureResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceDropPlsqlStoredProcedureArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceDropPlsqlStoredProcedureResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceAddPlsqlPackageArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceAddPlsqlPackageResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceDropPlsqlPackageArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceDropPlsqlPackageResult) GetResult() interface{} { + return p.Success +} + func (p *FrontendServiceGetMasterTokenArgs) GetFirstArgument() interface{} { return p.Request } @@ -51095,3 +71909,99 @@ func (p *FrontendServiceCreatePartitionArgs) GetFirstArgument() interface{} { func (p *FrontendServiceCreatePartitionResult) GetResult() interface{} { return p.Success } + +func (p *FrontendServiceReplacePartitionArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceReplacePartitionResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceGetMetaArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceGetMetaResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceGetBackendMetaArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceGetBackendMetaResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceGetColumnInfoArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceGetColumnInfoResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceInvalidateStatsCacheArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceInvalidateStatsCacheResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceShowProcessListArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceShowProcessListResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceReportCommitTxnResultArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceReportCommitTxnResultResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceShowUserArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceShowUserResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceSyncQueryColumnsArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceSyncQueryColumnsResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceFetchSplitBatchArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceFetchSplitBatchResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceUpdatePartitionStatsCacheArgs) GetFirstArgument() interface{} { + return p.Request 
+} + +func (p *FrontendServiceUpdatePartitionStatsCacheResult) GetResult() interface{} { + return p.Success +} + +func (p *FrontendServiceFetchRunningQueriesArgs) GetFirstArgument() interface{} { + return p.Request +} + +func (p *FrontendServiceFetchRunningQueriesResult) GetResult() interface{} { + return p.Success +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go b/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go new file mode 100644 index 00000000..59fa8e08 --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go @@ -0,0 +1,3047 @@ +// Code generated by thriftgo (0.3.13). DO NOT EDIT. + +package heartbeatservice + +import ( + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" + "strings" +) + +const ( + IS_SET_DEFAULT_ROWSET_TO_BETA_BIT = 1 +) + +type TFrontendInfo struct { + CoordinatorAddress *types.TNetworkAddress `thrift:"coordinator_address,1,optional" frugal:"1,optional,types.TNetworkAddress" json:"coordinator_address,omitempty"` + ProcessUuid *int64 `thrift:"process_uuid,2,optional" frugal:"2,optional,i64" json:"process_uuid,omitempty"` +} + +func NewTFrontendInfo() *TFrontendInfo { + return &TFrontendInfo{} +} + +func (p *TFrontendInfo) InitDefault() { +} + +var TFrontendInfo_CoordinatorAddress_DEFAULT *types.TNetworkAddress + +func (p *TFrontendInfo) GetCoordinatorAddress() (v *types.TNetworkAddress) { + if !p.IsSetCoordinatorAddress() { + return TFrontendInfo_CoordinatorAddress_DEFAULT + } + return p.CoordinatorAddress +} + +var TFrontendInfo_ProcessUuid_DEFAULT int64 + +func (p *TFrontendInfo) GetProcessUuid() (v int64) { + if !p.IsSetProcessUuid() { + return TFrontendInfo_ProcessUuid_DEFAULT + } + return *p.ProcessUuid +} +func (p *TFrontendInfo) SetCoordinatorAddress(val *types.TNetworkAddress) { + p.CoordinatorAddress = val +} +func (p *TFrontendInfo) SetProcessUuid(val *int64) { + p.ProcessUuid = val +} + +var fieldIDToName_TFrontendInfo = map[int16]string{ + 1: "coordinator_address", + 2: "process_uuid", +} + +func (p *TFrontendInfo) IsSetCoordinatorAddress() bool { + return p.CoordinatorAddress != nil +} + +func (p *TFrontendInfo) IsSetProcessUuid() bool { + return p.ProcessUuid != nil +} + +func (p *TFrontendInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d 
begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFrontendInfo) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.CoordinatorAddress = _field + return nil +} +func (p *TFrontendInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ProcessUuid = _field + return nil +} + +func (p *TFrontendInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFrontendInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFrontendInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCoordinatorAddress() { + if err = oprot.WriteFieldBegin("coordinator_address", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.CoordinatorAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFrontendInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetProcessUuid() { + if err = oprot.WriteFieldBegin("process_uuid", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ProcessUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFrontendInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFrontendInfo(%+v)", *p) + +} + +func (p *TFrontendInfo) DeepEqual(ano *TFrontendInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.CoordinatorAddress) { + return false + } + if !p.Field2DeepEqual(ano.ProcessUuid) { + return false + } + return 
true +} + +func (p *TFrontendInfo) Field1DeepEqual(src *types.TNetworkAddress) bool { + + if !p.CoordinatorAddress.DeepEqual(src) { + return false + } + return true +} +func (p *TFrontendInfo) Field2DeepEqual(src *int64) bool { + + if p.ProcessUuid == src { + return true + } else if p.ProcessUuid == nil || src == nil { + return false + } + if *p.ProcessUuid != *src { + return false + } + return true +} + +type TMasterInfo struct { + NetworkAddress *types.TNetworkAddress `thrift:"network_address,1,required" frugal:"1,required,types.TNetworkAddress" json:"network_address"` + ClusterId types.TClusterId `thrift:"cluster_id,2,required" frugal:"2,required,i32" json:"cluster_id"` + Epoch types.TEpoch `thrift:"epoch,3,required" frugal:"3,required,i64" json:"epoch"` + Token *string `thrift:"token,4,optional" frugal:"4,optional,string" json:"token,omitempty"` + BackendIp *string `thrift:"backend_ip,5,optional" frugal:"5,optional,string" json:"backend_ip,omitempty"` + HttpPort *types.TPort `thrift:"http_port,6,optional" frugal:"6,optional,i32" json:"http_port,omitempty"` + HeartbeatFlags *int64 `thrift:"heartbeat_flags,7,optional" frugal:"7,optional,i64" json:"heartbeat_flags,omitempty"` + BackendId *int64 `thrift:"backend_id,8,optional" frugal:"8,optional,i64" json:"backend_id,omitempty"` + FrontendInfos []*TFrontendInfo `thrift:"frontend_infos,9,optional" frugal:"9,optional,list" json:"frontend_infos,omitempty"` + MetaServiceEndpoint *string `thrift:"meta_service_endpoint,10,optional" frugal:"10,optional,string" json:"meta_service_endpoint,omitempty"` + CloudUniqueId *string `thrift:"cloud_unique_id,11,optional" frugal:"11,optional,string" json:"cloud_unique_id,omitempty"` + TabletReportInactiveDurationMs *int64 `thrift:"tablet_report_inactive_duration_ms,12,optional" frugal:"12,optional,i64" json:"tablet_report_inactive_duration_ms,omitempty"` + AuthToken *string `thrift:"auth_token,13,optional" frugal:"13,optional,string" json:"auth_token,omitempty"` +} + +func NewTMasterInfo() *TMasterInfo { + return &TMasterInfo{} +} + +func (p *TMasterInfo) InitDefault() { +} + +var TMasterInfo_NetworkAddress_DEFAULT *types.TNetworkAddress + +func (p *TMasterInfo) GetNetworkAddress() (v *types.TNetworkAddress) { + if !p.IsSetNetworkAddress() { + return TMasterInfo_NetworkAddress_DEFAULT + } + return p.NetworkAddress +} + +func (p *TMasterInfo) GetClusterId() (v types.TClusterId) { + return p.ClusterId +} + +func (p *TMasterInfo) GetEpoch() (v types.TEpoch) { + return p.Epoch +} + +var TMasterInfo_Token_DEFAULT string + +func (p *TMasterInfo) GetToken() (v string) { + if !p.IsSetToken() { + return TMasterInfo_Token_DEFAULT + } + return *p.Token +} + +var TMasterInfo_BackendIp_DEFAULT string + +func (p *TMasterInfo) GetBackendIp() (v string) { + if !p.IsSetBackendIp() { + return TMasterInfo_BackendIp_DEFAULT + } + return *p.BackendIp +} + +var TMasterInfo_HttpPort_DEFAULT types.TPort + +func (p *TMasterInfo) GetHttpPort() (v types.TPort) { + if !p.IsSetHttpPort() { + return TMasterInfo_HttpPort_DEFAULT + } + return *p.HttpPort +} + +var TMasterInfo_HeartbeatFlags_DEFAULT int64 + +func (p *TMasterInfo) GetHeartbeatFlags() (v int64) { + if !p.IsSetHeartbeatFlags() { + return TMasterInfo_HeartbeatFlags_DEFAULT + } + return *p.HeartbeatFlags +} + +var TMasterInfo_BackendId_DEFAULT int64 + +func (p *TMasterInfo) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TMasterInfo_BackendId_DEFAULT + } + return *p.BackendId +} + +var TMasterInfo_FrontendInfos_DEFAULT []*TFrontendInfo + +func (p *TMasterInfo) 
GetFrontendInfos() (v []*TFrontendInfo) { + if !p.IsSetFrontendInfos() { + return TMasterInfo_FrontendInfos_DEFAULT + } + return p.FrontendInfos +} + +var TMasterInfo_MetaServiceEndpoint_DEFAULT string + +func (p *TMasterInfo) GetMetaServiceEndpoint() (v string) { + if !p.IsSetMetaServiceEndpoint() { + return TMasterInfo_MetaServiceEndpoint_DEFAULT + } + return *p.MetaServiceEndpoint +} + +var TMasterInfo_CloudUniqueId_DEFAULT string + +func (p *TMasterInfo) GetCloudUniqueId() (v string) { + if !p.IsSetCloudUniqueId() { + return TMasterInfo_CloudUniqueId_DEFAULT + } + return *p.CloudUniqueId +} + +var TMasterInfo_TabletReportInactiveDurationMs_DEFAULT int64 + +func (p *TMasterInfo) GetTabletReportInactiveDurationMs() (v int64) { + if !p.IsSetTabletReportInactiveDurationMs() { + return TMasterInfo_TabletReportInactiveDurationMs_DEFAULT + } + return *p.TabletReportInactiveDurationMs +} + +var TMasterInfo_AuthToken_DEFAULT string + +func (p *TMasterInfo) GetAuthToken() (v string) { + if !p.IsSetAuthToken() { + return TMasterInfo_AuthToken_DEFAULT + } + return *p.AuthToken +} +func (p *TMasterInfo) SetNetworkAddress(val *types.TNetworkAddress) { + p.NetworkAddress = val +} +func (p *TMasterInfo) SetClusterId(val types.TClusterId) { + p.ClusterId = val +} +func (p *TMasterInfo) SetEpoch(val types.TEpoch) { + p.Epoch = val +} +func (p *TMasterInfo) SetToken(val *string) { + p.Token = val +} +func (p *TMasterInfo) SetBackendIp(val *string) { + p.BackendIp = val +} +func (p *TMasterInfo) SetHttpPort(val *types.TPort) { + p.HttpPort = val +} +func (p *TMasterInfo) SetHeartbeatFlags(val *int64) { + p.HeartbeatFlags = val +} +func (p *TMasterInfo) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TMasterInfo) SetFrontendInfos(val []*TFrontendInfo) { + p.FrontendInfos = val +} +func (p *TMasterInfo) SetMetaServiceEndpoint(val *string) { + p.MetaServiceEndpoint = val +} +func (p *TMasterInfo) SetCloudUniqueId(val *string) { + p.CloudUniqueId = val +} +func (p *TMasterInfo) SetTabletReportInactiveDurationMs(val *int64) { + p.TabletReportInactiveDurationMs = val +} +func (p *TMasterInfo) SetAuthToken(val *string) { + p.AuthToken = val +} + +var fieldIDToName_TMasterInfo = map[int16]string{ + 1: "network_address", + 2: "cluster_id", + 3: "epoch", + 4: "token", + 5: "backend_ip", + 6: "http_port", + 7: "heartbeat_flags", + 8: "backend_id", + 9: "frontend_infos", + 10: "meta_service_endpoint", + 11: "cloud_unique_id", + 12: "tablet_report_inactive_duration_ms", + 13: "auth_token", +} + +func (p *TMasterInfo) IsSetNetworkAddress() bool { + return p.NetworkAddress != nil +} + +func (p *TMasterInfo) IsSetToken() bool { + return p.Token != nil +} + +func (p *TMasterInfo) IsSetBackendIp() bool { + return p.BackendIp != nil +} + +func (p *TMasterInfo) IsSetHttpPort() bool { + return p.HttpPort != nil +} + +func (p *TMasterInfo) IsSetHeartbeatFlags() bool { + return p.HeartbeatFlags != nil +} + +func (p *TMasterInfo) IsSetBackendId() bool { + return p.BackendId != nil +} + +func (p *TMasterInfo) IsSetFrontendInfos() bool { + return p.FrontendInfos != nil +} + +func (p *TMasterInfo) IsSetMetaServiceEndpoint() bool { + return p.MetaServiceEndpoint != nil +} + +func (p *TMasterInfo) IsSetCloudUniqueId() bool { + return p.CloudUniqueId != nil +} + +func (p *TMasterInfo) IsSetTabletReportInactiveDurationMs() bool { + return p.TabletReportInactiveDurationMs != nil +} + +func (p *TMasterInfo) IsSetAuthToken() bool { + return p.AuthToken != nil +} + +func (p *TMasterInfo) Read(iprot thrift.TProtocol) (err error) 
{ + + var fieldTypeId thrift.TType + var fieldId int16 + var issetNetworkAddress bool = false + var issetClusterId bool = false + var issetEpoch bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetNetworkAddress = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetClusterId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetEpoch = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRING { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I64 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRING { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetNetworkAddress { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetClusterId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEpoch { + fieldId = 3 + goto RequiredFieldNotSetError + } + return 
nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterInfo[fieldId])) +} + +func (p *TMasterInfo) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.NetworkAddress = _field + return nil +} +func (p *TMasterInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TClusterId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ClusterId = _field + return nil +} +func (p *TMasterInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TEpoch + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Epoch = _field + return nil +} +func (p *TMasterInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Token = _field + return nil +} +func (p *TMasterInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.BackendIp = _field + return nil +} +func (p *TMasterInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field *types.TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.HttpPort = _field + return nil +} +func (p *TMasterInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.HeartbeatFlags = _field + return nil +} +func (p *TMasterInfo) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BackendId = _field + return nil +} +func (p *TMasterInfo) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TFrontendInfo, 0, size) + values := make([]TFrontendInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FrontendInfos = _field + return nil +} +func (p *TMasterInfo) ReadField10(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.MetaServiceEndpoint = _field + return nil +} +func (p *TMasterInfo) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CloudUniqueId = _field + 
return nil +} +func (p *TMasterInfo) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TabletReportInactiveDurationMs = _field + return nil +} +func (p *TMasterInfo) ReadField13(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.AuthToken = _field + return nil +} + +func (p *TMasterInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TMasterInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TMasterInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("network_address", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.NetworkAddress.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TMasterInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("cluster_id", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ClusterId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TMasterInfo) 
writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("epoch", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.Epoch); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TMasterInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TMasterInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendIp() { + if err = oprot.WriteFieldBegin("backend_ip", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BackendIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TMasterInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetHttpPort() { + if err = oprot.WriteFieldBegin("http_port", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.HttpPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TMasterInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetHeartbeatFlags() { + if err = oprot.WriteFieldBegin("heartbeat_flags", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.HeartbeatFlags); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TMasterInfo) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TMasterInfo) writeField9(oprot thrift.TProtocol) (err error) { + if 
p.IsSetFrontendInfos() { + if err = oprot.WriteFieldBegin("frontend_infos", thrift.LIST, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FrontendInfos)); err != nil { + return err + } + for _, v := range p.FrontendInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMasterInfo) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaServiceEndpoint() { + if err = oprot.WriteFieldBegin("meta_service_endpoint", thrift.STRING, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.MetaServiceEndpoint); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMasterInfo) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudUniqueId() { + if err = oprot.WriteFieldBegin("cloud_unique_id", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CloudUniqueId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TMasterInfo) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletReportInactiveDurationMs() { + if err = oprot.WriteFieldBegin("tablet_report_inactive_duration_ms", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TabletReportInactiveDurationMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TMasterInfo) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetAuthToken() { + if err = oprot.WriteFieldBegin("auth_token", thrift.STRING, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.AuthToken); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TMasterInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TMasterInfo(%+v)", *p) + +} + +func (p *TMasterInfo) DeepEqual(ano *TMasterInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.NetworkAddress) { + return false + } + if 
!p.Field2DeepEqual(ano.ClusterId) { + return false + } + if !p.Field3DeepEqual(ano.Epoch) { + return false + } + if !p.Field4DeepEqual(ano.Token) { + return false + } + if !p.Field5DeepEqual(ano.BackendIp) { + return false + } + if !p.Field6DeepEqual(ano.HttpPort) { + return false + } + if !p.Field7DeepEqual(ano.HeartbeatFlags) { + return false + } + if !p.Field8DeepEqual(ano.BackendId) { + return false + } + if !p.Field9DeepEqual(ano.FrontendInfos) { + return false + } + if !p.Field10DeepEqual(ano.MetaServiceEndpoint) { + return false + } + if !p.Field11DeepEqual(ano.CloudUniqueId) { + return false + } + if !p.Field12DeepEqual(ano.TabletReportInactiveDurationMs) { + return false + } + if !p.Field13DeepEqual(ano.AuthToken) { + return false + } + return true +} + +func (p *TMasterInfo) Field1DeepEqual(src *types.TNetworkAddress) bool { + + if !p.NetworkAddress.DeepEqual(src) { + return false + } + return true +} +func (p *TMasterInfo) Field2DeepEqual(src types.TClusterId) bool { + + if p.ClusterId != src { + return false + } + return true +} +func (p *TMasterInfo) Field3DeepEqual(src types.TEpoch) bool { + + if p.Epoch != src { + return false + } + return true +} +func (p *TMasterInfo) Field4DeepEqual(src *string) bool { + + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false + } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TMasterInfo) Field5DeepEqual(src *string) bool { + + if p.BackendIp == src { + return true + } else if p.BackendIp == nil || src == nil { + return false + } + if strings.Compare(*p.BackendIp, *src) != 0 { + return false + } + return true +} +func (p *TMasterInfo) Field6DeepEqual(src *types.TPort) bool { + + if p.HttpPort == src { + return true + } else if p.HttpPort == nil || src == nil { + return false + } + if *p.HttpPort != *src { + return false + } + return true +} +func (p *TMasterInfo) Field7DeepEqual(src *int64) bool { + + if p.HeartbeatFlags == src { + return true + } else if p.HeartbeatFlags == nil || src == nil { + return false + } + if *p.HeartbeatFlags != *src { + return false + } + return true +} +func (p *TMasterInfo) Field8DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} +func (p *TMasterInfo) Field9DeepEqual(src []*TFrontendInfo) bool { + + if len(p.FrontendInfos) != len(src) { + return false + } + for i, v := range p.FrontendInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TMasterInfo) Field10DeepEqual(src *string) bool { + + if p.MetaServiceEndpoint == src { + return true + } else if p.MetaServiceEndpoint == nil || src == nil { + return false + } + if strings.Compare(*p.MetaServiceEndpoint, *src) != 0 { + return false + } + return true +} +func (p *TMasterInfo) Field11DeepEqual(src *string) bool { + + if p.CloudUniqueId == src { + return true + } else if p.CloudUniqueId == nil || src == nil { + return false + } + if strings.Compare(*p.CloudUniqueId, *src) != 0 { + return false + } + return true +} +func (p *TMasterInfo) Field12DeepEqual(src *int64) bool { + + if p.TabletReportInactiveDurationMs == src { + return true + } else if p.TabletReportInactiveDurationMs == nil || src == nil { + return false + } + if *p.TabletReportInactiveDurationMs != *src { + return false + } + return true +} +func (p *TMasterInfo) Field13DeepEqual(src *string) bool { + + if p.AuthToken == src 
{ + return true + } else if p.AuthToken == nil || src == nil { + return false + } + if strings.Compare(*p.AuthToken, *src) != 0 { + return false + } + return true +} + +type TBackendInfo struct { + BePort types.TPort `thrift:"be_port,1,required" frugal:"1,required,i32" json:"be_port"` + HttpPort types.TPort `thrift:"http_port,2,required" frugal:"2,required,i32" json:"http_port"` + BeRpcPort *types.TPort `thrift:"be_rpc_port,3,optional" frugal:"3,optional,i32" json:"be_rpc_port,omitempty"` + BrpcPort *types.TPort `thrift:"brpc_port,4,optional" frugal:"4,optional,i32" json:"brpc_port,omitempty"` + Version *string `thrift:"version,5,optional" frugal:"5,optional,string" json:"version,omitempty"` + BeStartTime *int64 `thrift:"be_start_time,6,optional" frugal:"6,optional,i64" json:"be_start_time,omitempty"` + BeNodeRole *string `thrift:"be_node_role,7,optional" frugal:"7,optional,string" json:"be_node_role,omitempty"` + IsShutdown *bool `thrift:"is_shutdown,8,optional" frugal:"8,optional,bool" json:"is_shutdown,omitempty"` + ArrowFlightSqlPort *types.TPort `thrift:"arrow_flight_sql_port,9,optional" frugal:"9,optional,i32" json:"arrow_flight_sql_port,omitempty"` + BeMem *int64 `thrift:"be_mem,10,optional" frugal:"10,optional,i64" json:"be_mem,omitempty"` + FragmentExecutingCount *int64 `thrift:"fragment_executing_count,1000,optional" frugal:"1000,optional,i64" json:"fragment_executing_count,omitempty"` + FragmentLastActiveTime *int64 `thrift:"fragment_last_active_time,1001,optional" frugal:"1001,optional,i64" json:"fragment_last_active_time,omitempty"` +} + +func NewTBackendInfo() *TBackendInfo { + return &TBackendInfo{} +} + +func (p *TBackendInfo) InitDefault() { +} + +func (p *TBackendInfo) GetBePort() (v types.TPort) { + return p.BePort +} + +func (p *TBackendInfo) GetHttpPort() (v types.TPort) { + return p.HttpPort +} + +var TBackendInfo_BeRpcPort_DEFAULT types.TPort + +func (p *TBackendInfo) GetBeRpcPort() (v types.TPort) { + if !p.IsSetBeRpcPort() { + return TBackendInfo_BeRpcPort_DEFAULT + } + return *p.BeRpcPort +} + +var TBackendInfo_BrpcPort_DEFAULT types.TPort + +func (p *TBackendInfo) GetBrpcPort() (v types.TPort) { + if !p.IsSetBrpcPort() { + return TBackendInfo_BrpcPort_DEFAULT + } + return *p.BrpcPort +} + +var TBackendInfo_Version_DEFAULT string + +func (p *TBackendInfo) GetVersion() (v string) { + if !p.IsSetVersion() { + return TBackendInfo_Version_DEFAULT + } + return *p.Version +} + +var TBackendInfo_BeStartTime_DEFAULT int64 + +func (p *TBackendInfo) GetBeStartTime() (v int64) { + if !p.IsSetBeStartTime() { + return TBackendInfo_BeStartTime_DEFAULT + } + return *p.BeStartTime +} + +var TBackendInfo_BeNodeRole_DEFAULT string + +func (p *TBackendInfo) GetBeNodeRole() (v string) { + if !p.IsSetBeNodeRole() { + return TBackendInfo_BeNodeRole_DEFAULT + } + return *p.BeNodeRole +} + +var TBackendInfo_IsShutdown_DEFAULT bool + +func (p *TBackendInfo) GetIsShutdown() (v bool) { + if !p.IsSetIsShutdown() { + return TBackendInfo_IsShutdown_DEFAULT + } + return *p.IsShutdown +} + +var TBackendInfo_ArrowFlightSqlPort_DEFAULT types.TPort + +func (p *TBackendInfo) GetArrowFlightSqlPort() (v types.TPort) { + if !p.IsSetArrowFlightSqlPort() { + return TBackendInfo_ArrowFlightSqlPort_DEFAULT + } + return *p.ArrowFlightSqlPort +} + +var TBackendInfo_BeMem_DEFAULT int64 + +func (p *TBackendInfo) GetBeMem() (v int64) { + if !p.IsSetBeMem() { + return TBackendInfo_BeMem_DEFAULT + } + return *p.BeMem +} + +var TBackendInfo_FragmentExecutingCount_DEFAULT int64 + +func (p *TBackendInfo) 
GetFragmentExecutingCount() (v int64) { + if !p.IsSetFragmentExecutingCount() { + return TBackendInfo_FragmentExecutingCount_DEFAULT + } + return *p.FragmentExecutingCount +} + +var TBackendInfo_FragmentLastActiveTime_DEFAULT int64 + +func (p *TBackendInfo) GetFragmentLastActiveTime() (v int64) { + if !p.IsSetFragmentLastActiveTime() { + return TBackendInfo_FragmentLastActiveTime_DEFAULT + } + return *p.FragmentLastActiveTime +} +func (p *TBackendInfo) SetBePort(val types.TPort) { + p.BePort = val +} +func (p *TBackendInfo) SetHttpPort(val types.TPort) { + p.HttpPort = val +} +func (p *TBackendInfo) SetBeRpcPort(val *types.TPort) { + p.BeRpcPort = val +} +func (p *TBackendInfo) SetBrpcPort(val *types.TPort) { + p.BrpcPort = val +} +func (p *TBackendInfo) SetVersion(val *string) { + p.Version = val +} +func (p *TBackendInfo) SetBeStartTime(val *int64) { + p.BeStartTime = val +} +func (p *TBackendInfo) SetBeNodeRole(val *string) { + p.BeNodeRole = val +} +func (p *TBackendInfo) SetIsShutdown(val *bool) { + p.IsShutdown = val +} +func (p *TBackendInfo) SetArrowFlightSqlPort(val *types.TPort) { + p.ArrowFlightSqlPort = val +} +func (p *TBackendInfo) SetBeMem(val *int64) { + p.BeMem = val +} +func (p *TBackendInfo) SetFragmentExecutingCount(val *int64) { + p.FragmentExecutingCount = val +} +func (p *TBackendInfo) SetFragmentLastActiveTime(val *int64) { + p.FragmentLastActiveTime = val +} + +var fieldIDToName_TBackendInfo = map[int16]string{ + 1: "be_port", + 2: "http_port", + 3: "be_rpc_port", + 4: "brpc_port", + 5: "version", + 6: "be_start_time", + 7: "be_node_role", + 8: "is_shutdown", + 9: "arrow_flight_sql_port", + 10: "be_mem", + 1000: "fragment_executing_count", + 1001: "fragment_last_active_time", +} + +func (p *TBackendInfo) IsSetBeRpcPort() bool { + return p.BeRpcPort != nil +} + +func (p *TBackendInfo) IsSetBrpcPort() bool { + return p.BrpcPort != nil +} + +func (p *TBackendInfo) IsSetVersion() bool { + return p.Version != nil +} + +func (p *TBackendInfo) IsSetBeStartTime() bool { + return p.BeStartTime != nil +} + +func (p *TBackendInfo) IsSetBeNodeRole() bool { + return p.BeNodeRole != nil +} + +func (p *TBackendInfo) IsSetIsShutdown() bool { + return p.IsShutdown != nil +} + +func (p *TBackendInfo) IsSetArrowFlightSqlPort() bool { + return p.ArrowFlightSqlPort != nil +} + +func (p *TBackendInfo) IsSetBeMem() bool { + return p.BeMem != nil +} + +func (p *TBackendInfo) IsSetFragmentExecutingCount() bool { + return p.FragmentExecutingCount != nil +} + +func (p *TBackendInfo) IsSetFragmentLastActiveTime() bool { + return p.FragmentLastActiveTime != nil +} + +func (p *TBackendInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetBePort bool = false + var issetHttpPort bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetBePort = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetHttpPort = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = 
p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetBePort { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetHttpPort { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TBackendInfo[fieldId])) +} + +func (p *TBackendInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.BePort = _field + return nil +} +func (p *TBackendInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TPort + if v, err := iprot.ReadI32(); err != 
nil { + return err + } else { + _field = v + } + p.HttpPort = _field + return nil +} +func (p *TBackendInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BeRpcPort = _field + return nil +} +func (p *TBackendInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BrpcPort = _field + return nil +} +func (p *TBackendInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Version = _field + return nil +} +func (p *TBackendInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BeStartTime = _field + return nil +} +func (p *TBackendInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.BeNodeRole = _field + return nil +} +func (p *TBackendInfo) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsShutdown = _field + return nil +} +func (p *TBackendInfo) ReadField9(iprot thrift.TProtocol) error { + + var _field *types.TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ArrowFlightSqlPort = _field + return nil +} +func (p *TBackendInfo) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BeMem = _field + return nil +} +func (p *TBackendInfo) ReadField1000(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FragmentExecutingCount = _field + return nil +} +func (p *TBackendInfo) ReadField1001(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FragmentLastActiveTime = _field + return nil +} + +func (p *TBackendInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBackendInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 + goto 
WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TBackendInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("be_port", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.BePort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TBackendInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("http_port", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.HttpPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TBackendInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBeRpcPort() { + if err = oprot.WriteFieldBegin("be_rpc_port", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BeRpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TBackendInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBrpcPort() { + if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BrpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TBackendInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Version); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TBackendInfo) writeField6(oprot thrift.TProtocol) 
(err error) { + if p.IsSetBeStartTime() { + if err = oprot.WriteFieldBegin("be_start_time", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BeStartTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TBackendInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetBeNodeRole() { + if err = oprot.WriteFieldBegin("be_node_role", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BeNodeRole); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TBackendInfo) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetIsShutdown() { + if err = oprot.WriteFieldBegin("is_shutdown", thrift.BOOL, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsShutdown); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TBackendInfo) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetArrowFlightSqlPort() { + if err = oprot.WriteFieldBegin("arrow_flight_sql_port", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ArrowFlightSqlPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TBackendInfo) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetBeMem() { + if err = oprot.WriteFieldBegin("be_mem", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BeMem); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TBackendInfo) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentExecutingCount() { + if err = oprot.WriteFieldBegin("fragment_executing_count", thrift.I64, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FragmentExecutingCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", 
p), err) +} + +func (p *TBackendInfo) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentLastActiveTime() { + if err = oprot.WriteFieldBegin("fragment_last_active_time", thrift.I64, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FragmentLastActiveTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) +} + +func (p *TBackendInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TBackendInfo(%+v)", *p) + +} + +func (p *TBackendInfo) DeepEqual(ano *TBackendInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.BePort) { + return false + } + if !p.Field2DeepEqual(ano.HttpPort) { + return false + } + if !p.Field3DeepEqual(ano.BeRpcPort) { + return false + } + if !p.Field4DeepEqual(ano.BrpcPort) { + return false + } + if !p.Field5DeepEqual(ano.Version) { + return false + } + if !p.Field6DeepEqual(ano.BeStartTime) { + return false + } + if !p.Field7DeepEqual(ano.BeNodeRole) { + return false + } + if !p.Field8DeepEqual(ano.IsShutdown) { + return false + } + if !p.Field9DeepEqual(ano.ArrowFlightSqlPort) { + return false + } + if !p.Field10DeepEqual(ano.BeMem) { + return false + } + if !p.Field1000DeepEqual(ano.FragmentExecutingCount) { + return false + } + if !p.Field1001DeepEqual(ano.FragmentLastActiveTime) { + return false + } + return true +} + +func (p *TBackendInfo) Field1DeepEqual(src types.TPort) bool { + + if p.BePort != src { + return false + } + return true +} +func (p *TBackendInfo) Field2DeepEqual(src types.TPort) bool { + + if p.HttpPort != src { + return false + } + return true +} +func (p *TBackendInfo) Field3DeepEqual(src *types.TPort) bool { + + if p.BeRpcPort == src { + return true + } else if p.BeRpcPort == nil || src == nil { + return false + } + if *p.BeRpcPort != *src { + return false + } + return true +} +func (p *TBackendInfo) Field4DeepEqual(src *types.TPort) bool { + + if p.BrpcPort == src { + return true + } else if p.BrpcPort == nil || src == nil { + return false + } + if *p.BrpcPort != *src { + return false + } + return true +} +func (p *TBackendInfo) Field5DeepEqual(src *string) bool { + + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false + } + if strings.Compare(*p.Version, *src) != 0 { + return false + } + return true +} +func (p *TBackendInfo) Field6DeepEqual(src *int64) bool { + + if p.BeStartTime == src { + return true + } else if p.BeStartTime == nil || src == nil { + return false + } + if *p.BeStartTime != *src { + return false + } + return true +} +func (p *TBackendInfo) Field7DeepEqual(src *string) bool { + + if p.BeNodeRole == src { + return true + } else if p.BeNodeRole == nil || src == nil { + return false + } + if strings.Compare(*p.BeNodeRole, *src) != 0 { + return false + } + return true +} +func (p *TBackendInfo) Field8DeepEqual(src *bool) bool { + + if p.IsShutdown == src { + return true + } else if p.IsShutdown == nil || src == nil { + return false + } + if *p.IsShutdown != *src { + return false + } + return true +} +func (p *TBackendInfo) Field9DeepEqual(src *types.TPort) bool { + + if p.ArrowFlightSqlPort == src { + return true + } else if p.ArrowFlightSqlPort == nil 
|| src == nil { + return false + } + if *p.ArrowFlightSqlPort != *src { + return false + } + return true +} +func (p *TBackendInfo) Field10DeepEqual(src *int64) bool { + + if p.BeMem == src { + return true + } else if p.BeMem == nil || src == nil { + return false + } + if *p.BeMem != *src { + return false + } + return true +} +func (p *TBackendInfo) Field1000DeepEqual(src *int64) bool { + + if p.FragmentExecutingCount == src { + return true + } else if p.FragmentExecutingCount == nil || src == nil { + return false + } + if *p.FragmentExecutingCount != *src { + return false + } + return true +} +func (p *TBackendInfo) Field1001DeepEqual(src *int64) bool { + + if p.FragmentLastActiveTime == src { + return true + } else if p.FragmentLastActiveTime == nil || src == nil { + return false + } + if *p.FragmentLastActiveTime != *src { + return false + } + return true +} + +type THeartbeatResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + BackendInfo *TBackendInfo `thrift:"backend_info,2,required" frugal:"2,required,TBackendInfo" json:"backend_info"` +} + +func NewTHeartbeatResult_() *THeartbeatResult_ { + return &THeartbeatResult_{} +} + +func (p *THeartbeatResult_) InitDefault() { +} + +var THeartbeatResult__Status_DEFAULT *status.TStatus + +func (p *THeartbeatResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return THeartbeatResult__Status_DEFAULT + } + return p.Status +} + +var THeartbeatResult__BackendInfo_DEFAULT *TBackendInfo + +func (p *THeartbeatResult_) GetBackendInfo() (v *TBackendInfo) { + if !p.IsSetBackendInfo() { + return THeartbeatResult__BackendInfo_DEFAULT + } + return p.BackendInfo +} +func (p *THeartbeatResult_) SetStatus(val *status.TStatus) { + p.Status = val +} +func (p *THeartbeatResult_) SetBackendInfo(val *TBackendInfo) { + p.BackendInfo = val +} + +var fieldIDToName_THeartbeatResult_ = map[int16]string{ + 1: "status", + 2: "backend_info", +} + +func (p *THeartbeatResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *THeartbeatResult_) IsSetBackendInfo() bool { + return p.BackendInfo != nil +} + +func (p *THeartbeatResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + var issetBackendInfo bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetBackendInfo = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBackendInfo { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin 
error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THeartbeatResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THeartbeatResult_[fieldId])) +} + +func (p *THeartbeatResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err + } + p.Status = _field + return nil +} +func (p *THeartbeatResult_) ReadField2(iprot thrift.TProtocol) error { + _field := NewTBackendInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.BackendInfo = _field + return nil +} + +func (p *THeartbeatResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("THeartbeatResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *THeartbeatResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *THeartbeatResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("backend_info", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.BackendInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *THeartbeatResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THeartbeatResult_(%+v)", *p) + +} + +func (p *THeartbeatResult_) DeepEqual(ano *THeartbeatResult_) bool { + if p == ano { + return true + } 
else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Status) { + return false + } + if !p.Field2DeepEqual(ano.BackendInfo) { + return false + } + return true +} + +func (p *THeartbeatResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { + return false + } + return true +} +func (p *THeartbeatResult_) Field2DeepEqual(src *TBackendInfo) bool { + + if !p.BackendInfo.DeepEqual(src) { + return false + } + return true +} + +type HeartbeatService interface { + Heartbeat(ctx context.Context, masterInfo *TMasterInfo) (r *THeartbeatResult_, err error) +} + +type HeartbeatServiceClient struct { + c thrift.TClient +} + +func NewHeartbeatServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *HeartbeatServiceClient { + return &HeartbeatServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewHeartbeatServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *HeartbeatServiceClient { + return &HeartbeatServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewHeartbeatServiceClient(c thrift.TClient) *HeartbeatServiceClient { + return &HeartbeatServiceClient{ + c: c, + } +} + +func (p *HeartbeatServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *HeartbeatServiceClient) Heartbeat(ctx context.Context, masterInfo *TMasterInfo) (r *THeartbeatResult_, err error) { + var _args HeartbeatServiceHeartbeatArgs + _args.MasterInfo = masterInfo + var _result HeartbeatServiceHeartbeatResult + if err = p.Client_().Call(ctx, "heartbeat", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +type HeartbeatServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler HeartbeatService +} + +func (p *HeartbeatServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *HeartbeatServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *HeartbeatServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewHeartbeatServiceProcessor(handler HeartbeatService) *HeartbeatServiceProcessor { + self := &HeartbeatServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self.AddToProcessorMap("heartbeat", &heartbeatServiceProcessorHeartbeat{handler: handler}) + return self +} +func (p *HeartbeatServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x +} + +type heartbeatServiceProcessorHeartbeat struct { + handler HeartbeatService +} + +func (p *heartbeatServiceProcessorHeartbeat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := HeartbeatServiceHeartbeatArgs{} + if err = args.Read(iprot); err != nil { + 
iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("heartbeat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := HeartbeatServiceHeartbeatResult{} + var retval *THeartbeatResult_ + if retval, err2 = p.handler.Heartbeat(ctx, args.MasterInfo); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing heartbeat: "+err2.Error()) + oprot.WriteMessageBegin("heartbeat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("heartbeat", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type HeartbeatServiceHeartbeatArgs struct { + MasterInfo *TMasterInfo `thrift:"master_info,1" frugal:"1,default,TMasterInfo" json:"master_info"` +} + +func NewHeartbeatServiceHeartbeatArgs() *HeartbeatServiceHeartbeatArgs { + return &HeartbeatServiceHeartbeatArgs{} +} + +func (p *HeartbeatServiceHeartbeatArgs) InitDefault() { +} + +var HeartbeatServiceHeartbeatArgs_MasterInfo_DEFAULT *TMasterInfo + +func (p *HeartbeatServiceHeartbeatArgs) GetMasterInfo() (v *TMasterInfo) { + if !p.IsSetMasterInfo() { + return HeartbeatServiceHeartbeatArgs_MasterInfo_DEFAULT + } + return p.MasterInfo +} +func (p *HeartbeatServiceHeartbeatArgs) SetMasterInfo(val *TMasterInfo) { + p.MasterInfo = val +} + +var fieldIDToName_HeartbeatServiceHeartbeatArgs = map[int16]string{ + 1: "master_info", +} + +func (p *HeartbeatServiceHeartbeatArgs) IsSetMasterInfo() bool { + return p.MasterInfo != nil +} + +func (p *HeartbeatServiceHeartbeatArgs) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_HeartbeatServiceHeartbeatArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewTMasterInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.MasterInfo = _field + return nil +} + +func (p *HeartbeatServiceHeartbeatArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("heartbeat_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("master_info", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.MasterInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("HeartbeatServiceHeartbeatArgs(%+v)", *p) + +} + +func (p *HeartbeatServiceHeartbeatArgs) DeepEqual(ano *HeartbeatServiceHeartbeatArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.MasterInfo) { + return false + } + return true +} + +func (p *HeartbeatServiceHeartbeatArgs) Field1DeepEqual(src *TMasterInfo) bool { + + if !p.MasterInfo.DeepEqual(src) { + return false + } + return true +} + +type HeartbeatServiceHeartbeatResult struct { + Success *THeartbeatResult_ `thrift:"success,0,optional" frugal:"0,optional,THeartbeatResult_" json:"success,omitempty"` +} + +func NewHeartbeatServiceHeartbeatResult() *HeartbeatServiceHeartbeatResult { + return &HeartbeatServiceHeartbeatResult{} +} + +func (p *HeartbeatServiceHeartbeatResult) InitDefault() { +} + +var HeartbeatServiceHeartbeatResult_Success_DEFAULT *THeartbeatResult_ + +func (p *HeartbeatServiceHeartbeatResult) GetSuccess() (v *THeartbeatResult_) { + if !p.IsSetSuccess() { + return HeartbeatServiceHeartbeatResult_Success_DEFAULT + } + return p.Success +} +func (p *HeartbeatServiceHeartbeatResult) SetSuccess(x interface{}) { + p.Success = x.(*THeartbeatResult_) +} + +var fieldIDToName_HeartbeatServiceHeartbeatResult = map[int16]string{ + 0: "success", +} + +func (p *HeartbeatServiceHeartbeatResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *HeartbeatServiceHeartbeatResult) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = 
iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_HeartbeatServiceHeartbeatResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewTHeartbeatResult_() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *HeartbeatServiceHeartbeatResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("heartbeat_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("HeartbeatServiceHeartbeatResult(%+v)", *p) + +} + +func (p *HeartbeatServiceHeartbeatResult) DeepEqual(ano *HeartbeatServiceHeartbeatResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *HeartbeatServiceHeartbeatResult) Field0DeepEqual(src *THeartbeatResult_) 
bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/client.go b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/client.go new file mode 100644 index 00000000..e71ef355 --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/client.go @@ -0,0 +1,49 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. + +package heartbeatservice + +import ( + "context" + client "github.com/cloudwego/kitex/client" + callopt "github.com/cloudwego/kitex/client/callopt" + heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice" +) + +// Client is designed to provide IDL-compatible methods with call-option parameter for kitex framework. +type Client interface { + Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo, callOptions ...callopt.Option) (r *heartbeatservice.THeartbeatResult_, err error) +} + +// NewClient creates a client for the service defined in IDL. +func NewClient(destService string, opts ...client.Option) (Client, error) { + var options []client.Option + options = append(options, client.WithDestService(destService)) + + options = append(options, opts...) + + kc, err := client.NewClient(serviceInfo(), options...) + if err != nil { + return nil, err + } + return &kHeartbeatServiceClient{ + kClient: newServiceClient(kc), + }, nil +} + +// MustNewClient creates a client for the service defined in IDL. It panics if any error occurs. +func MustNewClient(destService string, opts ...client.Option) Client { + kc, err := NewClient(destService, opts...) + if err != nil { + panic(err) + } + return kc +} + +type kHeartbeatServiceClient struct { + *kClient +} + +func (p *kHeartbeatServiceClient) Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo, callOptions ...callopt.Option) (r *heartbeatservice.THeartbeatResult_, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.Heartbeat(ctx, masterInfo) +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/heartbeatservice.go b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/heartbeatservice.go new file mode 100644 index 00000000..2bc64d59 --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/heartbeatservice.go @@ -0,0 +1,75 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
+ +package heartbeatservice + +import ( + "context" + client "github.com/cloudwego/kitex/client" + kitex "github.com/cloudwego/kitex/pkg/serviceinfo" + heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice" +) + +func serviceInfo() *kitex.ServiceInfo { + return heartbeatServiceServiceInfo +} + +var heartbeatServiceServiceInfo = NewServiceInfo() + +func NewServiceInfo() *kitex.ServiceInfo { + serviceName := "HeartbeatService" + handlerType := (*heartbeatservice.HeartbeatService)(nil) + methods := map[string]kitex.MethodInfo{ + "heartbeat": kitex.NewMethodInfo(heartbeatHandler, newHeartbeatServiceHeartbeatArgs, newHeartbeatServiceHeartbeatResult, false), + } + extra := map[string]interface{}{ + "PackageName": "heartbeatservice", + "ServiceFilePath": `thrift/HeartbeatService.thrift`, + } + svcInfo := &kitex.ServiceInfo{ + ServiceName: serviceName, + HandlerType: handlerType, + Methods: methods, + PayloadCodec: kitex.Thrift, + KiteXGenVersion: "v0.8.0", + Extra: extra, + } + return svcInfo +} + +func heartbeatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*heartbeatservice.HeartbeatServiceHeartbeatArgs) + realResult := result.(*heartbeatservice.HeartbeatServiceHeartbeatResult) + success, err := handler.(heartbeatservice.HeartbeatService).Heartbeat(ctx, realArg.MasterInfo) + if err != nil { + return err + } + realResult.Success = success + return nil +} +func newHeartbeatServiceHeartbeatArgs() interface{} { + return heartbeatservice.NewHeartbeatServiceHeartbeatArgs() +} + +func newHeartbeatServiceHeartbeatResult() interface{} { + return heartbeatservice.NewHeartbeatServiceHeartbeatResult() +} + +type kClient struct { + c client.Client +} + +func newServiceClient(c client.Client) *kClient { + return &kClient{ + c: c, + } +} + +func (p *kClient) Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo) (r *heartbeatservice.THeartbeatResult_, err error) { + var _args heartbeatservice.HeartbeatServiceHeartbeatArgs + _args.MasterInfo = masterInfo + var _result heartbeatservice.HeartbeatServiceHeartbeatResult + if err = p.c.Call(ctx, "heartbeat", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/invoker.go b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/invoker.go new file mode 100644 index 00000000..2cc8aa00 --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/invoker.go @@ -0,0 +1,24 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. + +package heartbeatservice + +import ( + server "github.com/cloudwego/kitex/server" + heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice" +) + +// NewInvoker creates a server.Invoker with the given handler and options. +func NewInvoker(handler heartbeatservice.HeartbeatService, opts ...server.Option) server.Invoker { + var options []server.Option + + options = append(options, opts...) + + s := server.NewInvoker(options...) + if err := s.RegisterService(serviceInfo(), handler); err != nil { + panic(err) + } + if err := s.Init(); err != nil { + panic(err) + } + return s +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/server.go b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/server.go new file mode 100644 index 00000000..6335c09c --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/server.go @@ -0,0 +1,20 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
+package heartbeatservice + +import ( + server "github.com/cloudwego/kitex/server" + heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice" +) + +// NewServer creates a server.Server with the given handler and options. +func NewServer(handler heartbeatservice.HeartbeatService, opts ...server.Option) server.Server { + var options []server.Option + + options = append(options, opts...) + + svr := server.NewServer(options...) + if err := svr.RegisterService(serviceInfo(), handler); err != nil { + panic(err) + } + return svr +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go b/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go new file mode 100644 index 00000000..fa412a07 --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go @@ -0,0 +1,2148 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. + +package heartbeatservice + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + "github.com/apache/thrift/lib/go/thrift" + + "github.com/cloudwego/kitex/pkg/protocol/bthrift" + + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" +) + +// unused protection +var ( + _ = fmt.Formatter(nil) + _ = (*bytes.Buffer)(nil) + _ = (*strings.Builder)(nil) + _ = reflect.Type(nil) + _ = thrift.TProtocol(nil) + _ = bthrift.BinaryWriter(nil) + _ = agentservice.KitexUnusedProtection + _ = status.KitexUnusedProtection + _ = types.KitexUnusedProtection +) + +func (p *TFrontendInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFrontendInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CoordinatorAddress = tmp + return offset, nil +} + +func (p *TFrontendInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ProcessUuid = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFrontendInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFrontendInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendInfo") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFrontendInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFrontendInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFrontendInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCoordinatorAddress() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "coordinator_address", thrift.STRUCT, 1) + offset += p.CoordinatorAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProcessUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "process_uuid", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ProcessUuid) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFrontendInfo) field1Length() int { + l := 0 + if p.IsSetCoordinatorAddress() { + l += bthrift.Binary.FieldBeginLength("coordinator_address", thrift.STRUCT, 1) + l += p.CoordinatorAddress.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFrontendInfo) field2Length() int { + l := 0 + if p.IsSetProcessUuid() { + l += bthrift.Binary.FieldBeginLength("process_uuid", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.ProcessUuid) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetNetworkAddress bool = false + var issetClusterId bool = false + var issetEpoch bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + issetNetworkAddress = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetClusterId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEpoch = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err 
= bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetNetworkAddress { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetClusterId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEpoch { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMasterInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMasterInfo[fieldId])) +} + +func (p *TMasterInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.NetworkAddress = tmp + return offset, nil +} + +func (p *TMasterInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ClusterId = v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Epoch = v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Token = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendIp = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.HttpPort = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.HeartbeatFlags = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + +func (p *TMasterInfo) 
FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FrontendInfos = make([]*TFrontendInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTFrontendInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FrontendInfos = append(p.FrontendInfos, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MetaServiceEndpoint = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CloudUniqueId = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TabletReportInactiveDurationMs = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.AuthToken = &v + + } + return offset, nil +} + +// for compatibility +func (p *TMasterInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TMasterInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMasterInfo") + if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TMasterInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TMasterInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TMasterInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "network_address", thrift.STRUCT, 1) + 
offset += p.NetworkAddress.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMasterInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_id", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ClusterId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMasterInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "epoch", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], p.Epoch) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TMasterInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendIp() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_ip", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BackendIp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHttpPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "http_port", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.HttpPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHeartbeatFlags() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "heartbeat_flags", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.HeartbeatFlags) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFrontendInfos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "frontend_infos", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FrontendInfos { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetaServiceEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"meta_service_endpoint", thrift.STRING, 10) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.MetaServiceEndpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCloudUniqueId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_unique_id", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudUniqueId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletReportInactiveDurationMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_report_inactive_duration_ms", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletReportInactiveDurationMs) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAuthToken() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "auth_token", thrift.STRING, 13) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.AuthToken) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("network_address", thrift.STRUCT, 1) + l += p.NetworkAddress.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMasterInfo) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("cluster_id", thrift.I32, 2) + l += bthrift.Binary.I32Length(p.ClusterId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMasterInfo) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("epoch", thrift.I64, 3) + l += bthrift.Binary.I64Length(p.Epoch) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TMasterInfo) field4Length() int { + l := 0 + if p.IsSetToken() { + l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Token) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field5Length() int { + l := 0 + if p.IsSetBackendIp() { + l += bthrift.Binary.FieldBeginLength("backend_ip", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.BackendIp) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field6Length() int { + l := 0 + if p.IsSetHttpPort() { + l += bthrift.Binary.FieldBeginLength("http_port", thrift.I32, 6) + l += bthrift.Binary.I32Length(*p.HttpPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field7Length() int { + l := 0 + if p.IsSetHeartbeatFlags() { + l += bthrift.Binary.FieldBeginLength("heartbeat_flags", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.HeartbeatFlags) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field8Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.BackendId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field9Length() int { + l := 0 + if p.IsSetFrontendInfos() { + l += bthrift.Binary.FieldBeginLength("frontend_infos", thrift.LIST, 
9) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FrontendInfos)) + for _, v := range p.FrontendInfos { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field10Length() int { + l := 0 + if p.IsSetMetaServiceEndpoint() { + l += bthrift.Binary.FieldBeginLength("meta_service_endpoint", thrift.STRING, 10) + l += bthrift.Binary.StringLengthNocopy(*p.MetaServiceEndpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field11Length() int { + l := 0 + if p.IsSetCloudUniqueId() { + l += bthrift.Binary.FieldBeginLength("cloud_unique_id", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.CloudUniqueId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field12Length() int { + l := 0 + if p.IsSetTabletReportInactiveDurationMs() { + l += bthrift.Binary.FieldBeginLength("tablet_report_inactive_duration_ms", thrift.I64, 12) + l += bthrift.Binary.I64Length(*p.TabletReportInactiveDurationMs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field13Length() int { + l := 0 + if p.IsSetAuthToken() { + l += bthrift.Binary.FieldBeginLength("auth_token", thrift.STRING, 13) + l += bthrift.Binary.StringLengthNocopy(*p.AuthToken) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetBePort bool = false + var issetHttpPort bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetBePort = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetHttpPort = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1001: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetBePort { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetHttpPort { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TBackendInfo[fieldId])) +} + +func (p *TBackendInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BePort = v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField2(buf []byte) (int, error) { + offset := 
0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.HttpPort = v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeRpcPort = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BrpcPort = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Version = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeStartTime = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeNodeRole = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsShutdown = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ArrowFlightSqlPort = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeMem = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentExecutingCount = &v + + } + return offset, nil +} + +func (p *TBackendInfo) FastReadField1001(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentLastActiveTime = &v + + } + return offset, nil +} + +// for compatibility +func (p *TBackendInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TBackendInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBackendInfo") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + offset += 
p.fastWriteField1001(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TBackendInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBackendInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field1000Length() + l += p.field1001Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TBackendInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_port", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], p.BePort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TBackendInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "http_port", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], p.HttpPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TBackendInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeRpcPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_rpc_port", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BeRpcPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBrpcPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BrpcPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Version) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeStartTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_start_time", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BeStartTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeNodeRole() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_node_role", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BeNodeRole) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsShutdown() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"is_shutdown", thrift.BOOL, 8) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsShutdown) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetArrowFlightSqlPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "arrow_flight_sql_port", thrift.I32, 9) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ArrowFlightSqlPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeMem() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_mem", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BeMem) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentExecutingCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_executing_count", thrift.I64, 1000) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FragmentExecutingCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentLastActiveTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_last_active_time", thrift.I64, 1001) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.FragmentLastActiveTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendInfo) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("be_port", thrift.I32, 1) + l += bthrift.Binary.I32Length(p.BePort) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TBackendInfo) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("http_port", thrift.I32, 2) + l += bthrift.Binary.I32Length(p.HttpPort) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TBackendInfo) field3Length() int { + l := 0 + if p.IsSetBeRpcPort() { + l += bthrift.Binary.FieldBeginLength("be_rpc_port", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.BeRpcPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field4Length() int { + l := 0 + if p.IsSetBrpcPort() { + l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.BrpcPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field5Length() int { + l := 0 + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Version) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field6Length() int { + l := 0 + if p.IsSetBeStartTime() { + l += bthrift.Binary.FieldBeginLength("be_start_time", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.BeStartTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field7Length() int { + l := 0 + if p.IsSetBeNodeRole() { + l += bthrift.Binary.FieldBeginLength("be_node_role", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.BeNodeRole) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field8Length() int { + l := 0 + if 
p.IsSetIsShutdown() { + l += bthrift.Binary.FieldBeginLength("is_shutdown", thrift.BOOL, 8) + l += bthrift.Binary.BoolLength(*p.IsShutdown) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field9Length() int { + l := 0 + if p.IsSetArrowFlightSqlPort() { + l += bthrift.Binary.FieldBeginLength("arrow_flight_sql_port", thrift.I32, 9) + l += bthrift.Binary.I32Length(*p.ArrowFlightSqlPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field10Length() int { + l := 0 + if p.IsSetBeMem() { + l += bthrift.Binary.FieldBeginLength("be_mem", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.BeMem) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field1000Length() int { + l := 0 + if p.IsSetFragmentExecutingCount() { + l += bthrift.Binary.FieldBeginLength("fragment_executing_count", thrift.I64, 1000) + l += bthrift.Binary.I64Length(*p.FragmentExecutingCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendInfo) field1001Length() int { + l := 0 + if p.IsSetFragmentLastActiveTime() { + l += bthrift.Binary.FieldBeginLength("fragment_last_active_time", thrift.I64, 1001) + l += bthrift.Binary.I64Length(*p.FragmentLastActiveTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THeartbeatResult_) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetStatus bool = false + var issetBackendInfo bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetBackendInfo = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetBackendInfo { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THeartbeatResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), 
err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THeartbeatResult_[fieldId])) +} + +func (p *THeartbeatResult_) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Status = tmp + return offset, nil +} + +func (p *THeartbeatResult_) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTBackendInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BackendInfo = tmp + return offset, nil +} + +// for compatibility +func (p *THeartbeatResult_) FastWrite(buf []byte) int { + return 0 +} + +func (p *THeartbeatResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THeartbeatResult") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *THeartbeatResult_) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THeartbeatResult") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *THeartbeatResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THeartbeatResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_info", thrift.STRUCT, 2) + offset += p.BackendInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *THeartbeatResult_) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *THeartbeatResult_) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("backend_info", thrift.STRUCT, 2) + l += p.BackendInfo.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *HeartbeatServiceHeartbeatArgs) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + 
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_HeartbeatServiceHeartbeatArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTMasterInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MasterInfo = tmp + return offset, nil +} + +// for compatibility +func (p *HeartbeatServiceHeartbeatArgs) FastWrite(buf []byte) int { + return 0 +} + +func (p *HeartbeatServiceHeartbeatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "heartbeat_args") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *HeartbeatServiceHeartbeatArgs) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("heartbeat_args") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *HeartbeatServiceHeartbeatArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "master_info", thrift.STRUCT, 1) + offset += p.MasterInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *HeartbeatServiceHeartbeatArgs) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("master_info", thrift.STRUCT, 1) + l += p.MasterInfo.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *HeartbeatServiceHeartbeatResult) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_HeartbeatServiceHeartbeatResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *HeartbeatServiceHeartbeatResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHeartbeatResult_() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = tmp + return offset, nil +} + +// for compatibility +func (p *HeartbeatServiceHeartbeatResult) FastWrite(buf []byte) int { + return 0 +} + +func (p *HeartbeatServiceHeartbeatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "heartbeat_result") + if p != nil { + offset += p.fastWriteField0(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *HeartbeatServiceHeartbeatResult) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("heartbeat_result") + if p != nil { + l += p.field0Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *HeartbeatServiceHeartbeatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *HeartbeatServiceHeartbeatResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0) + l += p.Success.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *HeartbeatServiceHeartbeatArgs) GetFirstArgument() interface{} { + return p.MasterInfo +} + +func (p *HeartbeatServiceHeartbeatResult) GetResult() interface{} { + return p.Success +} diff --git a/pkg/rpc/kitex_gen/heartbeatservice/k-consts.go b/pkg/rpc/kitex_gen/heartbeatservice/k-consts.go new file mode 100644 index 00000000..f859bb2f --- /dev/null +++ b/pkg/rpc/kitex_gen/heartbeatservice/k-consts.go @@ -0,0 +1,4 @@ +package heartbeatservice + +// KitexUnusedProtection is used to prevent 'imported and not used' error. 
+var KitexUnusedProtection = struct{}{} diff --git a/pkg/rpc/kitex_gen/masterservice/MasterService.go b/pkg/rpc/kitex_gen/masterservice/MasterService.go index d576d725..a886a064 100644 --- a/pkg/rpc/kitex_gen/masterservice/MasterService.go +++ b/pkg/rpc/kitex_gen/masterservice/MasterService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package masterservice @@ -96,24 +96,26 @@ func (p *TResourceType) Value() (driver.Value, error) { } type TTabletInfo struct { - TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` - SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"` - Version types.TVersion `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` - VersionHash types.TVersionHash `thrift:"version_hash,4,required" frugal:"4,required,i64" json:"version_hash"` - RowCount types.TCount `thrift:"row_count,5,required" frugal:"5,required,i64" json:"row_count"` - DataSize types.TSize `thrift:"data_size,6,required" frugal:"6,required,i64" json:"data_size"` - StorageMedium *types.TStorageMedium `thrift:"storage_medium,7,optional" frugal:"7,optional,TStorageMedium" json:"storage_medium,omitempty"` - TransactionIds []types.TTransactionId `thrift:"transaction_ids,8,optional" frugal:"8,optional,list" json:"transaction_ids,omitempty"` - VersionCount *int64 `thrift:"version_count,9,optional" frugal:"9,optional,i64" json:"version_count,omitempty"` - PathHash *int64 `thrift:"path_hash,10,optional" frugal:"10,optional,i64" json:"path_hash,omitempty"` - VersionMiss *bool `thrift:"version_miss,11,optional" frugal:"11,optional,bool" json:"version_miss,omitempty"` - Used *bool `thrift:"used,12,optional" frugal:"12,optional,bool" json:"used,omitempty"` - PartitionId *types.TPartitionId `thrift:"partition_id,13,optional" frugal:"13,optional,i64" json:"partition_id,omitempty"` - IsInMemory *bool `thrift:"is_in_memory,14,optional" frugal:"14,optional,bool" json:"is_in_memory,omitempty"` - ReplicaId *types.TReplicaId `thrift:"replica_id,15,optional" frugal:"15,optional,i64" json:"replica_id,omitempty"` - RemoteDataSize *types.TSize `thrift:"remote_data_size,16,optional" frugal:"16,optional,i64" json:"remote_data_size,omitempty"` - CooldownTerm *int64 `thrift:"cooldown_term,19,optional" frugal:"19,optional,i64" json:"cooldown_term,omitempty"` - CooldownMetaId *types.TUniqueId `thrift:"cooldown_meta_id,20,optional" frugal:"20,optional,types.TUniqueId" json:"cooldown_meta_id,omitempty"` + TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"` + SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"` + Version types.TVersion `thrift:"version,3,required" frugal:"3,required,i64" json:"version"` + VersionHash types.TVersionHash `thrift:"version_hash,4,required" frugal:"4,required,i64" json:"version_hash"` + RowCount types.TCount `thrift:"row_count,5,required" frugal:"5,required,i64" json:"row_count"` + DataSize types.TSize `thrift:"data_size,6,required" frugal:"6,required,i64" json:"data_size"` + StorageMedium *types.TStorageMedium `thrift:"storage_medium,7,optional" frugal:"7,optional,TStorageMedium" json:"storage_medium,omitempty"` + TransactionIds []types.TTransactionId `thrift:"transaction_ids,8,optional" frugal:"8,optional,list" json:"transaction_ids,omitempty"` + TotalVersionCount *int64 `thrift:"total_version_count,9,optional" 
frugal:"9,optional,i64" json:"total_version_count,omitempty"` + PathHash *int64 `thrift:"path_hash,10,optional" frugal:"10,optional,i64" json:"path_hash,omitempty"` + VersionMiss *bool `thrift:"version_miss,11,optional" frugal:"11,optional,bool" json:"version_miss,omitempty"` + Used *bool `thrift:"used,12,optional" frugal:"12,optional,bool" json:"used,omitempty"` + PartitionId *types.TPartitionId `thrift:"partition_id,13,optional" frugal:"13,optional,i64" json:"partition_id,omitempty"` + IsInMemory *bool `thrift:"is_in_memory,14,optional" frugal:"14,optional,bool" json:"is_in_memory,omitempty"` + ReplicaId *types.TReplicaId `thrift:"replica_id,15,optional" frugal:"15,optional,i64" json:"replica_id,omitempty"` + RemoteDataSize *types.TSize `thrift:"remote_data_size,16,optional" frugal:"16,optional,i64" json:"remote_data_size,omitempty"` + CooldownTerm *int64 `thrift:"cooldown_term,19,optional" frugal:"19,optional,i64" json:"cooldown_term,omitempty"` + CooldownMetaId *types.TUniqueId `thrift:"cooldown_meta_id,20,optional" frugal:"20,optional,types.TUniqueId" json:"cooldown_meta_id,omitempty"` + VisibleVersionCount *int64 `thrift:"visible_version_count,21,optional" frugal:"21,optional,i64" json:"visible_version_count,omitempty"` + IsPersistent *bool `thrift:"is_persistent,1000,optional" frugal:"1000,optional,bool" json:"is_persistent,omitempty"` } func NewTTabletInfo() *TTabletInfo { @@ -121,7 +123,6 @@ func NewTTabletInfo() *TTabletInfo { } func (p *TTabletInfo) InitDefault() { - *p = TTabletInfo{} } func (p *TTabletInfo) GetTabletId() (v types.TTabletId) { @@ -166,13 +167,13 @@ func (p *TTabletInfo) GetTransactionIds() (v []types.TTransactionId) { return p.TransactionIds } -var TTabletInfo_VersionCount_DEFAULT int64 +var TTabletInfo_TotalVersionCount_DEFAULT int64 -func (p *TTabletInfo) GetVersionCount() (v int64) { - if !p.IsSetVersionCount() { - return TTabletInfo_VersionCount_DEFAULT +func (p *TTabletInfo) GetTotalVersionCount() (v int64) { + if !p.IsSetTotalVersionCount() { + return TTabletInfo_TotalVersionCount_DEFAULT } - return *p.VersionCount + return *p.TotalVersionCount } var TTabletInfo_PathHash_DEFAULT int64 @@ -255,6 +256,24 @@ func (p *TTabletInfo) GetCooldownMetaId() (v *types.TUniqueId) { } return p.CooldownMetaId } + +var TTabletInfo_VisibleVersionCount_DEFAULT int64 + +func (p *TTabletInfo) GetVisibleVersionCount() (v int64) { + if !p.IsSetVisibleVersionCount() { + return TTabletInfo_VisibleVersionCount_DEFAULT + } + return *p.VisibleVersionCount +} + +var TTabletInfo_IsPersistent_DEFAULT bool + +func (p *TTabletInfo) GetIsPersistent() (v bool) { + if !p.IsSetIsPersistent() { + return TTabletInfo_IsPersistent_DEFAULT + } + return *p.IsPersistent +} func (p *TTabletInfo) SetTabletId(val types.TTabletId) { p.TabletId = val } @@ -279,8 +298,8 @@ func (p *TTabletInfo) SetStorageMedium(val *types.TStorageMedium) { func (p *TTabletInfo) SetTransactionIds(val []types.TTransactionId) { p.TransactionIds = val } -func (p *TTabletInfo) SetVersionCount(val *int64) { - p.VersionCount = val +func (p *TTabletInfo) SetTotalVersionCount(val *int64) { + p.TotalVersionCount = val } func (p *TTabletInfo) SetPathHash(val *int64) { p.PathHash = val @@ -309,26 +328,34 @@ func (p *TTabletInfo) SetCooldownTerm(val *int64) { func (p *TTabletInfo) SetCooldownMetaId(val *types.TUniqueId) { p.CooldownMetaId = val } +func (p *TTabletInfo) SetVisibleVersionCount(val *int64) { + p.VisibleVersionCount = val +} +func (p *TTabletInfo) SetIsPersistent(val *bool) { + p.IsPersistent = val +} var 
fieldIDToName_TTabletInfo = map[int16]string{ - 1: "tablet_id", - 2: "schema_hash", - 3: "version", - 4: "version_hash", - 5: "row_count", - 6: "data_size", - 7: "storage_medium", - 8: "transaction_ids", - 9: "version_count", - 10: "path_hash", - 11: "version_miss", - 12: "used", - 13: "partition_id", - 14: "is_in_memory", - 15: "replica_id", - 16: "remote_data_size", - 19: "cooldown_term", - 20: "cooldown_meta_id", + 1: "tablet_id", + 2: "schema_hash", + 3: "version", + 4: "version_hash", + 5: "row_count", + 6: "data_size", + 7: "storage_medium", + 8: "transaction_ids", + 9: "total_version_count", + 10: "path_hash", + 11: "version_miss", + 12: "used", + 13: "partition_id", + 14: "is_in_memory", + 15: "replica_id", + 16: "remote_data_size", + 19: "cooldown_term", + 20: "cooldown_meta_id", + 21: "visible_version_count", + 1000: "is_persistent", } func (p *TTabletInfo) IsSetStorageMedium() bool { @@ -339,8 +366,8 @@ func (p *TTabletInfo) IsSetTransactionIds() bool { return p.TransactionIds != nil } -func (p *TTabletInfo) IsSetVersionCount() bool { - return p.VersionCount != nil +func (p *TTabletInfo) IsSetTotalVersionCount() bool { + return p.TotalVersionCount != nil } func (p *TTabletInfo) IsSetPathHash() bool { @@ -379,6 +406,14 @@ func (p *TTabletInfo) IsSetCooldownMetaId() bool { return p.CooldownMetaId != nil } +func (p *TTabletInfo) IsSetVisibleVersionCount() bool { + return p.VisibleVersionCount != nil +} + +func (p *TTabletInfo) IsSetIsPersistent() bool { + return p.IsPersistent != nil +} + func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -410,10 +445,8 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -421,10 +454,8 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -432,10 +463,8 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -443,10 +472,8 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -454,10 +481,8 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRowCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { @@ -465,137 +490,126 @@ func (p *TTabletInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDataSize = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err 
!= nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.BOOL { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.BOOL { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I64 { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.I64 { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.I64 { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.STRUCT { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.I64 { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.BOOL { + if err = 
p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -652,76 +666,91 @@ RequiredFieldNotSetError: } func (p *TTabletInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TTabletInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSchemaHash if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TTabletInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TVersion if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TTabletInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TVersionHash if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionHash = v + _field = v } + p.VersionHash = _field return nil } - func (p *TTabletInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TCount if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RowCount = v + _field = v } + p.RowCount = _field return nil } - func (p *TTabletInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DataSize = v + _field = v } + p.DataSize = _field return nil } - func (p *TTabletInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *types.TStorageMedium if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TStorageMedium(v) - p.StorageMedium = &tmp + _field = &tmp } + p.StorageMedium = _field return nil } - func (p *TTabletInfo) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TransactionIds = make([]types.TTransactionId, 0, size) + _field := make([]types.TTransactionId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTransactionId if v, err := iprot.ReadI64(); err != nil { return err @@ -729,100 +758,141 @@ func (p *TTabletInfo) ReadField8(iprot thrift.TProtocol) error { _elem = v } - p.TransactionIds = append(p.TransactionIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TransactionIds = _field return nil } - func (p *TTabletInfo) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VersionCount = &v + _field = &v } + p.TotalVersionCount = _field return nil } - func (p *TTabletInfo) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PathHash = &v + _field = &v } + p.PathHash = _field return nil } - func (p *TTabletInfo) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.VersionMiss = &v + _field = &v } + p.VersionMiss = _field return nil } - func (p *TTabletInfo) ReadField12(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Used = &v + _field = &v } + p.Used = _field return nil } - func (p 
*TTabletInfo) ReadField13(iprot thrift.TProtocol) error { + + var _field *types.TPartitionId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = &v + _field = &v } + p.PartitionId = _field return nil } - func (p *TTabletInfo) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsInMemory = &v + _field = &v } + p.IsInMemory = _field return nil } - func (p *TTabletInfo) ReadField15(iprot thrift.TProtocol) error { + + var _field *types.TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = &v + _field = &v } + p.ReplicaId = _field return nil } - func (p *TTabletInfo) ReadField16(iprot thrift.TProtocol) error { + + var _field *types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteDataSize = &v + _field = &v } + p.RemoteDataSize = _field return nil } - func (p *TTabletInfo) ReadField19(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CooldownTerm = &v + _field = &v } + p.CooldownTerm = _field return nil } - func (p *TTabletInfo) ReadField20(iprot thrift.TProtocol) error { - p.CooldownMetaId = types.NewTUniqueId() - if err := p.CooldownMetaId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.CooldownMetaId = _field + return nil +} +func (p *TTabletInfo) ReadField21(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.VisibleVersionCount = _field + return nil +} +func (p *TTabletInfo) ReadField1000(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsPersistent = _field return nil } @@ -904,7 +974,14 @@ func (p *TTabletInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 20 goto WriteFieldError } - + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1072,11 +1149,11 @@ WriteFieldEndError: } func (p *TTabletInfo) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetVersionCount() { - if err = oprot.WriteFieldBegin("version_count", thrift.I64, 9); err != nil { + if p.IsSetTotalVersionCount() { + if err = oprot.WriteFieldBegin("total_version_count", thrift.I64, 9); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.VersionCount); err != nil { + if err := oprot.WriteI64(*p.TotalVersionCount); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -1261,11 +1338,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } +func (p *TTabletInfo) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetVisibleVersionCount() { + if err = oprot.WriteFieldBegin("visible_version_count", thrift.I64, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.VisibleVersionCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 
21 end error: ", p), err) +} + +func (p *TTabletInfo) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetIsPersistent() { + if err = oprot.WriteFieldBegin("is_persistent", thrift.BOOL, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsPersistent); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + func (p *TTabletInfo) String() string { if p == nil { return "" } return fmt.Sprintf("TTabletInfo(%+v)", *p) + } func (p *TTabletInfo) DeepEqual(ano *TTabletInfo) bool { @@ -1298,7 +1414,7 @@ func (p *TTabletInfo) DeepEqual(ano *TTabletInfo) bool { if !p.Field8DeepEqual(ano.TransactionIds) { return false } - if !p.Field9DeepEqual(ano.VersionCount) { + if !p.Field9DeepEqual(ano.TotalVersionCount) { return false } if !p.Field10DeepEqual(ano.PathHash) { @@ -1328,6 +1444,12 @@ func (p *TTabletInfo) DeepEqual(ano *TTabletInfo) bool { if !p.Field20DeepEqual(ano.CooldownMetaId) { return false } + if !p.Field21DeepEqual(ano.VisibleVersionCount) { + return false + } + if !p.Field1000DeepEqual(ano.IsPersistent) { + return false + } return true } @@ -1400,12 +1522,12 @@ func (p *TTabletInfo) Field8DeepEqual(src []types.TTransactionId) bool { } func (p *TTabletInfo) Field9DeepEqual(src *int64) bool { - if p.VersionCount == src { + if p.TotalVersionCount == src { return true - } else if p.VersionCount == nil || src == nil { + } else if p.TotalVersionCount == nil || src == nil { return false } - if *p.VersionCount != *src { + if *p.TotalVersionCount != *src { return false } return true @@ -1513,26 +1635,52 @@ func (p *TTabletInfo) Field20DeepEqual(src *types.TUniqueId) bool { } return true } +func (p *TTabletInfo) Field21DeepEqual(src *int64) bool { + + if p.VisibleVersionCount == src { + return true + } else if p.VisibleVersionCount == nil || src == nil { + return false + } + if *p.VisibleVersionCount != *src { + return false + } + return true +} +func (p *TTabletInfo) Field1000DeepEqual(src *bool) bool { + + if p.IsPersistent == src { + return true + } else if p.IsPersistent == nil || src == nil { + return false + } + if *p.IsPersistent != *src { + return false + } + return true +} type TFinishTaskRequest struct { - Backend *types.TBackend `thrift:"backend,1,required" frugal:"1,required,types.TBackend" json:"backend"` - TaskType types.TTaskType `thrift:"task_type,2,required" frugal:"2,required,TTaskType" json:"task_type"` - Signature int64 `thrift:"signature,3,required" frugal:"3,required,i64" json:"signature"` - TaskStatus *status.TStatus `thrift:"task_status,4,required" frugal:"4,required,status.TStatus" json:"task_status"` - ReportVersion *int64 `thrift:"report_version,5,optional" frugal:"5,optional,i64" json:"report_version,omitempty"` - FinishTabletInfos []*TTabletInfo `thrift:"finish_tablet_infos,6,optional" frugal:"6,optional,list" json:"finish_tablet_infos,omitempty"` - TabletChecksum *int64 `thrift:"tablet_checksum,7,optional" frugal:"7,optional,i64" json:"tablet_checksum,omitempty"` - RequestVersion *int64 `thrift:"request_version,8,optional" frugal:"8,optional,i64" json:"request_version,omitempty"` - RequestVersionHash *int64 `thrift:"request_version_hash,9,optional" frugal:"9,optional,i64" json:"request_version_hash,omitempty"` - SnapshotPath *string 
`thrift:"snapshot_path,10,optional" frugal:"10,optional,string" json:"snapshot_path,omitempty"` - ErrorTabletIds []types.TTabletId `thrift:"error_tablet_ids,11,optional" frugal:"11,optional,list" json:"error_tablet_ids,omitempty"` - SnapshotFiles []string `thrift:"snapshot_files,12,optional" frugal:"12,optional,list" json:"snapshot_files,omitempty"` - TabletFiles map[types.TTabletId][]string `thrift:"tablet_files,13,optional" frugal:"13,optional,map>" json:"tablet_files,omitempty"` - DownloadedTabletIds []types.TTabletId `thrift:"downloaded_tablet_ids,14,optional" frugal:"14,optional,list" json:"downloaded_tablet_ids,omitempty"` - CopySize *int64 `thrift:"copy_size,15,optional" frugal:"15,optional,i64" json:"copy_size,omitempty"` - CopyTimeMs *int64 `thrift:"copy_time_ms,16,optional" frugal:"16,optional,i64" json:"copy_time_ms,omitempty"` - SuccTablets map[types.TTabletId]types.TVersion `thrift:"succ_tablets,17,optional" frugal:"17,optional,map" json:"succ_tablets,omitempty"` - TabletIdToDeltaNumRows map[int64]int64 `thrift:"tablet_id_to_delta_num_rows,18,optional" frugal:"18,optional,map" json:"tablet_id_to_delta_num_rows,omitempty"` + Backend *types.TBackend `thrift:"backend,1,required" frugal:"1,required,types.TBackend" json:"backend"` + TaskType types.TTaskType `thrift:"task_type,2,required" frugal:"2,required,TTaskType" json:"task_type"` + Signature int64 `thrift:"signature,3,required" frugal:"3,required,i64" json:"signature"` + TaskStatus *status.TStatus `thrift:"task_status,4,required" frugal:"4,required,status.TStatus" json:"task_status"` + ReportVersion *int64 `thrift:"report_version,5,optional" frugal:"5,optional,i64" json:"report_version,omitempty"` + FinishTabletInfos []*TTabletInfo `thrift:"finish_tablet_infos,6,optional" frugal:"6,optional,list" json:"finish_tablet_infos,omitempty"` + TabletChecksum *int64 `thrift:"tablet_checksum,7,optional" frugal:"7,optional,i64" json:"tablet_checksum,omitempty"` + RequestVersion *int64 `thrift:"request_version,8,optional" frugal:"8,optional,i64" json:"request_version,omitempty"` + RequestVersionHash *int64 `thrift:"request_version_hash,9,optional" frugal:"9,optional,i64" json:"request_version_hash,omitempty"` + SnapshotPath *string `thrift:"snapshot_path,10,optional" frugal:"10,optional,string" json:"snapshot_path,omitempty"` + ErrorTabletIds []types.TTabletId `thrift:"error_tablet_ids,11,optional" frugal:"11,optional,list" json:"error_tablet_ids,omitempty"` + SnapshotFiles []string `thrift:"snapshot_files,12,optional" frugal:"12,optional,list" json:"snapshot_files,omitempty"` + TabletFiles map[types.TTabletId][]string `thrift:"tablet_files,13,optional" frugal:"13,optional,map>" json:"tablet_files,omitempty"` + DownloadedTabletIds []types.TTabletId `thrift:"downloaded_tablet_ids,14,optional" frugal:"14,optional,list" json:"downloaded_tablet_ids,omitempty"` + CopySize *int64 `thrift:"copy_size,15,optional" frugal:"15,optional,i64" json:"copy_size,omitempty"` + CopyTimeMs *int64 `thrift:"copy_time_ms,16,optional" frugal:"16,optional,i64" json:"copy_time_ms,omitempty"` + SuccTablets map[types.TTabletId]types.TVersion `thrift:"succ_tablets,17,optional" frugal:"17,optional,map" json:"succ_tablets,omitempty"` + TableIdToDeltaNumRows map[int64]int64 `thrift:"table_id_to_delta_num_rows,18,optional" frugal:"18,optional,map" json:"table_id_to_delta_num_rows,omitempty"` + TableIdToTabletIdToDeltaNumRows map[int64]map[int64]int64 `thrift:"table_id_to_tablet_id_to_delta_num_rows,19,optional" frugal:"19,optional,map>" 
json:"table_id_to_tablet_id_to_delta_num_rows,omitempty"` + RespPartitions []*agentservice.TCalcDeleteBitmapPartitionInfo `thrift:"resp_partitions,20,optional" frugal:"20,optional,list" json:"resp_partitions,omitempty"` } func NewTFinishTaskRequest() *TFinishTaskRequest { @@ -1540,7 +1688,6 @@ func NewTFinishTaskRequest() *TFinishTaskRequest { } func (p *TFinishTaskRequest) InitDefault() { - *p = TFinishTaskRequest{} } var TFinishTaskRequest_Backend_DEFAULT *types.TBackend @@ -1686,13 +1833,31 @@ func (p *TFinishTaskRequest) GetSuccTablets() (v map[types.TTabletId]types.TVers return p.SuccTablets } -var TFinishTaskRequest_TabletIdToDeltaNumRows_DEFAULT map[int64]int64 +var TFinishTaskRequest_TableIdToDeltaNumRows_DEFAULT map[int64]int64 + +func (p *TFinishTaskRequest) GetTableIdToDeltaNumRows() (v map[int64]int64) { + if !p.IsSetTableIdToDeltaNumRows() { + return TFinishTaskRequest_TableIdToDeltaNumRows_DEFAULT + } + return p.TableIdToDeltaNumRows +} + +var TFinishTaskRequest_TableIdToTabletIdToDeltaNumRows_DEFAULT map[int64]map[int64]int64 + +func (p *TFinishTaskRequest) GetTableIdToTabletIdToDeltaNumRows() (v map[int64]map[int64]int64) { + if !p.IsSetTableIdToTabletIdToDeltaNumRows() { + return TFinishTaskRequest_TableIdToTabletIdToDeltaNumRows_DEFAULT + } + return p.TableIdToTabletIdToDeltaNumRows +} + +var TFinishTaskRequest_RespPartitions_DEFAULT []*agentservice.TCalcDeleteBitmapPartitionInfo -func (p *TFinishTaskRequest) GetTabletIdToDeltaNumRows() (v map[int64]int64) { - if !p.IsSetTabletIdToDeltaNumRows() { - return TFinishTaskRequest_TabletIdToDeltaNumRows_DEFAULT +func (p *TFinishTaskRequest) GetRespPartitions() (v []*agentservice.TCalcDeleteBitmapPartitionInfo) { + if !p.IsSetRespPartitions() { + return TFinishTaskRequest_RespPartitions_DEFAULT } - return p.TabletIdToDeltaNumRows + return p.RespPartitions } func (p *TFinishTaskRequest) SetBackend(val *types.TBackend) { p.Backend = val @@ -1745,8 +1910,14 @@ func (p *TFinishTaskRequest) SetCopyTimeMs(val *int64) { func (p *TFinishTaskRequest) SetSuccTablets(val map[types.TTabletId]types.TVersion) { p.SuccTablets = val } -func (p *TFinishTaskRequest) SetTabletIdToDeltaNumRows(val map[int64]int64) { - p.TabletIdToDeltaNumRows = val +func (p *TFinishTaskRequest) SetTableIdToDeltaNumRows(val map[int64]int64) { + p.TableIdToDeltaNumRows = val +} +func (p *TFinishTaskRequest) SetTableIdToTabletIdToDeltaNumRows(val map[int64]map[int64]int64) { + p.TableIdToTabletIdToDeltaNumRows = val +} +func (p *TFinishTaskRequest) SetRespPartitions(val []*agentservice.TCalcDeleteBitmapPartitionInfo) { + p.RespPartitions = val } var fieldIDToName_TFinishTaskRequest = map[int16]string{ @@ -1767,7 +1938,9 @@ var fieldIDToName_TFinishTaskRequest = map[int16]string{ 15: "copy_size", 16: "copy_time_ms", 17: "succ_tablets", - 18: "tablet_id_to_delta_num_rows", + 18: "table_id_to_delta_num_rows", + 19: "table_id_to_tablet_id_to_delta_num_rows", + 20: "resp_partitions", } func (p *TFinishTaskRequest) IsSetBackend() bool { @@ -1830,8 +2003,16 @@ func (p *TFinishTaskRequest) IsSetSuccTablets() bool { return p.SuccTablets != nil } -func (p *TFinishTaskRequest) IsSetTabletIdToDeltaNumRows() bool { - return p.TabletIdToDeltaNumRows != nil +func (p *TFinishTaskRequest) IsSetTableIdToDeltaNumRows() bool { + return p.TableIdToDeltaNumRows != nil +} + +func (p *TFinishTaskRequest) IsSetTableIdToTabletIdToDeltaNumRows() bool { + return p.TableIdToTabletIdToDeltaNumRows != nil +} + +func (p *TFinishTaskRequest) IsSetRespPartitions() bool { + return p.RespPartitions != 
nil } func (p *TFinishTaskRequest) Read(iprot thrift.TProtocol) (err error) { @@ -1863,10 +2044,8 @@ func (p *TFinishTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBackend = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -1874,10 +2053,8 @@ func (p *TFinishTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTaskType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -1885,10 +2062,8 @@ func (p *TFinishTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSignature = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { @@ -1896,157 +2071,142 @@ func (p *TFinishTaskRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTaskStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.LIST { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.LIST { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); 
err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.MAP { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.LIST { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I64 { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.I64 { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.MAP { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.MAP { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.MAP { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.LIST { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2093,111 +2253,129 @@ RequiredFieldNotSetError: } func (p *TFinishTaskRequest) ReadField1(iprot thrift.TProtocol) error { - p.Backend = types.NewTBackend() - if err := p.Backend.Read(iprot); err != nil { + _field := types.NewTBackend() + if err := _field.Read(iprot); err != nil { return err } + p.Backend = _field return nil } - func (p *TFinishTaskRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TTaskType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TaskType = types.TTaskType(v) + _field = types.TTaskType(v) } + p.TaskType = _field return nil } - func (p *TFinishTaskRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Signature = v + _field = v } + p.Signature = _field return nil } - func (p *TFinishTaskRequest) ReadField4(iprot thrift.TProtocol) error { - p.TaskStatus = status.NewTStatus() - if err := p.TaskStatus.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.TaskStatus = _field return nil } - func (p *TFinishTaskRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReportVersion = &v + _field = &v } + p.ReportVersion = _field 
return nil } - func (p *TFinishTaskRequest) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.FinishTabletInfos = make([]*TTabletInfo, 0, size) + _field := make([]*TTabletInfo, 0, size) + values := make([]TTabletInfo, size) for i := 0; i < size; i++ { - _elem := NewTTabletInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.FinishTabletInfos = append(p.FinishTabletInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.FinishTabletInfos = _field return nil } - func (p *TFinishTaskRequest) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletChecksum = &v + _field = &v } + p.TabletChecksum = _field return nil } - func (p *TFinishTaskRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RequestVersion = &v + _field = &v } + p.RequestVersion = _field return nil } - func (p *TFinishTaskRequest) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RequestVersionHash = &v + _field = &v } + p.RequestVersionHash = _field return nil } - func (p *TFinishTaskRequest) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SnapshotPath = &v + _field = &v } + p.SnapshotPath = _field return nil } - func (p *TFinishTaskRequest) ReadField11(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ErrorTabletIds = make([]types.TTabletId, 0, size) + _field := make([]types.TTabletId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err @@ -2205,21 +2383,22 @@ func (p *TFinishTaskRequest) ReadField11(iprot thrift.TProtocol) error { _elem = v } - p.ErrorTabletIds = append(p.ErrorTabletIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ErrorTabletIds = _field return nil } - func (p *TFinishTaskRequest) ReadField12(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SnapshotFiles = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -2227,20 +2406,20 @@ func (p *TFinishTaskRequest) ReadField12(iprot thrift.TProtocol) error { _elem = v } - p.SnapshotFiles = append(p.SnapshotFiles, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SnapshotFiles = _field return nil } - func (p *TFinishTaskRequest) ReadField13(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.TabletFiles = make(map[types.TTabletId][]string, size) + _field := make(map[types.TTabletId][]string, size) for i := 0; i < size; i++ { var _key types.TTabletId if v, err := iprot.ReadI64(); err != nil { @@ -2248,13 +2427,13 @@ func (p *TFinishTaskRequest) ReadField13(iprot thrift.TProtocol) error { } else { _key = v } - _, size, err := iprot.ReadListBegin() if err != nil { return err } _val := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ 
-2268,21 +2447,22 @@ func (p *TFinishTaskRequest) ReadField13(iprot thrift.TProtocol) error { return err } - p.TabletFiles[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.TabletFiles = _field return nil } - func (p *TFinishTaskRequest) ReadField14(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DownloadedTabletIds = make([]types.TTabletId, 0, size) + _field := make([]types.TTabletId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err @@ -2290,38 +2470,42 @@ func (p *TFinishTaskRequest) ReadField14(iprot thrift.TProtocol) error { _elem = v } - p.DownloadedTabletIds = append(p.DownloadedTabletIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DownloadedTabletIds = _field return nil } - func (p *TFinishTaskRequest) ReadField15(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CopySize = &v + _field = &v } + p.CopySize = _field return nil } - func (p *TFinishTaskRequest) ReadField16(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CopyTimeMs = &v + _field = &v } + p.CopyTimeMs = _field return nil } - func (p *TFinishTaskRequest) ReadField17(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.SuccTablets = make(map[types.TTabletId]types.TVersion, size) + _field := make(map[types.TTabletId]types.TVersion, size) for i := 0; i < size; i++ { var _key types.TTabletId if v, err := iprot.ReadI64(); err != nil { @@ -2337,20 +2521,20 @@ func (p *TFinishTaskRequest) ReadField17(iprot thrift.TProtocol) error { _val = v } - p.SuccTablets[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.SuccTablets = _field return nil } - func (p *TFinishTaskRequest) ReadField18(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.TabletIdToDeltaNumRows = make(map[int64]int64, size) + _field := make(map[int64]int64, size) for i := 0; i < size; i++ { var _key int64 if v, err := iprot.ReadI64(); err != nil { @@ -2366,11 +2550,82 @@ func (p *TFinishTaskRequest) ReadField18(iprot thrift.TProtocol) error { _val = v } - p.TabletIdToDeltaNumRows[_key] = _val + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TableIdToDeltaNumRows = _field + return nil +} +func (p *TFinishTaskRequest) ReadField19(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int64]map[int64]int64, size) + for i := 0; i < size; i++ { + var _key int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _val := make(map[int64]int64, size) + for i := 0; i < size; i++ { + var _key1 int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key1 = v + } + + var _val1 int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val1 = v + } + + _val[_key1] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.TableIdToTabletIdToDeltaNumRows = _field + return nil +} +func (p 
*TFinishTaskRequest) ReadField20(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*agentservice.TCalcDeleteBitmapPartitionInfo, 0, size) + values := make([]agentservice.TCalcDeleteBitmapPartitionInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.RespPartitions = _field return nil } @@ -2452,7 +2707,14 @@ func (p *TFinishTaskRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 18 goto WriteFieldError } - + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2724,11 +2986,9 @@ func (p *TFinishTaskRequest) writeField13(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.TabletFiles { - if err := oprot.WriteI64(k); err != nil { return err } - if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { return err } @@ -2829,11 +3089,9 @@ func (p *TFinishTaskRequest) writeField17(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.SuccTablets { - if err := oprot.WriteI64(k); err != nil { return err } - if err := oprot.WriteI64(v); err != nil { return err } @@ -2853,19 +3111,17 @@ WriteFieldEndError: } func (p *TFinishTaskRequest) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetTabletIdToDeltaNumRows() { - if err = oprot.WriteFieldBegin("tablet_id_to_delta_num_rows", thrift.MAP, 18); err != nil { + if p.IsSetTableIdToDeltaNumRows() { + if err = oprot.WriteFieldBegin("table_id_to_delta_num_rows", thrift.MAP, 18); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.TabletIdToDeltaNumRows)); err != nil { + if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.TableIdToDeltaNumRows)); err != nil { return err } - for k, v := range p.TabletIdToDeltaNumRows { - + for k, v := range p.TableIdToDeltaNumRows { if err := oprot.WriteI64(k); err != nil { return err } - if err := oprot.WriteI64(v); err != nil { return err } @@ -2884,11 +3140,80 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } +func (p *TFinishTaskRequest) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetTableIdToTabletIdToDeltaNumRows() { + if err = oprot.WriteFieldBegin("table_id_to_tablet_id_to_delta_num_rows", thrift.MAP, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.MAP, len(p.TableIdToTabletIdToDeltaNumRows)); err != nil { + return err + } + for k, v := range p.TableIdToTabletIdToDeltaNumRows { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(v)); err != nil { + return err + } + for k, v := range v { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), 
err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TFinishTaskRequest) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetRespPartitions() { + if err = oprot.WriteFieldBegin("resp_partitions", thrift.LIST, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RespPartitions)); err != nil { + return err + } + for _, v := range p.RespPartitions { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + func (p *TFinishTaskRequest) String() string { if p == nil { return "" } return fmt.Sprintf("TFinishTaskRequest(%+v)", *p) + } func (p *TFinishTaskRequest) DeepEqual(ano *TFinishTaskRequest) bool { @@ -2948,7 +3273,13 @@ func (p *TFinishTaskRequest) DeepEqual(ano *TFinishTaskRequest) bool { if !p.Field17DeepEqual(ano.SuccTablets) { return false } - if !p.Field18DeepEqual(ano.TabletIdToDeltaNumRows) { + if !p.Field18DeepEqual(ano.TableIdToDeltaNumRows) { + return false + } + if !p.Field19DeepEqual(ano.TableIdToTabletIdToDeltaNumRows) { + return false + } + if !p.Field20DeepEqual(ano.RespPartitions) { return false } return true @@ -3152,10 +3483,10 @@ func (p *TFinishTaskRequest) Field17DeepEqual(src map[types.TTabletId]types.TVer } func (p *TFinishTaskRequest) Field18DeepEqual(src map[int64]int64) bool { - if len(p.TabletIdToDeltaNumRows) != len(src) { + if len(p.TableIdToDeltaNumRows) != len(src) { return false } - for k, v := range p.TabletIdToDeltaNumRows { + for k, v := range p.TableIdToDeltaNumRows { _src := src[k] if v != _src { return false @@ -3163,6 +3494,38 @@ func (p *TFinishTaskRequest) Field18DeepEqual(src map[int64]int64) bool { } return true } +func (p *TFinishTaskRequest) Field19DeepEqual(src map[int64]map[int64]int64) bool { + + if len(p.TableIdToTabletIdToDeltaNumRows) != len(src) { + return false + } + for k, v := range p.TableIdToTabletIdToDeltaNumRows { + _src := src[k] + if len(v) != len(_src) { + return false + } + for k, v := range v { + _src1 := _src[k] + if v != _src1 { + return false + } + } + } + return true +} +func (p *TFinishTaskRequest) Field20DeepEqual(src []*agentservice.TCalcDeleteBitmapPartitionInfo) bool { + + if len(p.RespPartitions) != len(src) { + return false + } + for i, v := range p.RespPartitions { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} type TTablet struct { TabletInfos []*TTabletInfo `thrift:"tablet_infos,1,required" frugal:"1,required,list" json:"tablet_infos"` @@ -3173,7 +3536,6 @@ func NewTTablet() *TTablet { } func (p *TTablet) InitDefault() { - *p = TTablet{} } func (p *TTablet) GetTabletInfos() (v []*TTabletInfo) { @@ -3213,17 +3575,14 @@ func (p *TTablet) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletInfos = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3259,18 +3618,22 @@ func (p *TTablet) 
ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.TabletInfos = make([]*TTabletInfo, 0, size) + _field := make([]*TTabletInfo, 0, size) + values := make([]TTabletInfo, size) for i := 0; i < size; i++ { - _elem := NewTTabletInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TabletInfos = append(p.TabletInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletInfos = _field return nil } @@ -3284,7 +3647,6 @@ func (p *TTablet) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3333,6 +3695,7 @@ func (p *TTablet) String() string { return "" } return fmt.Sprintf("TTablet(%+v)", *p) + } func (p *TTablet) DeepEqual(ano *TTablet) bool { @@ -3378,7 +3741,6 @@ func NewTDisk() *TDisk { } func (p *TDisk) InitDefault() { - *p = TDisk{} } func (p *TDisk) GetRootPath() (v string) { @@ -3530,10 +3892,8 @@ func (p *TDisk) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRootPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -3541,10 +3901,8 @@ func (p *TDisk) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDiskTotalCapacity = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -3552,10 +3910,8 @@ func (p *TDisk) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDataUsedCapacity = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { @@ -3563,67 +3919,54 @@ func (p *TDisk) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUsed = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3670,84 +4013,103 @@ RequiredFieldNotSetError: } func (p *TDisk) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RootPath = v + _field = v } + p.RootPath = _field return nil } - func (p *TDisk) ReadField2(iprot thrift.TProtocol) error { + + var _field types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DiskTotalCapacity = v + _field = v } + p.DiskTotalCapacity = _field return nil } - func (p *TDisk) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DataUsedCapacity = v + _field = v } + p.DataUsedCapacity = _field return nil } - func (p *TDisk) ReadField4(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Used = v + _field = v } + p.Used = _field return nil } - func (p *TDisk) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DiskAvailableCapacity = &v + _field = &v } + p.DiskAvailableCapacity = _field return nil } - func (p *TDisk) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PathHash = &v + _field = &v } + p.PathHash = _field return nil } - func (p *TDisk) ReadField7(iprot thrift.TProtocol) error { + + var _field *types.TStorageMedium if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TStorageMedium(v) - p.StorageMedium = &tmp + _field = &tmp } + p.StorageMedium = _field return nil } - func (p *TDisk) ReadField8(iprot thrift.TProtocol) error { + + var _field *types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RemoteUsedCapacity = &v + _field = &v } + p.RemoteUsedCapacity = _field return nil } - func (p *TDisk) ReadField9(iprot thrift.TProtocol) error { + + var _field *types.TSize if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TrashUsedCapacity = &v + _field = &v } + p.TrashUsedCapacity = _field return nil } @@ -3793,7 +4155,6 @@ func (p *TDisk) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3980,6 +4341,7 @@ func (p *TDisk) String() string { return "" } return fmt.Sprintf("TDisk(%+v)", *p) + } func (p *TDisk) DeepEqual(ano *TDisk) bool { @@ -4117,7 +4479,6 @@ func NewTPluginInfo() *TPluginInfo { } func (p *TPluginInfo) InitDefault() { - *p = TPluginInfo{} } func (p *TPluginInfo) GetPluginName() (v string) { @@ -4166,10 +4527,8 @@ func (p *TPluginInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPluginName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -4177,17 +4536,14 @@ func (p *TPluginInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4224,20 +4580,25 @@ RequiredFieldNotSetError: } func (p *TPluginInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.PluginName = v + _field = v } + p.PluginName = _field return nil } - func (p *TPluginInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = v + _field = v } + p.Type = _field return nil } @@ -4255,7 +4616,6 @@ func (p *TPluginInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4313,6 +4673,7 @@ func (p *TPluginInfo) String() string { return "" } return fmt.Sprintf("TPluginInfo(%+v)", *p) + } func (p *TPluginInfo) DeepEqual(ano *TPluginInfo) bool { @@ -4346,18 +4707,20 @@ func (p *TPluginInfo) Field2DeepEqual(src int32) bool { } type TReportRequest struct { - Backend *types.TBackend `thrift:"backend,1,required" frugal:"1,required,types.TBackend" json:"backend"` - ReportVersion *int64 `thrift:"report_version,2,optional" frugal:"2,optional,i64" json:"report_version,omitempty"` - Tasks map[types.TTaskType][]int64 `thrift:"tasks,3,optional" frugal:"3,optional,map>" json:"tasks,omitempty"` - Tablets map[types.TTabletId]*TTablet `thrift:"tablets,4,optional" frugal:"4,optional,map" json:"tablets,omitempty"` - Disks map[string]*TDisk `thrift:"disks,5,optional" frugal:"5,optional,map" json:"disks,omitempty"` - ForceRecovery *bool `thrift:"force_recovery,6,optional" frugal:"6,optional,bool" json:"force_recovery,omitempty"` - TabletList []*TTablet `thrift:"tablet_list,7,optional" frugal:"7,optional,list" json:"tablet_list,omitempty"` - TabletMaxCompactionScore *int64 `thrift:"tablet_max_compaction_score,8,optional" frugal:"8,optional,i64" json:"tablet_max_compaction_score,omitempty"` - StoragePolicy []*agentservice.TStoragePolicy `thrift:"storage_policy,9,optional" frugal:"9,optional,list" json:"storage_policy,omitempty"` - Resource []*agentservice.TStorageResource `thrift:"resource,10,optional" frugal:"10,optional,list" json:"resource,omitempty"` - NumCores int32 `thrift:"num_cores,11" frugal:"11,default,i32" json:"num_cores"` - PipelineExecutorSize int32 `thrift:"pipeline_executor_size,12" frugal:"12,default,i32" json:"pipeline_executor_size"` + Backend *types.TBackend `thrift:"backend,1,required" frugal:"1,required,types.TBackend" json:"backend"` + ReportVersion *int64 `thrift:"report_version,2,optional" frugal:"2,optional,i64" json:"report_version,omitempty"` + Tasks map[types.TTaskType][]int64 `thrift:"tasks,3,optional" frugal:"3,optional,map>" json:"tasks,omitempty"` + Tablets map[types.TTabletId]*TTablet `thrift:"tablets,4,optional" frugal:"4,optional,map" json:"tablets,omitempty"` + Disks map[string]*TDisk `thrift:"disks,5,optional" frugal:"5,optional,map" json:"disks,omitempty"` + ForceRecovery *bool `thrift:"force_recovery,6,optional" frugal:"6,optional,bool" json:"force_recovery,omitempty"` + TabletList []*TTablet `thrift:"tablet_list,7,optional" frugal:"7,optional,list" json:"tablet_list,omitempty"` + TabletMaxCompactionScore *int64 `thrift:"tablet_max_compaction_score,8,optional" frugal:"8,optional,i64" json:"tablet_max_compaction_score,omitempty"` + StoragePolicy 
[]*agentservice.TStoragePolicy `thrift:"storage_policy,9,optional" frugal:"9,optional,list" json:"storage_policy,omitempty"` + Resource []*agentservice.TStorageResource `thrift:"resource,10,optional" frugal:"10,optional,list" json:"resource,omitempty"` + NumCores int32 `thrift:"num_cores,11" frugal:"11,default,i32" json:"num_cores"` + PipelineExecutorSize int32 `thrift:"pipeline_executor_size,12" frugal:"12,default,i32" json:"pipeline_executor_size"` + PartitionsVersion map[types.TPartitionId]types.TVersion `thrift:"partitions_version,13,optional" frugal:"13,optional,map" json:"partitions_version,omitempty"` + NumTablets *int64 `thrift:"num_tablets,14,optional" frugal:"14,optional,i64" json:"num_tablets,omitempty"` } func NewTReportRequest() *TReportRequest { @@ -4365,7 +4728,6 @@ func NewTReportRequest() *TReportRequest { } func (p *TReportRequest) InitDefault() { - *p = TReportRequest{} } var TReportRequest_Backend_DEFAULT *types.TBackend @@ -4465,6 +4827,24 @@ func (p *TReportRequest) GetNumCores() (v int32) { func (p *TReportRequest) GetPipelineExecutorSize() (v int32) { return p.PipelineExecutorSize } + +var TReportRequest_PartitionsVersion_DEFAULT map[types.TPartitionId]types.TVersion + +func (p *TReportRequest) GetPartitionsVersion() (v map[types.TPartitionId]types.TVersion) { + if !p.IsSetPartitionsVersion() { + return TReportRequest_PartitionsVersion_DEFAULT + } + return p.PartitionsVersion +} + +var TReportRequest_NumTablets_DEFAULT int64 + +func (p *TReportRequest) GetNumTablets() (v int64) { + if !p.IsSetNumTablets() { + return TReportRequest_NumTablets_DEFAULT + } + return *p.NumTablets +} func (p *TReportRequest) SetBackend(val *types.TBackend) { p.Backend = val } @@ -4501,6 +4881,12 @@ func (p *TReportRequest) SetNumCores(val int32) { func (p *TReportRequest) SetPipelineExecutorSize(val int32) { p.PipelineExecutorSize = val } +func (p *TReportRequest) SetPartitionsVersion(val map[types.TPartitionId]types.TVersion) { + p.PartitionsVersion = val +} +func (p *TReportRequest) SetNumTablets(val *int64) { + p.NumTablets = val +} var fieldIDToName_TReportRequest = map[int16]string{ 1: "backend", @@ -4515,6 +4901,8 @@ var fieldIDToName_TReportRequest = map[int16]string{ 10: "resource", 11: "num_cores", 12: "pipeline_executor_size", + 13: "partitions_version", + 14: "num_tablets", } func (p *TReportRequest) IsSetBackend() bool { @@ -4557,6 +4945,14 @@ func (p *TReportRequest) IsSetResource() bool { return p.Resource != nil } +func (p *TReportRequest) IsSetPartitionsVersion() bool { + return p.PartitionsVersion != nil +} + +func (p *TReportRequest) IsSetNumTablets() bool { + return p.NumTablets != nil +} + func (p *TReportRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -4583,127 +4979,118 @@ func (p *TReportRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBackend = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.MAP { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.LIST { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.MAP { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4735,28 +5122,30 @@ RequiredFieldNotSetError: } func (p *TReportRequest) ReadField1(iprot thrift.TProtocol) error { - p.Backend = types.NewTBackend() - if err := p.Backend.Read(iprot); err != nil { + _field := types.NewTBackend() + if err := _field.Read(iprot); err != nil { return err } + p.Backend = _field return nil } - func (p *TReportRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReportVersion = &v + 
_field = &v } + p.ReportVersion = _field return nil } - func (p *TReportRequest) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Tasks = make(map[types.TTaskType][]int64, size) + _field := make(map[types.TTaskType][]int64, size) for i := 0; i < size; i++ { var _key types.TTaskType if v, err := iprot.ReadI32(); err != nil { @@ -4764,13 +5153,13 @@ func (p *TReportRequest) ReadField3(iprot thrift.TProtocol) error { } else { _key = types.TTaskType(v) } - _, size, err := iprot.ReadSetBegin() if err != nil { return err } _val := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -4784,20 +5173,21 @@ func (p *TReportRequest) ReadField3(iprot thrift.TProtocol) error { return err } - p.Tasks[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Tasks = _field return nil } - func (p *TReportRequest) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Tablets = make(map[types.TTabletId]*TTablet, size) + _field := make(map[types.TTabletId]*TTablet, size) + values := make([]TTablet, size) for i := 0; i < size; i++ { var _key types.TTabletId if v, err := iprot.ReadI64(); err != nil { @@ -4805,25 +5195,28 @@ func (p *TReportRequest) ReadField4(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTTablet() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.Tablets[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Tablets = _field return nil } - func (p *TReportRequest) ReadField5(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Disks = make(map[string]*TDisk, size) + _field := make(map[string]*TDisk, size) + values := make([]TDisk, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -4831,112 +5224,172 @@ func (p *TReportRequest) ReadField5(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTDisk() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.Disks[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Disks = _field return nil } - func (p *TReportRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ForceRecovery = &v + _field = &v } + p.ForceRecovery = _field return nil } - func (p *TReportRequest) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.TabletList = make([]*TTablet, 0, size) + _field := make([]*TTablet, 0, size) + values := make([]TTablet, size) for i := 0; i < size; i++ { - _elem := NewTTablet() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.TabletList = append(p.TabletList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletList = _field return nil } - func (p *TReportRequest) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletMaxCompactionScore = &v + _field = &v } + p.TabletMaxCompactionScore = _field return nil } - func (p *TReportRequest) ReadField9(iprot thrift.TProtocol) 
error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.StoragePolicy = make([]*agentservice.TStoragePolicy, 0, size) + _field := make([]*agentservice.TStoragePolicy, 0, size) + values := make([]agentservice.TStoragePolicy, size) for i := 0; i < size; i++ { - _elem := agentservice.NewTStoragePolicy() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.StoragePolicy = append(p.StoragePolicy, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.StoragePolicy = _field return nil } - func (p *TReportRequest) ReadField10(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Resource = make([]*agentservice.TStorageResource, 0, size) + _field := make([]*agentservice.TStorageResource, 0, size) + values := make([]agentservice.TStorageResource, size) for i := 0; i < size; i++ { - _elem := agentservice.NewTStorageResource() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Resource = append(p.Resource, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Resource = _field return nil } - func (p *TReportRequest) ReadField11(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumCores = v + _field = v } + p.NumCores = _field return nil } - func (p *TReportRequest) ReadField12(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.PipelineExecutorSize = v + _field = v + } + p.PipelineExecutorSize = _field + return nil +} +func (p *TReportRequest) ReadField13(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPartitionId]types.TVersion, size) + for i := 0; i < size; i++ { + var _key types.TPartitionId + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + + var _val types.TVersion + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PartitionsVersion = _field + return nil +} +func (p *TReportRequest) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.NumTablets = _field return nil } @@ -4994,7 +5447,14 @@ func (p *TReportRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } - + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5058,11 +5518,9 @@ func (p *TReportRequest) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Tasks { - if err := oprot.WriteI32(int32(k)); err != nil { return err } - if err := oprot.WriteSetBegin(thrift.I64, len(v)); err != nil { return err } @@ -5110,11 +5568,9 @@ func (p *TReportRequest) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Tablets { - if err := oprot.WriteI64(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -5142,11 +5598,9 @@ func (p *TReportRequest) writeField5(oprot 
thrift.TProtocol) (err error) { return err } for k, v := range p.Disks { - if err := oprot.WriteString(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -5318,11 +5772,61 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TReportRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionsVersion() { + if err = oprot.WriteFieldBegin("partitions_version", thrift.MAP, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.PartitionsVersion)); err != nil { + return err + } + for k, v := range p.PartitionsVersion { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TReportRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetNumTablets() { + if err = oprot.WriteFieldBegin("num_tablets", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.NumTablets); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + func (p *TReportRequest) String() string { if p == nil { return "" } return fmt.Sprintf("TReportRequest(%+v)", *p) + } func (p *TReportRequest) DeepEqual(ano *TReportRequest) bool { @@ -5367,6 +5871,12 @@ func (p *TReportRequest) DeepEqual(ano *TReportRequest) bool { if !p.Field12DeepEqual(ano.PipelineExecutorSize) { return false } + if !p.Field13DeepEqual(ano.PartitionsVersion) { + return false + } + if !p.Field14DeepEqual(ano.NumTablets) { + return false + } return true } @@ -5511,6 +6021,31 @@ func (p *TReportRequest) Field12DeepEqual(src int32) bool { } return true } +func (p *TReportRequest) Field13DeepEqual(src map[types.TPartitionId]types.TVersion) bool { + + if len(p.PartitionsVersion) != len(src) { + return false + } + for k, v := range p.PartitionsVersion { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TReportRequest) Field14DeepEqual(src *int64) bool { + + if p.NumTablets == src { + return true + } else if p.NumTablets == nil || src == nil { + return false + } + if *p.NumTablets != *src { + return false + } + return true +} type TMasterResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` @@ -5521,7 +6056,6 @@ func NewTMasterResult_() *TMasterResult_ { } func (p *TMasterResult_) InitDefault() { - *p = TMasterResult_{} } var TMasterResult__Status_DEFAULT *status.TStatus @@ -5570,17 +6104,14 @@ func (p *TMasterResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { 
goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5612,10 +6143,11 @@ RequiredFieldNotSetError: } func (p *TMasterResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } @@ -5629,7 +6161,6 @@ func (p *TMasterResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5670,6 +6201,7 @@ func (p *TMasterResult_) String() string { return "" } return fmt.Sprintf("TMasterResult_(%+v)", *p) + } func (p *TMasterResult_) DeepEqual(ano *TMasterResult_) bool { @@ -5701,7 +6233,6 @@ func NewTResourceGroup() *TResourceGroup { } func (p *TResourceGroup) InitDefault() { - *p = TResourceGroup{} } func (p *TResourceGroup) GetResourceByType() (v map[TResourceType]int32) { @@ -5741,17 +6272,14 @@ func (p *TResourceGroup) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResourceByType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5787,7 +6315,7 @@ func (p *TResourceGroup) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.ResourceByType = make(map[TResourceType]int32, size) + _field := make(map[TResourceType]int32, size) for i := 0; i < size; i++ { var _key TResourceType if v, err := iprot.ReadI32(); err != nil { @@ -5803,11 +6331,12 @@ func (p *TResourceGroup) ReadField1(iprot thrift.TProtocol) error { _val = v } - p.ResourceByType[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ResourceByType = _field return nil } @@ -5821,7 +6350,6 @@ func (p *TResourceGroup) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5848,11 +6376,9 @@ func (p *TResourceGroup) writeField1(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.ResourceByType { - if err := oprot.WriteI32(int32(k)); err != nil { return err } - if err := oprot.WriteI32(v); err != nil { return err } @@ -5875,6 +6401,7 @@ func (p *TResourceGroup) String() string { return "" } return fmt.Sprintf("TResourceGroup(%+v)", *p) + } func (p *TResourceGroup) DeepEqual(ano *TResourceGroup) bool { @@ -5913,7 +6440,6 @@ func NewTUserResource() *TUserResource { } func (p *TUserResource) InitDefault() { - *p = TUserResource{} } var TUserResource_Resource_DEFAULT *TResourceGroup @@ -5971,10 +6497,8 @@ func (p *TUserResource) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResource = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.MAP { @@ -5982,17 +6506,14 @@ func (p *TUserResource) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetShareByGroup = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = 
iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6029,19 +6550,19 @@ RequiredFieldNotSetError: } func (p *TUserResource) ReadField1(iprot thrift.TProtocol) error { - p.Resource = NewTResourceGroup() - if err := p.Resource.Read(iprot); err != nil { + _field := NewTResourceGroup() + if err := _field.Read(iprot); err != nil { return err } + p.Resource = _field return nil } - func (p *TUserResource) ReadField2(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ShareByGroup = make(map[string]int32, size) + _field := make(map[string]int32, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -6057,11 +6578,12 @@ func (p *TUserResource) ReadField2(iprot thrift.TProtocol) error { _val = v } - p.ShareByGroup[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ShareByGroup = _field return nil } @@ -6079,7 +6601,6 @@ func (p *TUserResource) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6123,11 +6644,9 @@ func (p *TUserResource) writeField2(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.ShareByGroup { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteI32(v); err != nil { return err } @@ -6150,6 +6669,7 @@ func (p *TUserResource) String() string { return "" } return fmt.Sprintf("TUserResource(%+v)", *p) + } func (p *TUserResource) DeepEqual(ano *TUserResource) bool { @@ -6199,7 +6719,6 @@ func NewTFetchResourceResult_() *TFetchResourceResult_ { } func (p *TFetchResourceResult_) InitDefault() { - *p = TFetchResourceResult_{} } func (p *TFetchResourceResult_) GetProtocolVersion() (v agentservice.TAgentServiceVersion) { @@ -6257,10 +6776,8 @@ func (p *TFetchResourceResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -6268,10 +6785,8 @@ func (p *TFetchResourceResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResourceVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { @@ -6279,17 +6794,14 @@ func (p *TFetchResourceResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResourceByUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6331,29 +6843,34 @@ RequiredFieldNotSetError: } func (p *TFetchResourceResult_) ReadField1(iprot thrift.TProtocol) error { + + var _field agentservice.TAgentServiceVersion if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ProtocolVersion = agentservice.TAgentServiceVersion(v) + _field = agentservice.TAgentServiceVersion(v) } + p.ProtocolVersion = _field return nil } - func (p *TFetchResourceResult_) ReadField2(iprot 
thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ResourceVersion = v + _field = v } + p.ResourceVersion = _field return nil } - func (p *TFetchResourceResult_) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ResourceByUser = make(map[string]*TUserResource, size) + _field := make(map[string]*TUserResource, size) + values := make([]TUserResource, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -6361,16 +6878,19 @@ func (p *TFetchResourceResult_) ReadField3(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTUserResource() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.ResourceByUser[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ResourceByUser = _field return nil } @@ -6392,7 +6912,6 @@ func (p *TFetchResourceResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6453,11 +6972,9 @@ func (p *TFetchResourceResult_) writeField3(oprot thrift.TProtocol) (err error) return err } for k, v := range p.ResourceByUser { - if err := oprot.WriteString(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -6480,6 +6997,7 @@ func (p *TFetchResourceResult_) String() string { return "" } return fmt.Sprintf("TFetchResourceResult_(%+v)", *p) + } func (p *TFetchResourceResult_) DeepEqual(ano *TFetchResourceResult_) bool { diff --git a/pkg/rpc/kitex_gen/masterservice/k-MasterService.go b/pkg/rpc/kitex_gen/masterservice/k-MasterService.go index 8dce3314..e3f9de5b 100644 --- a/pkg/rpc/kitex_gen/masterservice/k-MasterService.go +++ b/pkg/rpc/kitex_gen/masterservice/k-MasterService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package masterservice @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" @@ -317,6 +318,34 @@ func (p *TTabletInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 21: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -519,7 +548,7 @@ func (p *TTabletInfo) FastReadField9(buf []byte) (int, error) { return offset, err } else { offset += l - p.VersionCount = &v + p.TotalVersionCount = &v } return offset, nil @@ -642,6 +671,32 @@ func (p *TTabletInfo) FastReadField20(buf []byte) (int, error) { return offset, nil } +func (p *TTabletInfo) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.VisibleVersionCount = &v + + } + return offset, nil +} + +func (p *TTabletInfo) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsPersistent = &v + + } + return offset, nil +} + // for compatibility func (p *TTabletInfo) FastWrite(buf []byte) int { return 0 @@ -666,6 +721,8 @@ func (p *TTabletInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWri offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField20(buf[offset:], binaryWriter) @@ -697,6 +754,8 @@ func (p *TTabletInfo) BLength() int { l += p.field16Length() l += p.field19Length() l += p.field20Length() + l += p.field21Length() + l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -789,9 +848,9 @@ func (p *TTabletInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWri func (p *TTabletInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetVersionCount() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version_count", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.VersionCount) + if p.IsSetTotalVersionCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_version_count", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalVersionCount) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } @@ -896,6 +955,28 @@ func (p *TTabletInfo) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWr return offset } +func (p 
*TTabletInfo) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetVisibleVersionCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_count", thrift.I64, 21) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersionCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTabletInfo) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsPersistent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_persistent", thrift.BOOL, 1000) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsPersistent) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTabletInfo) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1) @@ -976,9 +1057,9 @@ func (p *TTabletInfo) field8Length() int { func (p *TTabletInfo) field9Length() int { l := 0 - if p.IsSetVersionCount() { - l += bthrift.Binary.FieldBeginLength("version_count", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.VersionCount) + if p.IsSetTotalVersionCount() { + l += bthrift.Binary.FieldBeginLength("total_version_count", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.TotalVersionCount) l += bthrift.Binary.FieldEndLength() } @@ -1083,6 +1164,28 @@ func (p *TTabletInfo) field20Length() int { return l } +func (p *TTabletInfo) field21Length() int { + l := 0 + if p.IsSetVisibleVersionCount() { + l += bthrift.Binary.FieldBeginLength("visible_version_count", thrift.I64, 21) + l += bthrift.Binary.I64Length(*p.VisibleVersionCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTabletInfo) field1000Length() int { + l := 0 + if p.IsSetIsPersistent() { + l += bthrift.Binary.FieldBeginLength("is_persistent", thrift.BOOL, 1000) + l += bthrift.Binary.BoolLength(*p.IsPersistent) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TFinishTaskRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -1365,6 +1468,34 @@ func (p *TFinishTaskRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 19: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1786,7 +1917,7 @@ func (p *TFinishTaskRequest) FastReadField18(buf []byte) (int, error) { if err != nil { return offset, err } - p.TabletIdToDeltaNumRows = make(map[int64]int64, size) + p.TableIdToDeltaNumRows = make(map[int64]int64, size) for i := 0; i < size; i++ { var _key int64 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { @@ -1808,7 +1939,72 @@ func (p *TFinishTaskRequest) FastReadField18(buf []byte) (int, error) { } - p.TabletIdToDeltaNumRows[_key] = _val + p.TableIdToDeltaNumRows[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFinishTaskRequest) FastReadField19(buf 
[]byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TableIdToTabletIdToDeltaNumRows = make(map[int64]map[int64]int64, size) + for i := 0; i < size; i++ { + var _key int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _val := make(map[int64]int64, size) + for i := 0; i < size; i++ { + var _key1 int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key1 = v + + } + + var _val1 int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val1 = v + + } + + _val[_key1] = _val1 + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TableIdToTabletIdToDeltaNumRows[_key] = _val } if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err @@ -1818,6 +2014,33 @@ func (p *TFinishTaskRequest) FastReadField18(buf []byte) (int, error) { return offset, nil } +func (p *TFinishTaskRequest) FastReadField20(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.RespPartitions = make([]*agentservice.TCalcDeleteBitmapPartitionInfo, 0, size) + for i := 0; i < size; i++ { + _elem := agentservice.NewTCalcDeleteBitmapPartitionInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.RespPartitions = append(p.RespPartitions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TFinishTaskRequest) FastWrite(buf []byte) int { return 0 @@ -1845,6 +2068,8 @@ func (p *TFinishTaskRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bi offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField17(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1873,6 +2098,8 @@ func (p *TFinishTaskRequest) BLength() int { l += p.field16Length() l += p.field17Length() l += p.field18Length() + l += p.field19Length() + l += p.field20Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -2119,12 +2346,12 @@ func (p *TFinishTaskRequest) fastWriteField17(buf []byte, binaryWriter bthrift.B func (p *TFinishTaskRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTabletIdToDeltaNumRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id_to_delta_num_rows", thrift.MAP, 18) + if p.IsSetTableIdToDeltaNumRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id_to_delta_num_rows", thrift.MAP, 18) mapBeginOffset := offset offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) var length int - for k, v := range p.TabletIdToDeltaNumRows { + for k, v := range 
p.TableIdToDeltaNumRows { length++ offset += bthrift.Binary.WriteI64(buf[offset:], k) @@ -2139,6 +2366,57 @@ func (p *TFinishTaskRequest) fastWriteField18(buf []byte, binaryWriter bthrift.B return offset } +func (p *TFinishTaskRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableIdToTabletIdToDeltaNumRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id_to_tablet_id_to_delta_num_rows", thrift.MAP, 19) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.MAP, 0) + var length int + for k, v := range p.TableIdToTabletIdToDeltaNumRows { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) + var length int + for k, v := range v { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.MAP, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFinishTaskRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRespPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resp_partitions", thrift.LIST, 20) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.RespPartitions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TFinishTaskRequest) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("backend", thrift.STRUCT, 1) @@ -2343,18 +2621,53 @@ func (p *TFinishTaskRequest) field17Length() int { func (p *TFinishTaskRequest) field18Length() int { l := 0 - if p.IsSetTabletIdToDeltaNumRows() { - l += bthrift.Binary.FieldBeginLength("tablet_id_to_delta_num_rows", thrift.MAP, 18) - l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.TabletIdToDeltaNumRows)) + if p.IsSetTableIdToDeltaNumRows() { + l += bthrift.Binary.FieldBeginLength("table_id_to_delta_num_rows", thrift.MAP, 18) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.TableIdToDeltaNumRows)) var tmpK int64 var tmpV int64 - l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.TabletIdToDeltaNumRows) + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.TableIdToDeltaNumRows) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFinishTaskRequest) field19Length() int { + l := 0 + if p.IsSetTableIdToTabletIdToDeltaNumRows() { + l += bthrift.Binary.FieldBeginLength("table_id_to_tablet_id_to_delta_num_rows", thrift.MAP, 19) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.MAP, len(p.TableIdToTabletIdToDeltaNumRows)) + for k, v := range p.TableIdToTabletIdToDeltaNumRows { + + l += bthrift.Binary.I64Length(k) + + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(v)) + var tmpK int64 + 
var tmpV int64 + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(v) + l += bthrift.Binary.MapEndLength() + } l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } +func (p *TFinishTaskRequest) field20Length() int { + l := 0 + if p.IsSetRespPartitions() { + l += bthrift.Binary.FieldBeginLength("resp_partitions", thrift.LIST, 20) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.RespPartitions)) + for _, v := range p.RespPartitions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTablet) FastRead(buf []byte) (int, error) { var err error var offset int @@ -3461,6 +3774,34 @@ func (p *TReportRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -3790,6 +4131,59 @@ func (p *TReportRequest) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TReportRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionsVersion = make(map[types.TPartitionId]types.TVersion, size) + for i := 0; i < size; i++ { + var _key types.TPartitionId + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val types.TVersion + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PartitionsVersion[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TReportRequest) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumTablets = &v + + } + return offset, nil +} + // for compatibility func (p *TReportRequest) FastWrite(buf []byte) int { return 0 @@ -3804,6 +4198,7 @@ func (p *TReportRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) @@ -3811,6 +4206,7 @@ func (p *TReportRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], 
binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -3833,6 +4229,8 @@ func (p *TReportRequest) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4037,6 +4435,39 @@ func (p *TReportRequest) fastWriteField12(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TReportRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionsVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions_version", thrift.MAP, 13) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0) + var length int + for k, v := range p.PartitionsVersion { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TReportRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumTablets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_tablets", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.NumTablets) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TReportRequest) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("backend", thrift.STRUCT, 1) @@ -4205,6 +4636,31 @@ func (p *TReportRequest) field12Length() int { return l } +func (p *TReportRequest) field13Length() int { + l := 0 + if p.IsSetPartitionsVersion() { + l += bthrift.Binary.FieldBeginLength("partitions_version", thrift.MAP, 13) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.PartitionsVersion)) + var tmpK types.TPartitionId + var tmpV types.TVersion + l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.PartitionsVersion) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TReportRequest) field14Length() int { + l := 0 + if p.IsSetNumTablets() { + l += bthrift.Binary.FieldBeginLength("num_tablets", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.NumTablets) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMasterResult_) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/metrics/Metrics.go b/pkg/rpc/kitex_gen/metrics/Metrics.go index b78e0636..12fa1e80 100644 --- a/pkg/rpc/kitex_gen/metrics/Metrics.go +++ b/pkg/rpc/kitex_gen/metrics/Metrics.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package metrics diff --git a/pkg/rpc/kitex_gen/metrics/k-Metrics.go b/pkg/rpc/kitex_gen/metrics/k-Metrics.go index f730ac4a..2e0133d3 100644 --- a/pkg/rpc/kitex_gen/metrics/k-Metrics.go +++ b/pkg/rpc/kitex_gen/metrics/k-Metrics.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
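For reference, the new optional fields added to TTabletInfo above (visible_version_count at id 21, is_persistent at id 1000) follow the usual bthrift fast-path convention: compute the encoded length first, then emit field-begin / value / field-end only when the pointer is set. Below is a minimal, self-contained sketch of that pattern, assuming the cloudwego/kitex bthrift and Apache thrift packages already imported by the generated file; the function names are illustrative and are not part of the generated code.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/cloudwego/kitex/pkg/protocol/bthrift"
)

// visibleVersionCountLength mirrors field21Length above: an unset optional
// field contributes nothing to the struct's BLength.
func visibleVersionCountLength(v *int64) int {
	l := 0
	if v != nil {
		l += bthrift.Binary.FieldBeginLength("visible_version_count", thrift.I64, 21)
		l += bthrift.Binary.I64Length(*v)
		l += bthrift.Binary.FieldEndLength()
	}
	return l
}

// writeVisibleVersionCount mirrors fastWriteField21 above: field header,
// payload, field end, each call advancing the offset into a pre-sized buffer.
func writeVisibleVersionCount(buf []byte, v *int64) int {
	offset := 0
	if v != nil {
		offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_count", thrift.I64, 21)
		offset += bthrift.Binary.WriteI64(buf[offset:], *v)
		offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
	}
	return offset
}

func main() {
	v := int64(42)
	buf := make([]byte, visibleVersionCountLength(&v))
	fmt.Println(writeVisibleVersionCount(buf, &v), "bytes written") // non-zero only when the field is set
}

The same IsSetX guard keeps unset optional fields out of both BLength and FastWriteNocopy, and receivers that do not know a field id (for example 1000) skip it via bthrift.Binary.Skip, as the FastRead switch above shows.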
package metrics diff --git a/pkg/rpc/kitex_gen/opcodes/Opcodes.go b/pkg/rpc/kitex_gen/opcodes/Opcodes.go index 17ffaf0d..e783c52c 100644 --- a/pkg/rpc/kitex_gen/opcodes/Opcodes.go +++ b/pkg/rpc/kitex_gen/opcodes/Opcodes.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package opcodes @@ -86,6 +86,9 @@ const ( TExprOpcode_MATCH_ELEMENT_GT TExprOpcode = 72 TExprOpcode_MATCH_ELEMENT_LE TExprOpcode = 73 TExprOpcode_MATCH_ELEMENT_GE TExprOpcode = 74 + TExprOpcode_MATCH_PHRASE_PREFIX TExprOpcode = 75 + TExprOpcode_MATCH_REGEXP TExprOpcode = 76 + TExprOpcode_MATCH_PHRASE_EDGE TExprOpcode = 77 ) func (p TExprOpcode) String() string { @@ -240,6 +243,12 @@ func (p TExprOpcode) String() string { return "MATCH_ELEMENT_LE" case TExprOpcode_MATCH_ELEMENT_GE: return "MATCH_ELEMENT_GE" + case TExprOpcode_MATCH_PHRASE_PREFIX: + return "MATCH_PHRASE_PREFIX" + case TExprOpcode_MATCH_REGEXP: + return "MATCH_REGEXP" + case TExprOpcode_MATCH_PHRASE_EDGE: + return "MATCH_PHRASE_EDGE" } return "" } @@ -396,6 +405,12 @@ func TExprOpcodeFromString(s string) (TExprOpcode, error) { return TExprOpcode_MATCH_ELEMENT_LE, nil case "MATCH_ELEMENT_GE": return TExprOpcode_MATCH_ELEMENT_GE, nil + case "MATCH_PHRASE_PREFIX": + return TExprOpcode_MATCH_PHRASE_PREFIX, nil + case "MATCH_REGEXP": + return TExprOpcode_MATCH_REGEXP, nil + case "MATCH_PHRASE_EDGE": + return TExprOpcode_MATCH_PHRASE_EDGE, nil } return TExprOpcode(0), fmt.Errorf("not a valid TExprOpcode string") } diff --git a/pkg/rpc/kitex_gen/opcodes/k-Opcodes.go b/pkg/rpc/kitex_gen/opcodes/k-Opcodes.go index b23ff5dd..93e97ce6 100644 --- a/pkg/rpc/kitex_gen/opcodes/k-Opcodes.go +++ b/pkg/rpc/kitex_gen/opcodes/k-Opcodes.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package opcodes diff --git a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go index 24ee46fe..21d37ce0 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
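The PaloInternalService hunk that follows introduces two new generated enums, TSerdeDialect and TCompoundType, each carrying the standard String / FromString / Ptr / Scan / Value helpers. A small usage sketch under the repository's own module path (illustrative only, not part of the diff):

package main

import (
	"fmt"

	"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
)

func main() {
	// Round-trip the new TSerdeDialect enum through its string helpers.
	d, err := palointernalservice.TSerdeDialectFromString("PRESTO")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // PRESTO

	// Scan/Value let the enum be stored and loaded through database/sql.
	p := palointernalservice.TSerdeDialectPtr(palointernalservice.TSerdeDialect_DORIS)
	v, _ := p.Value()
	fmt.Println(v) // int64(0)
}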
package palointernalservice @@ -164,6 +164,48 @@ func (p *TPrefetchMode) Value() (driver.Value, error) { return int64(*p), nil } +type TSerdeDialect int64 + +const ( + TSerdeDialect_DORIS TSerdeDialect = 0 + TSerdeDialect_PRESTO TSerdeDialect = 1 +) + +func (p TSerdeDialect) String() string { + switch p { + case TSerdeDialect_DORIS: + return "DORIS" + case TSerdeDialect_PRESTO: + return "PRESTO" + } + return "" +} + +func TSerdeDialectFromString(s string) (TSerdeDialect, error) { + switch s { + case "DORIS": + return TSerdeDialect_DORIS, nil + case "PRESTO": + return TSerdeDialect_PRESTO, nil + } + return TSerdeDialect(0), fmt.Errorf("not a valid TSerdeDialect string") +} + +func TSerdeDialectPtr(v TSerdeDialect) *TSerdeDialect { return &v } +func (p *TSerdeDialect) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TSerdeDialect(result.Int64) + return +} + +func (p *TSerdeDialect) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type PaloInternalServiceVersion int64 const ( @@ -203,6 +245,58 @@ func (p *PaloInternalServiceVersion) Value() (driver.Value, error) { return int64(*p), nil } +type TCompoundType int64 + +const ( + TCompoundType_UNKNOWN TCompoundType = 0 + TCompoundType_AND TCompoundType = 1 + TCompoundType_OR TCompoundType = 2 + TCompoundType_NOT TCompoundType = 3 +) + +func (p TCompoundType) String() string { + switch p { + case TCompoundType_UNKNOWN: + return "UNKNOWN" + case TCompoundType_AND: + return "AND" + case TCompoundType_OR: + return "OR" + case TCompoundType_NOT: + return "NOT" + } + return "" +} + +func TCompoundTypeFromString(s string) (TCompoundType, error) { + switch s { + case "UNKNOWN": + return TCompoundType_UNKNOWN, nil + case "AND": + return TCompoundType_AND, nil + case "OR": + return TCompoundType_OR, nil + case "NOT": + return TCompoundType_NOT, nil + } + return TCompoundType(0), fmt.Errorf("not a valid TCompoundType string") +} + +func TCompoundTypePtr(v TCompoundType) *TCompoundType { return &v } +func (p *TCompoundType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TCompoundType(result.Int64) + return +} + +func (p *TCompoundType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TMysqlErrorHubInfo struct { Host string `thrift:"host,1,required" frugal:"1,required,string" json:"host"` Port int32 `thrift:"port,2,required" frugal:"2,required,i32" json:"port"` @@ -217,7 +311,6 @@ func NewTMysqlErrorHubInfo() *TMysqlErrorHubInfo { } func (p *TMysqlErrorHubInfo) InitDefault() { - *p = TMysqlErrorHubInfo{} } func (p *TMysqlErrorHubInfo) GetHost() (v string) { @@ -302,10 +395,8 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -313,10 +404,8 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -324,10 +413,8 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } 
else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -335,10 +422,8 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPasswd = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { @@ -346,10 +431,8 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDb = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { @@ -357,17 +440,14 @@ func (p *TMysqlErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -424,56 +504,69 @@ RequiredFieldNotSetError: } func (p *TMysqlErrorHubInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TMysqlErrorHubInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Port = v + _field = v } + p.Port = _field return nil } - func (p *TMysqlErrorHubInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TMysqlErrorHubInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Passwd = v + _field = v } + p.Passwd = _field return nil } - func (p *TMysqlErrorHubInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = v + _field = v } + p.Db = _field return nil } - func (p *TMysqlErrorHubInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = v + _field = v } + p.Table = _field return nil } @@ -507,7 +600,6 @@ func (p *TMysqlErrorHubInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -633,6 +725,7 @@ func (p *TMysqlErrorHubInfo) String() string { return "" } return fmt.Sprintf("TMysqlErrorHubInfo(%+v)", *p) + } func (p *TMysqlErrorHubInfo) DeepEqual(ano *TMysqlErrorHubInfo) bool { @@ -716,7 +809,6 @@ func NewTBrokerErrorHubInfo() *TBrokerErrorHubInfo { } func (p *TBrokerErrorHubInfo) InitDefault() { - *p = TBrokerErrorHubInfo{} } var TBrokerErrorHubInfo_BrokerAddr_DEFAULT *types.TNetworkAddress @@ -783,10 +875,8 @@ func (p *TBrokerErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrokerAddr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - 
} + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -794,10 +884,8 @@ func (p *TBrokerErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { @@ -805,17 +893,14 @@ func (p *TBrokerErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetProp = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -857,28 +942,30 @@ RequiredFieldNotSetError: } func (p *TBrokerErrorHubInfo) ReadField1(iprot thrift.TProtocol) error { - p.BrokerAddr = types.NewTNetworkAddress() - if err := p.BrokerAddr.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerAddr = _field return nil } - func (p *TBrokerErrorHubInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Path = v + _field = v } + p.Path = _field return nil } - func (p *TBrokerErrorHubInfo) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Prop = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -894,11 +981,12 @@ func (p *TBrokerErrorHubInfo) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.Prop[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Prop = _field return nil } @@ -920,7 +1008,6 @@ func (p *TBrokerErrorHubInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -981,11 +1068,9 @@ func (p *TBrokerErrorHubInfo) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Prop { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -1008,6 +1093,7 @@ func (p *TBrokerErrorHubInfo) String() string { return "" } return fmt.Sprintf("TBrokerErrorHubInfo(%+v)", *p) + } func (p *TBrokerErrorHubInfo) DeepEqual(ano *TBrokerErrorHubInfo) bool { @@ -1070,10 +1156,7 @@ func NewTLoadErrorHubInfo() *TLoadErrorHubInfo { } func (p *TLoadErrorHubInfo) InitDefault() { - *p = TLoadErrorHubInfo{ - - Type: TErrorHubType_NULL_TYPE, - } + p.Type = TErrorHubType_NULL_TYPE } func (p *TLoadErrorHubInfo) GetType() (v TErrorHubType) { @@ -1147,37 +1230,30 @@ func (p *TLoadErrorHubInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1209,27 +1285,30 @@ RequiredFieldNotSetError: } func (p *TLoadErrorHubInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field TErrorHubType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TErrorHubType(v) + _field = TErrorHubType(v) } + p.Type = _field return nil } - func (p *TLoadErrorHubInfo) ReadField2(iprot thrift.TProtocol) error { - p.MysqlInfo = NewTMysqlErrorHubInfo() - if err := p.MysqlInfo.Read(iprot); err != nil { + _field := NewTMysqlErrorHubInfo() + if err := _field.Read(iprot); err != nil { return err } + p.MysqlInfo = _field return nil } - func (p *TLoadErrorHubInfo) ReadField3(iprot thrift.TProtocol) error { - p.BrokerInfo = NewTBrokerErrorHubInfo() - if err := p.BrokerInfo.Read(iprot); err != nil { + _field := NewTBrokerErrorHubInfo() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerInfo = _field return nil } @@ -1251,7 +1330,6 @@ func (p *TLoadErrorHubInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1330,6 +1408,7 @@ func (p *TLoadErrorHubInfo) String() string { return "" } return fmt.Sprintf("TLoadErrorHubInfo(%+v)", *p) + } func (p *TLoadErrorHubInfo) DeepEqual(ano *TLoadErrorHubInfo) bool { @@ -1381,7 +1460,6 @@ func NewTResourceLimit() *TResourceLimit { } func (p *TResourceLimit) InitDefault() { - *p = TResourceLimit{} } var TResourceLimit_CpuLimit_DEFAULT int32 @@ -1428,17 +1506,14 @@ func (p *TResourceLimit) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1464,11 +1539,14 @@ ReadStructEndError: } func (p *TResourceLimit) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.CpuLimit = &v + _field = &v } + p.CpuLimit = _field return nil } @@ -1482,7 +1560,6 @@ func (p *TResourceLimit) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1525,6 +1602,7 @@ func (p *TResourceLimit) String() string { return "" } return fmt.Sprintf("TResourceLimit(%+v)", *p) + } func (p *TResourceLimit) DeepEqual(ano *TResourceLimit) bool { @@ -1553,229 +1631,392 @@ func (p *TResourceLimit) Field1DeepEqual(src *int32) bool { } type TQueryOptions struct { - AbortOnError bool `thrift:"abort_on_error,1,optional" frugal:"1,optional,bool" json:"abort_on_error,omitempty"` - MaxErrors int32 `thrift:"max_errors,2,optional" frugal:"2,optional,i32" json:"max_errors,omitempty"` - DisableCodegen bool `thrift:"disable_codegen,3,optional" frugal:"3,optional,bool" json:"disable_codegen,omitempty"` - BatchSize int32 `thrift:"batch_size,4,optional" 
frugal:"4,optional,i32" json:"batch_size,omitempty"` - NumNodes int32 `thrift:"num_nodes,5,optional" frugal:"5,optional,i32" json:"num_nodes,omitempty"` - MaxScanRangeLength int64 `thrift:"max_scan_range_length,6,optional" frugal:"6,optional,i64" json:"max_scan_range_length,omitempty"` - NumScannerThreads int32 `thrift:"num_scanner_threads,7,optional" frugal:"7,optional,i32" json:"num_scanner_threads,omitempty"` - MaxIoBuffers int32 `thrift:"max_io_buffers,8,optional" frugal:"8,optional,i32" json:"max_io_buffers,omitempty"` - AllowUnsupportedFormats bool `thrift:"allow_unsupported_formats,9,optional" frugal:"9,optional,bool" json:"allow_unsupported_formats,omitempty"` - DefaultOrderByLimit int64 `thrift:"default_order_by_limit,10,optional" frugal:"10,optional,i64" json:"default_order_by_limit,omitempty"` - MemLimit int64 `thrift:"mem_limit,12,optional" frugal:"12,optional,i64" json:"mem_limit,omitempty"` - AbortOnDefaultLimitExceeded bool `thrift:"abort_on_default_limit_exceeded,13,optional" frugal:"13,optional,bool" json:"abort_on_default_limit_exceeded,omitempty"` - QueryTimeout int32 `thrift:"query_timeout,14,optional" frugal:"14,optional,i32" json:"query_timeout,omitempty"` - IsReportSuccess bool `thrift:"is_report_success,15,optional" frugal:"15,optional,bool" json:"is_report_success,omitempty"` - CodegenLevel int32 `thrift:"codegen_level,16,optional" frugal:"16,optional,i32" json:"codegen_level,omitempty"` - KuduLatestObservedTs int64 `thrift:"kudu_latest_observed_ts,17,optional" frugal:"17,optional,i64" json:"kudu_latest_observed_ts,omitempty"` - QueryType TQueryType `thrift:"query_type,18,optional" frugal:"18,optional,TQueryType" json:"query_type,omitempty"` - MinReservation int64 `thrift:"min_reservation,19,optional" frugal:"19,optional,i64" json:"min_reservation,omitempty"` - MaxReservation int64 `thrift:"max_reservation,20,optional" frugal:"20,optional,i64" json:"max_reservation,omitempty"` - InitialReservationTotalClaims int64 `thrift:"initial_reservation_total_claims,21,optional" frugal:"21,optional,i64" json:"initial_reservation_total_claims,omitempty"` - BufferPoolLimit int64 `thrift:"buffer_pool_limit,22,optional" frugal:"22,optional,i64" json:"buffer_pool_limit,omitempty"` - DefaultSpillableBufferSize int64 `thrift:"default_spillable_buffer_size,23,optional" frugal:"23,optional,i64" json:"default_spillable_buffer_size,omitempty"` - MinSpillableBufferSize int64 `thrift:"min_spillable_buffer_size,24,optional" frugal:"24,optional,i64" json:"min_spillable_buffer_size,omitempty"` - MaxRowSize int64 `thrift:"max_row_size,25,optional" frugal:"25,optional,i64" json:"max_row_size,omitempty"` - DisableStreamPreaggregations bool `thrift:"disable_stream_preaggregations,26,optional" frugal:"26,optional,bool" json:"disable_stream_preaggregations,omitempty"` - MtDop int32 `thrift:"mt_dop,27,optional" frugal:"27,optional,i32" json:"mt_dop,omitempty"` - LoadMemLimit int64 `thrift:"load_mem_limit,28,optional" frugal:"28,optional,i64" json:"load_mem_limit,omitempty"` - MaxScanKeyNum *int32 `thrift:"max_scan_key_num,29,optional" frugal:"29,optional,i32" json:"max_scan_key_num,omitempty"` - MaxPushdownConditionsPerColumn *int32 `thrift:"max_pushdown_conditions_per_column,30,optional" frugal:"30,optional,i32" json:"max_pushdown_conditions_per_column,omitempty"` - EnableSpilling bool `thrift:"enable_spilling,31,optional" frugal:"31,optional,bool" json:"enable_spilling,omitempty"` - EnableEnableExchangeNodeParallelMerge bool `thrift:"enable_enable_exchange_node_parallel_merge,32,optional" 
frugal:"32,optional,bool" json:"enable_enable_exchange_node_parallel_merge,omitempty"` - RuntimeFilterWaitTimeMs int32 `thrift:"runtime_filter_wait_time_ms,33,optional" frugal:"33,optional,i32" json:"runtime_filter_wait_time_ms,omitempty"` - RuntimeFilterMaxInNum int32 `thrift:"runtime_filter_max_in_num,34,optional" frugal:"34,optional,i32" json:"runtime_filter_max_in_num,omitempty"` - ResourceLimit *TResourceLimit `thrift:"resource_limit,42,optional" frugal:"42,optional,TResourceLimit" json:"resource_limit,omitempty"` - ReturnObjectDataAsBinary bool `thrift:"return_object_data_as_binary,43,optional" frugal:"43,optional,bool" json:"return_object_data_as_binary,omitempty"` - TrimTailingSpacesForExternalTableQuery bool `thrift:"trim_tailing_spaces_for_external_table_query,44,optional" frugal:"44,optional,bool" json:"trim_tailing_spaces_for_external_table_query,omitempty"` - EnableFunctionPushdown *bool `thrift:"enable_function_pushdown,45,optional" frugal:"45,optional,bool" json:"enable_function_pushdown,omitempty"` - FragmentTransmissionCompressionCodec *string `thrift:"fragment_transmission_compression_codec,46,optional" frugal:"46,optional,string" json:"fragment_transmission_compression_codec,omitempty"` - EnableLocalExchange *bool `thrift:"enable_local_exchange,48,optional" frugal:"48,optional,bool" json:"enable_local_exchange,omitempty"` - SkipStorageEngineMerge bool `thrift:"skip_storage_engine_merge,49,optional" frugal:"49,optional,bool" json:"skip_storage_engine_merge,omitempty"` - SkipDeletePredicate bool `thrift:"skip_delete_predicate,50,optional" frugal:"50,optional,bool" json:"skip_delete_predicate,omitempty"` - EnableNewShuffleHashMethod *bool `thrift:"enable_new_shuffle_hash_method,51,optional" frugal:"51,optional,bool" json:"enable_new_shuffle_hash_method,omitempty"` - BeExecVersion int32 `thrift:"be_exec_version,52,optional" frugal:"52,optional,i32" json:"be_exec_version,omitempty"` - PartitionedHashJoinRowsThreshold int32 `thrift:"partitioned_hash_join_rows_threshold,53,optional" frugal:"53,optional,i32" json:"partitioned_hash_join_rows_threshold,omitempty"` - EnableShareHashTableForBroadcastJoin *bool `thrift:"enable_share_hash_table_for_broadcast_join,54,optional" frugal:"54,optional,bool" json:"enable_share_hash_table_for_broadcast_join,omitempty"` - CheckOverflowForDecimal bool `thrift:"check_overflow_for_decimal,55,optional" frugal:"55,optional,bool" json:"check_overflow_for_decimal,omitempty"` - SkipDeleteBitmap bool `thrift:"skip_delete_bitmap,56,optional" frugal:"56,optional,bool" json:"skip_delete_bitmap,omitempty"` - EnablePipelineEngine bool `thrift:"enable_pipeline_engine,57,optional" frugal:"57,optional,bool" json:"enable_pipeline_engine,omitempty"` - RepeatMaxNum int32 `thrift:"repeat_max_num,58,optional" frugal:"58,optional,i32" json:"repeat_max_num,omitempty"` - ExternalSortBytesThreshold int64 `thrift:"external_sort_bytes_threshold,59,optional" frugal:"59,optional,i64" json:"external_sort_bytes_threshold,omitempty"` - PartitionedHashAggRowsThreshold int32 `thrift:"partitioned_hash_agg_rows_threshold,60,optional" frugal:"60,optional,i32" json:"partitioned_hash_agg_rows_threshold,omitempty"` - EnableFileCache bool `thrift:"enable_file_cache,61,optional" frugal:"61,optional,bool" json:"enable_file_cache,omitempty"` - InsertTimeout int32 `thrift:"insert_timeout,62,optional" frugal:"62,optional,i32" json:"insert_timeout,omitempty"` - ExecutionTimeout int32 `thrift:"execution_timeout,63,optional" frugal:"63,optional,i32" json:"execution_timeout,omitempty"` - 
DryRunQuery bool `thrift:"dry_run_query,64,optional" frugal:"64,optional,bool" json:"dry_run_query,omitempty"` - EnableCommonExprPushdown bool `thrift:"enable_common_expr_pushdown,65,optional" frugal:"65,optional,bool" json:"enable_common_expr_pushdown,omitempty"` - ParallelInstance int32 `thrift:"parallel_instance,66,optional" frugal:"66,optional,i32" json:"parallel_instance,omitempty"` - MysqlRowBinaryFormat bool `thrift:"mysql_row_binary_format,67,optional" frugal:"67,optional,bool" json:"mysql_row_binary_format,omitempty"` - ExternalAggBytesThreshold int64 `thrift:"external_agg_bytes_threshold,68,optional" frugal:"68,optional,i64" json:"external_agg_bytes_threshold,omitempty"` - ExternalAggPartitionBits int32 `thrift:"external_agg_partition_bits,69,optional" frugal:"69,optional,i32" json:"external_agg_partition_bits,omitempty"` - FileCacheBasePath *string `thrift:"file_cache_base_path,70,optional" frugal:"70,optional,string" json:"file_cache_base_path,omitempty"` - EnableParquetLazyMat bool `thrift:"enable_parquet_lazy_mat,71,optional" frugal:"71,optional,bool" json:"enable_parquet_lazy_mat,omitempty"` - EnableOrcLazyMat bool `thrift:"enable_orc_lazy_mat,72,optional" frugal:"72,optional,bool" json:"enable_orc_lazy_mat,omitempty"` - ScanQueueMemLimit *int64 `thrift:"scan_queue_mem_limit,73,optional" frugal:"73,optional,i64" json:"scan_queue_mem_limit,omitempty"` - EnableScanNodeRunSerial bool `thrift:"enable_scan_node_run_serial,74,optional" frugal:"74,optional,bool" json:"enable_scan_node_run_serial,omitempty"` - EnableInsertStrict bool `thrift:"enable_insert_strict,75,optional" frugal:"75,optional,bool" json:"enable_insert_strict,omitempty"` - EnableInvertedIndexQuery bool `thrift:"enable_inverted_index_query,76,optional" frugal:"76,optional,bool" json:"enable_inverted_index_query,omitempty"` - TruncateCharOrVarcharColumns bool `thrift:"truncate_char_or_varchar_columns,77,optional" frugal:"77,optional,bool" json:"truncate_char_or_varchar_columns,omitempty"` - EnableHashJoinEarlyStartProbe bool `thrift:"enable_hash_join_early_start_probe,78,optional" frugal:"78,optional,bool" json:"enable_hash_join_early_start_probe,omitempty"` - EnablePipelineXEngine bool `thrift:"enable_pipeline_x_engine,79,optional" frugal:"79,optional,bool" json:"enable_pipeline_x_engine,omitempty"` - EnableMemtableOnSinkNode bool `thrift:"enable_memtable_on_sink_node,80,optional" frugal:"80,optional,bool" json:"enable_memtable_on_sink_node,omitempty"` - EnableDeleteSubPredicateV2 bool `thrift:"enable_delete_sub_predicate_v2,81,optional" frugal:"81,optional,bool" json:"enable_delete_sub_predicate_v2,omitempty"` - FeProcessUuid int64 `thrift:"fe_process_uuid,82,optional" frugal:"82,optional,i64" json:"fe_process_uuid,omitempty"` - InvertedIndexConjunctionOptThreshold int32 `thrift:"inverted_index_conjunction_opt_threshold,83,optional" frugal:"83,optional,i32" json:"inverted_index_conjunction_opt_threshold,omitempty"` - EnableProfile bool `thrift:"enable_profile,84,optional" frugal:"84,optional,bool" json:"enable_profile,omitempty"` - EnablePageCache bool `thrift:"enable_page_cache,85,optional" frugal:"85,optional,bool" json:"enable_page_cache,omitempty"` - AnalyzeTimeout int32 `thrift:"analyze_timeout,86,optional" frugal:"86,optional,i32" json:"analyze_timeout,omitempty"` + AbortOnError bool `thrift:"abort_on_error,1,optional" frugal:"1,optional,bool" json:"abort_on_error,omitempty"` + MaxErrors int32 `thrift:"max_errors,2,optional" frugal:"2,optional,i32" json:"max_errors,omitempty"` + DisableCodegen bool 
`thrift:"disable_codegen,3,optional" frugal:"3,optional,bool" json:"disable_codegen,omitempty"` + BatchSize int32 `thrift:"batch_size,4,optional" frugal:"4,optional,i32" json:"batch_size,omitempty"` + NumNodes int32 `thrift:"num_nodes,5,optional" frugal:"5,optional,i32" json:"num_nodes,omitempty"` + MaxScanRangeLength int64 `thrift:"max_scan_range_length,6,optional" frugal:"6,optional,i64" json:"max_scan_range_length,omitempty"` + NumScannerThreads int32 `thrift:"num_scanner_threads,7,optional" frugal:"7,optional,i32" json:"num_scanner_threads,omitempty"` + MaxIoBuffers int32 `thrift:"max_io_buffers,8,optional" frugal:"8,optional,i32" json:"max_io_buffers,omitempty"` + AllowUnsupportedFormats bool `thrift:"allow_unsupported_formats,9,optional" frugal:"9,optional,bool" json:"allow_unsupported_formats,omitempty"` + DefaultOrderByLimit int64 `thrift:"default_order_by_limit,10,optional" frugal:"10,optional,i64" json:"default_order_by_limit,omitempty"` + MemLimit int64 `thrift:"mem_limit,12,optional" frugal:"12,optional,i64" json:"mem_limit,omitempty"` + AbortOnDefaultLimitExceeded bool `thrift:"abort_on_default_limit_exceeded,13,optional" frugal:"13,optional,bool" json:"abort_on_default_limit_exceeded,omitempty"` + QueryTimeout int32 `thrift:"query_timeout,14,optional" frugal:"14,optional,i32" json:"query_timeout,omitempty"` + IsReportSuccess bool `thrift:"is_report_success,15,optional" frugal:"15,optional,bool" json:"is_report_success,omitempty"` + CodegenLevel int32 `thrift:"codegen_level,16,optional" frugal:"16,optional,i32" json:"codegen_level,omitempty"` + KuduLatestObservedTs int64 `thrift:"kudu_latest_observed_ts,17,optional" frugal:"17,optional,i64" json:"kudu_latest_observed_ts,omitempty"` + QueryType TQueryType `thrift:"query_type,18,optional" frugal:"18,optional,TQueryType" json:"query_type,omitempty"` + MinReservation int64 `thrift:"min_reservation,19,optional" frugal:"19,optional,i64" json:"min_reservation,omitempty"` + MaxReservation int64 `thrift:"max_reservation,20,optional" frugal:"20,optional,i64" json:"max_reservation,omitempty"` + InitialReservationTotalClaims int64 `thrift:"initial_reservation_total_claims,21,optional" frugal:"21,optional,i64" json:"initial_reservation_total_claims,omitempty"` + BufferPoolLimit int64 `thrift:"buffer_pool_limit,22,optional" frugal:"22,optional,i64" json:"buffer_pool_limit,omitempty"` + DefaultSpillableBufferSize int64 `thrift:"default_spillable_buffer_size,23,optional" frugal:"23,optional,i64" json:"default_spillable_buffer_size,omitempty"` + MinSpillableBufferSize int64 `thrift:"min_spillable_buffer_size,24,optional" frugal:"24,optional,i64" json:"min_spillable_buffer_size,omitempty"` + MaxRowSize int64 `thrift:"max_row_size,25,optional" frugal:"25,optional,i64" json:"max_row_size,omitempty"` + DisableStreamPreaggregations bool `thrift:"disable_stream_preaggregations,26,optional" frugal:"26,optional,bool" json:"disable_stream_preaggregations,omitempty"` + MtDop int32 `thrift:"mt_dop,27,optional" frugal:"27,optional,i32" json:"mt_dop,omitempty"` + LoadMemLimit int64 `thrift:"load_mem_limit,28,optional" frugal:"28,optional,i64" json:"load_mem_limit,omitempty"` + MaxScanKeyNum *int32 `thrift:"max_scan_key_num,29,optional" frugal:"29,optional,i32" json:"max_scan_key_num,omitempty"` + MaxPushdownConditionsPerColumn *int32 `thrift:"max_pushdown_conditions_per_column,30,optional" frugal:"30,optional,i32" json:"max_pushdown_conditions_per_column,omitempty"` + EnableSpilling bool `thrift:"enable_spilling,31,optional" frugal:"31,optional,bool" 
json:"enable_spilling,omitempty"` + EnableEnableExchangeNodeParallelMerge bool `thrift:"enable_enable_exchange_node_parallel_merge,32,optional" frugal:"32,optional,bool" json:"enable_enable_exchange_node_parallel_merge,omitempty"` + RuntimeFilterWaitTimeMs int32 `thrift:"runtime_filter_wait_time_ms,33,optional" frugal:"33,optional,i32" json:"runtime_filter_wait_time_ms,omitempty"` + RuntimeFilterMaxInNum int32 `thrift:"runtime_filter_max_in_num,34,optional" frugal:"34,optional,i32" json:"runtime_filter_max_in_num,omitempty"` + ResourceLimit *TResourceLimit `thrift:"resource_limit,42,optional" frugal:"42,optional,TResourceLimit" json:"resource_limit,omitempty"` + ReturnObjectDataAsBinary bool `thrift:"return_object_data_as_binary,43,optional" frugal:"43,optional,bool" json:"return_object_data_as_binary,omitempty"` + TrimTailingSpacesForExternalTableQuery bool `thrift:"trim_tailing_spaces_for_external_table_query,44,optional" frugal:"44,optional,bool" json:"trim_tailing_spaces_for_external_table_query,omitempty"` + EnableFunctionPushdown *bool `thrift:"enable_function_pushdown,45,optional" frugal:"45,optional,bool" json:"enable_function_pushdown,omitempty"` + FragmentTransmissionCompressionCodec *string `thrift:"fragment_transmission_compression_codec,46,optional" frugal:"46,optional,string" json:"fragment_transmission_compression_codec,omitempty"` + EnableLocalExchange *bool `thrift:"enable_local_exchange,48,optional" frugal:"48,optional,bool" json:"enable_local_exchange,omitempty"` + SkipStorageEngineMerge bool `thrift:"skip_storage_engine_merge,49,optional" frugal:"49,optional,bool" json:"skip_storage_engine_merge,omitempty"` + SkipDeletePredicate bool `thrift:"skip_delete_predicate,50,optional" frugal:"50,optional,bool" json:"skip_delete_predicate,omitempty"` + EnableNewShuffleHashMethod *bool `thrift:"enable_new_shuffle_hash_method,51,optional" frugal:"51,optional,bool" json:"enable_new_shuffle_hash_method,omitempty"` + BeExecVersion int32 `thrift:"be_exec_version,52,optional" frugal:"52,optional,i32" json:"be_exec_version,omitempty"` + PartitionedHashJoinRowsThreshold int32 `thrift:"partitioned_hash_join_rows_threshold,53,optional" frugal:"53,optional,i32" json:"partitioned_hash_join_rows_threshold,omitempty"` + EnableShareHashTableForBroadcastJoin *bool `thrift:"enable_share_hash_table_for_broadcast_join,54,optional" frugal:"54,optional,bool" json:"enable_share_hash_table_for_broadcast_join,omitempty"` + CheckOverflowForDecimal bool `thrift:"check_overflow_for_decimal,55,optional" frugal:"55,optional,bool" json:"check_overflow_for_decimal,omitempty"` + SkipDeleteBitmap bool `thrift:"skip_delete_bitmap,56,optional" frugal:"56,optional,bool" json:"skip_delete_bitmap,omitempty"` + EnablePipelineEngine bool `thrift:"enable_pipeline_engine,57,optional" frugal:"57,optional,bool" json:"enable_pipeline_engine,omitempty"` + RepeatMaxNum int32 `thrift:"repeat_max_num,58,optional" frugal:"58,optional,i32" json:"repeat_max_num,omitempty"` + ExternalSortBytesThreshold int64 `thrift:"external_sort_bytes_threshold,59,optional" frugal:"59,optional,i64" json:"external_sort_bytes_threshold,omitempty"` + PartitionedHashAggRowsThreshold int32 `thrift:"partitioned_hash_agg_rows_threshold,60,optional" frugal:"60,optional,i32" json:"partitioned_hash_agg_rows_threshold,omitempty"` + EnableFileCache bool `thrift:"enable_file_cache,61,optional" frugal:"61,optional,bool" json:"enable_file_cache,omitempty"` + InsertTimeout int32 `thrift:"insert_timeout,62,optional" frugal:"62,optional,i32" 
json:"insert_timeout,omitempty"` + ExecutionTimeout int32 `thrift:"execution_timeout,63,optional" frugal:"63,optional,i32" json:"execution_timeout,omitempty"` + DryRunQuery bool `thrift:"dry_run_query,64,optional" frugal:"64,optional,bool" json:"dry_run_query,omitempty"` + EnableCommonExprPushdown bool `thrift:"enable_common_expr_pushdown,65,optional" frugal:"65,optional,bool" json:"enable_common_expr_pushdown,omitempty"` + ParallelInstance int32 `thrift:"parallel_instance,66,optional" frugal:"66,optional,i32" json:"parallel_instance,omitempty"` + MysqlRowBinaryFormat bool `thrift:"mysql_row_binary_format,67,optional" frugal:"67,optional,bool" json:"mysql_row_binary_format,omitempty"` + ExternalAggBytesThreshold int64 `thrift:"external_agg_bytes_threshold,68,optional" frugal:"68,optional,i64" json:"external_agg_bytes_threshold,omitempty"` + ExternalAggPartitionBits int32 `thrift:"external_agg_partition_bits,69,optional" frugal:"69,optional,i32" json:"external_agg_partition_bits,omitempty"` + FileCacheBasePath *string `thrift:"file_cache_base_path,70,optional" frugal:"70,optional,string" json:"file_cache_base_path,omitempty"` + EnableParquetLazyMat bool `thrift:"enable_parquet_lazy_mat,71,optional" frugal:"71,optional,bool" json:"enable_parquet_lazy_mat,omitempty"` + EnableOrcLazyMat bool `thrift:"enable_orc_lazy_mat,72,optional" frugal:"72,optional,bool" json:"enable_orc_lazy_mat,omitempty"` + ScanQueueMemLimit *int64 `thrift:"scan_queue_mem_limit,73,optional" frugal:"73,optional,i64" json:"scan_queue_mem_limit,omitempty"` + EnableScanNodeRunSerial bool `thrift:"enable_scan_node_run_serial,74,optional" frugal:"74,optional,bool" json:"enable_scan_node_run_serial,omitempty"` + EnableInsertStrict bool `thrift:"enable_insert_strict,75,optional" frugal:"75,optional,bool" json:"enable_insert_strict,omitempty"` + EnableInvertedIndexQuery bool `thrift:"enable_inverted_index_query,76,optional" frugal:"76,optional,bool" json:"enable_inverted_index_query,omitempty"` + TruncateCharOrVarcharColumns bool `thrift:"truncate_char_or_varchar_columns,77,optional" frugal:"77,optional,bool" json:"truncate_char_or_varchar_columns,omitempty"` + EnableHashJoinEarlyStartProbe bool `thrift:"enable_hash_join_early_start_probe,78,optional" frugal:"78,optional,bool" json:"enable_hash_join_early_start_probe,omitempty"` + EnablePipelineXEngine bool `thrift:"enable_pipeline_x_engine,79,optional" frugal:"79,optional,bool" json:"enable_pipeline_x_engine,omitempty"` + EnableMemtableOnSinkNode bool `thrift:"enable_memtable_on_sink_node,80,optional" frugal:"80,optional,bool" json:"enable_memtable_on_sink_node,omitempty"` + EnableDeleteSubPredicateV2 bool `thrift:"enable_delete_sub_predicate_v2,81,optional" frugal:"81,optional,bool" json:"enable_delete_sub_predicate_v2,omitempty"` + FeProcessUuid int64 `thrift:"fe_process_uuid,82,optional" frugal:"82,optional,i64" json:"fe_process_uuid,omitempty"` + InvertedIndexConjunctionOptThreshold int32 `thrift:"inverted_index_conjunction_opt_threshold,83,optional" frugal:"83,optional,i32" json:"inverted_index_conjunction_opt_threshold,omitempty"` + EnableProfile bool `thrift:"enable_profile,84,optional" frugal:"84,optional,bool" json:"enable_profile,omitempty"` + EnablePageCache bool `thrift:"enable_page_cache,85,optional" frugal:"85,optional,bool" json:"enable_page_cache,omitempty"` + AnalyzeTimeout int32 `thrift:"analyze_timeout,86,optional" frugal:"86,optional,i32" json:"analyze_timeout,omitempty"` + FasterFloatConvert bool `thrift:"faster_float_convert,87,optional" 
frugal:"87,optional,bool" json:"faster_float_convert,omitempty"` + EnableDecimal256 bool `thrift:"enable_decimal256,88,optional" frugal:"88,optional,bool" json:"enable_decimal256,omitempty"` + EnableLocalShuffle bool `thrift:"enable_local_shuffle,89,optional" frugal:"89,optional,bool" json:"enable_local_shuffle,omitempty"` + SkipMissingVersion bool `thrift:"skip_missing_version,90,optional" frugal:"90,optional,bool" json:"skip_missing_version,omitempty"` + RuntimeFilterWaitInfinitely bool `thrift:"runtime_filter_wait_infinitely,91,optional" frugal:"91,optional,bool" json:"runtime_filter_wait_infinitely,omitempty"` + WaitFullBlockScheduleTimes int32 `thrift:"wait_full_block_schedule_times,92,optional" frugal:"92,optional,i32" json:"wait_full_block_schedule_times,omitempty"` + InvertedIndexMaxExpansions int32 `thrift:"inverted_index_max_expansions,93,optional" frugal:"93,optional,i32" json:"inverted_index_max_expansions,omitempty"` + InvertedIndexSkipThreshold int32 `thrift:"inverted_index_skip_threshold,94,optional" frugal:"94,optional,i32" json:"inverted_index_skip_threshold,omitempty"` + EnableParallelScan bool `thrift:"enable_parallel_scan,95,optional" frugal:"95,optional,bool" json:"enable_parallel_scan,omitempty"` + ParallelScanMaxScannersCount int32 `thrift:"parallel_scan_max_scanners_count,96,optional" frugal:"96,optional,i32" json:"parallel_scan_max_scanners_count,omitempty"` + ParallelScanMinRowsPerScanner int64 `thrift:"parallel_scan_min_rows_per_scanner,97,optional" frugal:"97,optional,i64" json:"parallel_scan_min_rows_per_scanner,omitempty"` + SkipBadTablet bool `thrift:"skip_bad_tablet,98,optional" frugal:"98,optional,bool" json:"skip_bad_tablet,omitempty"` + ScannerScaleUpRatio float64 `thrift:"scanner_scale_up_ratio,99,optional" frugal:"99,optional,double" json:"scanner_scale_up_ratio,omitempty"` + EnableDistinctStreamingAggregation bool `thrift:"enable_distinct_streaming_aggregation,100,optional" frugal:"100,optional,bool" json:"enable_distinct_streaming_aggregation,omitempty"` + EnableJoinSpill bool `thrift:"enable_join_spill,101,optional" frugal:"101,optional,bool" json:"enable_join_spill,omitempty"` + EnableSortSpill bool `thrift:"enable_sort_spill,102,optional" frugal:"102,optional,bool" json:"enable_sort_spill,omitempty"` + EnableAggSpill bool `thrift:"enable_agg_spill,103,optional" frugal:"103,optional,bool" json:"enable_agg_spill,omitempty"` + MinRevocableMem int64 `thrift:"min_revocable_mem,104,optional" frugal:"104,optional,i64" json:"min_revocable_mem,omitempty"` + SpillStreamingAggMemLimit int64 `thrift:"spill_streaming_agg_mem_limit,105,optional" frugal:"105,optional,i64" json:"spill_streaming_agg_mem_limit,omitempty"` + DataQueueMaxBlocks int64 `thrift:"data_queue_max_blocks,106,optional" frugal:"106,optional,i64" json:"data_queue_max_blocks,omitempty"` + EnableCommonExprPushdownForInvertedIndex bool `thrift:"enable_common_expr_pushdown_for_inverted_index,107,optional" frugal:"107,optional,bool" json:"enable_common_expr_pushdown_for_inverted_index,omitempty"` + LocalExchangeFreeBlocksLimit *int64 `thrift:"local_exchange_free_blocks_limit,108,optional" frugal:"108,optional,i64" json:"local_exchange_free_blocks_limit,omitempty"` + EnableForceSpill bool `thrift:"enable_force_spill,109,optional" frugal:"109,optional,bool" json:"enable_force_spill,omitempty"` + EnableParquetFilterByMinMax bool `thrift:"enable_parquet_filter_by_min_max,110,optional" frugal:"110,optional,bool" json:"enable_parquet_filter_by_min_max,omitempty"` + EnableOrcFilterByMinMax bool 
`thrift:"enable_orc_filter_by_min_max,111,optional" frugal:"111,optional,bool" json:"enable_orc_filter_by_min_max,omitempty"` + MaxColumnReaderNum int32 `thrift:"max_column_reader_num,112,optional" frugal:"112,optional,i32" json:"max_column_reader_num,omitempty"` + EnableLocalMergeSort bool `thrift:"enable_local_merge_sort,113,optional" frugal:"113,optional,bool" json:"enable_local_merge_sort,omitempty"` + EnableParallelResultSink bool `thrift:"enable_parallel_result_sink,114,optional" frugal:"114,optional,bool" json:"enable_parallel_result_sink,omitempty"` + EnableShortCircuitQueryAccessColumnStore bool `thrift:"enable_short_circuit_query_access_column_store,115,optional" frugal:"115,optional,bool" json:"enable_short_circuit_query_access_column_store,omitempty"` + EnableNoNeedReadDataOpt bool `thrift:"enable_no_need_read_data_opt,116,optional" frugal:"116,optional,bool" json:"enable_no_need_read_data_opt,omitempty"` + ReadCsvEmptyLineAsNull bool `thrift:"read_csv_empty_line_as_null,117,optional" frugal:"117,optional,bool" json:"read_csv_empty_line_as_null,omitempty"` + SerdeDialect TSerdeDialect `thrift:"serde_dialect,118,optional" frugal:"118,optional,TSerdeDialect" json:"serde_dialect,omitempty"` + EnableMatchWithoutInvertedIndex bool `thrift:"enable_match_without_inverted_index,119,optional" frugal:"119,optional,bool" json:"enable_match_without_inverted_index,omitempty"` + EnableFallbackOnMissingInvertedIndex bool `thrift:"enable_fallback_on_missing_inverted_index,120,optional" frugal:"120,optional,bool" json:"enable_fallback_on_missing_inverted_index,omitempty"` + KeepCarriageReturn bool `thrift:"keep_carriage_return,121,optional" frugal:"121,optional,bool" json:"keep_carriage_return,omitempty"` + RuntimeBloomFilterMinSize int32 `thrift:"runtime_bloom_filter_min_size,122,optional" frugal:"122,optional,i32" json:"runtime_bloom_filter_min_size,omitempty"` + HiveParquetUseColumnNames bool `thrift:"hive_parquet_use_column_names,123,optional" frugal:"123,optional,bool" json:"hive_parquet_use_column_names,omitempty"` + HiveOrcUseColumnNames bool `thrift:"hive_orc_use_column_names,124,optional" frugal:"124,optional,bool" json:"hive_orc_use_column_names,omitempty"` + EnableSegmentCache bool `thrift:"enable_segment_cache,125,optional" frugal:"125,optional,bool" json:"enable_segment_cache,omitempty"` + RuntimeBloomFilterMaxSize int32 `thrift:"runtime_bloom_filter_max_size,126,optional" frugal:"126,optional,i32" json:"runtime_bloom_filter_max_size,omitempty"` + InListValueCountThreshold int32 `thrift:"in_list_value_count_threshold,127,optional" frugal:"127,optional,i32" json:"in_list_value_count_threshold,omitempty"` + EnableVerboseProfile bool `thrift:"enable_verbose_profile,128,optional" frugal:"128,optional,bool" json:"enable_verbose_profile,omitempty"` + RpcVerboseProfileMaxInstanceCount int32 `thrift:"rpc_verbose_profile_max_instance_count,129,optional" frugal:"129,optional,i32" json:"rpc_verbose_profile_max_instance_count,omitempty"` + EnableAdaptivePipelineTaskSerialReadOnLimit bool `thrift:"enable_adaptive_pipeline_task_serial_read_on_limit,130,optional" frugal:"130,optional,bool" json:"enable_adaptive_pipeline_task_serial_read_on_limit,omitempty"` + AdaptivePipelineTaskSerialReadOnLimit int32 `thrift:"adaptive_pipeline_task_serial_read_on_limit,131,optional" frugal:"131,optional,i32" json:"adaptive_pipeline_task_serial_read_on_limit,omitempty"` + ParallelPrepareThreshold int32 `thrift:"parallel_prepare_threshold,132,optional" frugal:"132,optional,i32" 
json:"parallel_prepare_threshold,omitempty"` + PartitionTopnMaxPartitions int32 `thrift:"partition_topn_max_partitions,133,optional" frugal:"133,optional,i32" json:"partition_topn_max_partitions,omitempty"` + PartitionTopnPrePartitionRows int32 `thrift:"partition_topn_pre_partition_rows,134,optional" frugal:"134,optional,i32" json:"partition_topn_pre_partition_rows,omitempty"` + EnableParallelOutfile bool `thrift:"enable_parallel_outfile,135,optional" frugal:"135,optional,bool" json:"enable_parallel_outfile,omitempty"` + EnablePhraseQuerySequentialOpt bool `thrift:"enable_phrase_query_sequential_opt,136,optional" frugal:"136,optional,bool" json:"enable_phrase_query_sequential_opt,omitempty"` + EnableAutoCreateWhenOverwrite bool `thrift:"enable_auto_create_when_overwrite,137,optional" frugal:"137,optional,bool" json:"enable_auto_create_when_overwrite,omitempty"` + OrcTinyStripeThresholdBytes int64 `thrift:"orc_tiny_stripe_threshold_bytes,138,optional" frugal:"138,optional,i64" json:"orc_tiny_stripe_threshold_bytes,omitempty"` + OrcOnceMaxReadBytes int64 `thrift:"orc_once_max_read_bytes,139,optional" frugal:"139,optional,i64" json:"orc_once_max_read_bytes,omitempty"` + OrcMaxMergeDistanceBytes int64 `thrift:"orc_max_merge_distance_bytes,140,optional" frugal:"140,optional,i64" json:"orc_max_merge_distance_bytes,omitempty"` + IgnoreRuntimeFilterError bool `thrift:"ignore_runtime_filter_error,141,optional" frugal:"141,optional,bool" json:"ignore_runtime_filter_error,omitempty"` + DisableFileCache bool `thrift:"disable_file_cache,1000,optional" frugal:"1000,optional,bool" json:"disable_file_cache,omitempty"` } func NewTQueryOptions() *TQueryOptions { return &TQueryOptions{ - AbortOnError: false, - MaxErrors: 0, - DisableCodegen: true, - BatchSize: 0, - NumNodes: int32(NUM_NODES_ALL), - MaxScanRangeLength: 0, - NumScannerThreads: 0, - MaxIoBuffers: 0, - AllowUnsupportedFormats: false, - DefaultOrderByLimit: -1, - MemLimit: 2147483648, - AbortOnDefaultLimitExceeded: false, - QueryTimeout: 3600, - IsReportSuccess: false, - CodegenLevel: 0, - KuduLatestObservedTs: 9223372036854775807, - QueryType: TQueryType_SELECT, - MinReservation: 0, - MaxReservation: 107374182400, - InitialReservationTotalClaims: 2147483647, - BufferPoolLimit: 2147483648, - DefaultSpillableBufferSize: 2097152, - MinSpillableBufferSize: 65536, - MaxRowSize: 524288, - DisableStreamPreaggregations: false, - MtDop: 0, - LoadMemLimit: 0, - EnableSpilling: false, - EnableEnableExchangeNodeParallelMerge: false, - RuntimeFilterWaitTimeMs: 1000, - RuntimeFilterMaxInNum: 1024, - ReturnObjectDataAsBinary: false, - TrimTailingSpacesForExternalTableQuery: false, - SkipStorageEngineMerge: false, - SkipDeletePredicate: false, - BeExecVersion: 0, - PartitionedHashJoinRowsThreshold: 0, - CheckOverflowForDecimal: false, - SkipDeleteBitmap: false, - EnablePipelineEngine: false, - RepeatMaxNum: 0, - ExternalSortBytesThreshold: 0, - PartitionedHashAggRowsThreshold: 0, - EnableFileCache: false, - InsertTimeout: 14400, - ExecutionTimeout: 3600, - DryRunQuery: false, - EnableCommonExprPushdown: false, - ParallelInstance: 1, - MysqlRowBinaryFormat: false, - ExternalAggBytesThreshold: 0, - ExternalAggPartitionBits: 4, - EnableParquetLazyMat: true, - EnableOrcLazyMat: true, - EnableScanNodeRunSerial: false, - EnableInsertStrict: false, - EnableInvertedIndexQuery: true, - TruncateCharOrVarcharColumns: false, - EnableHashJoinEarlyStartProbe: false, - EnablePipelineXEngine: false, - EnableMemtableOnSinkNode: false, - EnableDeleteSubPredicateV2: false, - 
FeProcessUuid: 0, - InvertedIndexConjunctionOptThreshold: 1000, - EnableProfile: false, - EnablePageCache: false, - AnalyzeTimeout: 43200, + AbortOnError: false, + MaxErrors: 0, + DisableCodegen: true, + BatchSize: 0, + NumNodes: int32(NUM_NODES_ALL), + MaxScanRangeLength: 0, + NumScannerThreads: 0, + MaxIoBuffers: 0, + AllowUnsupportedFormats: false, + DefaultOrderByLimit: -1, + MemLimit: 2147483648, + AbortOnDefaultLimitExceeded: false, + QueryTimeout: 3600, + IsReportSuccess: false, + CodegenLevel: 0, + KuduLatestObservedTs: 9223372036854775807, + QueryType: TQueryType_SELECT, + MinReservation: 0, + MaxReservation: 107374182400, + InitialReservationTotalClaims: 2147483647, + BufferPoolLimit: 2147483648, + DefaultSpillableBufferSize: 2097152, + MinSpillableBufferSize: 65536, + MaxRowSize: 524288, + DisableStreamPreaggregations: false, + MtDop: 0, + LoadMemLimit: 0, + EnableSpilling: false, + EnableEnableExchangeNodeParallelMerge: false, + RuntimeFilterWaitTimeMs: 1000, + RuntimeFilterMaxInNum: 1024, + ReturnObjectDataAsBinary: false, + TrimTailingSpacesForExternalTableQuery: false, + SkipStorageEngineMerge: false, + SkipDeletePredicate: false, + BeExecVersion: 0, + PartitionedHashJoinRowsThreshold: 0, + CheckOverflowForDecimal: true, + SkipDeleteBitmap: false, + EnablePipelineEngine: true, + RepeatMaxNum: 0, + ExternalSortBytesThreshold: 0, + PartitionedHashAggRowsThreshold: 0, + EnableFileCache: false, + InsertTimeout: 14400, + ExecutionTimeout: 3600, + DryRunQuery: false, + EnableCommonExprPushdown: false, + ParallelInstance: 1, + MysqlRowBinaryFormat: false, + ExternalAggBytesThreshold: 0, + ExternalAggPartitionBits: 4, + EnableParquetLazyMat: true, + EnableOrcLazyMat: true, + EnableScanNodeRunSerial: false, + EnableInsertStrict: false, + EnableInvertedIndexQuery: true, + TruncateCharOrVarcharColumns: false, + EnableHashJoinEarlyStartProbe: false, + EnablePipelineXEngine: true, + EnableMemtableOnSinkNode: false, + EnableDeleteSubPredicateV2: false, + FeProcessUuid: 0, + InvertedIndexConjunctionOptThreshold: 1000, + EnableProfile: false, + EnablePageCache: false, + AnalyzeTimeout: 43200, + FasterFloatConvert: false, + EnableDecimal256: false, + EnableLocalShuffle: false, + SkipMissingVersion: false, + RuntimeFilterWaitInfinitely: false, + WaitFullBlockScheduleTimes: 1, + InvertedIndexMaxExpansions: 50, + InvertedIndexSkipThreshold: 50, + EnableParallelScan: false, + ParallelScanMaxScannersCount: 0, + ParallelScanMinRowsPerScanner: 0, + SkipBadTablet: false, + ScannerScaleUpRatio: 0.0, + EnableDistinctStreamingAggregation: true, + EnableJoinSpill: false, + EnableSortSpill: false, + EnableAggSpill: false, + MinRevocableMem: 0, + SpillStreamingAggMemLimit: 0, + DataQueueMaxBlocks: 0, + EnableCommonExprPushdownForInvertedIndex: false, + EnableForceSpill: false, + EnableParquetFilterByMinMax: true, + EnableOrcFilterByMinMax: true, + MaxColumnReaderNum: 0, + EnableLocalMergeSort: false, + EnableParallelResultSink: false, + EnableShortCircuitQueryAccessColumnStore: false, + EnableNoNeedReadDataOpt: true, + ReadCsvEmptyLineAsNull: false, + SerdeDialect: TSerdeDialect_DORIS, + EnableMatchWithoutInvertedIndex: true, + EnableFallbackOnMissingInvertedIndex: true, + KeepCarriageReturn: false, + RuntimeBloomFilterMinSize: 1048576, + HiveParquetUseColumnNames: true, + HiveOrcUseColumnNames: true, + EnableSegmentCache: true, + RuntimeBloomFilterMaxSize: 16777216, + InListValueCountThreshold: 10, + EnableVerboseProfile: false, + RpcVerboseProfileMaxInstanceCount: 0, + 
EnableAdaptivePipelineTaskSerialReadOnLimit: true, + AdaptivePipelineTaskSerialReadOnLimit: 10000, + ParallelPrepareThreshold: 0, + PartitionTopnMaxPartitions: 1024, + PartitionTopnPrePartitionRows: 1000, + EnableParallelOutfile: false, + EnablePhraseQuerySequentialOpt: true, + EnableAutoCreateWhenOverwrite: false, + OrcTinyStripeThresholdBytes: 8388608, + OrcOnceMaxReadBytes: 8388608, + OrcMaxMergeDistanceBytes: 1048576, + IgnoreRuntimeFilterError: false, + DisableFileCache: false, } } func (p *TQueryOptions) InitDefault() { - *p = TQueryOptions{ - - AbortOnError: false, - MaxErrors: 0, - DisableCodegen: true, - BatchSize: 0, - NumNodes: int32(NUM_NODES_ALL), - MaxScanRangeLength: 0, - NumScannerThreads: 0, - MaxIoBuffers: 0, - AllowUnsupportedFormats: false, - DefaultOrderByLimit: -1, - MemLimit: 2147483648, - AbortOnDefaultLimitExceeded: false, - QueryTimeout: 3600, - IsReportSuccess: false, - CodegenLevel: 0, - KuduLatestObservedTs: 9223372036854775807, - QueryType: TQueryType_SELECT, - MinReservation: 0, - MaxReservation: 107374182400, - InitialReservationTotalClaims: 2147483647, - BufferPoolLimit: 2147483648, - DefaultSpillableBufferSize: 2097152, - MinSpillableBufferSize: 65536, - MaxRowSize: 524288, - DisableStreamPreaggregations: false, - MtDop: 0, - LoadMemLimit: 0, - EnableSpilling: false, - EnableEnableExchangeNodeParallelMerge: false, - RuntimeFilterWaitTimeMs: 1000, - RuntimeFilterMaxInNum: 1024, - ReturnObjectDataAsBinary: false, - TrimTailingSpacesForExternalTableQuery: false, - SkipStorageEngineMerge: false, - SkipDeletePredicate: false, - BeExecVersion: 0, - PartitionedHashJoinRowsThreshold: 0, - CheckOverflowForDecimal: false, - SkipDeleteBitmap: false, - EnablePipelineEngine: false, - RepeatMaxNum: 0, - ExternalSortBytesThreshold: 0, - PartitionedHashAggRowsThreshold: 0, - EnableFileCache: false, - InsertTimeout: 14400, - ExecutionTimeout: 3600, - DryRunQuery: false, - EnableCommonExprPushdown: false, - ParallelInstance: 1, - MysqlRowBinaryFormat: false, - ExternalAggBytesThreshold: 0, - ExternalAggPartitionBits: 4, - EnableParquetLazyMat: true, - EnableOrcLazyMat: true, - EnableScanNodeRunSerial: false, - EnableInsertStrict: false, - EnableInvertedIndexQuery: true, - TruncateCharOrVarcharColumns: false, - EnableHashJoinEarlyStartProbe: false, - EnablePipelineXEngine: false, - EnableMemtableOnSinkNode: false, - EnableDeleteSubPredicateV2: false, - FeProcessUuid: 0, - InvertedIndexConjunctionOptThreshold: 1000, - EnableProfile: false, - EnablePageCache: false, - AnalyzeTimeout: 43200, - } + p.AbortOnError = false + p.MaxErrors = 0 + p.DisableCodegen = true + p.BatchSize = 0 + p.NumNodes = int32(NUM_NODES_ALL) + p.MaxScanRangeLength = 0 + p.NumScannerThreads = 0 + p.MaxIoBuffers = 0 + p.AllowUnsupportedFormats = false + p.DefaultOrderByLimit = -1 + p.MemLimit = 2147483648 + p.AbortOnDefaultLimitExceeded = false + p.QueryTimeout = 3600 + p.IsReportSuccess = false + p.CodegenLevel = 0 + p.KuduLatestObservedTs = 9223372036854775807 + p.QueryType = TQueryType_SELECT + p.MinReservation = 0 + p.MaxReservation = 107374182400 + p.InitialReservationTotalClaims = 2147483647 + p.BufferPoolLimit = 2147483648 + p.DefaultSpillableBufferSize = 2097152 + p.MinSpillableBufferSize = 65536 + p.MaxRowSize = 524288 + p.DisableStreamPreaggregations = false + p.MtDop = 0 + p.LoadMemLimit = 0 + p.EnableSpilling = false + p.EnableEnableExchangeNodeParallelMerge = false + p.RuntimeFilterWaitTimeMs = 1000 + p.RuntimeFilterMaxInNum = 1024 + p.ReturnObjectDataAsBinary = false + 
p.TrimTailingSpacesForExternalTableQuery = false + p.SkipStorageEngineMerge = false + p.SkipDeletePredicate = false + p.BeExecVersion = 0 + p.PartitionedHashJoinRowsThreshold = 0 + p.CheckOverflowForDecimal = true + p.SkipDeleteBitmap = false + p.EnablePipelineEngine = true + p.RepeatMaxNum = 0 + p.ExternalSortBytesThreshold = 0 + p.PartitionedHashAggRowsThreshold = 0 + p.EnableFileCache = false + p.InsertTimeout = 14400 + p.ExecutionTimeout = 3600 + p.DryRunQuery = false + p.EnableCommonExprPushdown = false + p.ParallelInstance = 1 + p.MysqlRowBinaryFormat = false + p.ExternalAggBytesThreshold = 0 + p.ExternalAggPartitionBits = 4 + p.EnableParquetLazyMat = true + p.EnableOrcLazyMat = true + p.EnableScanNodeRunSerial = false + p.EnableInsertStrict = false + p.EnableInvertedIndexQuery = true + p.TruncateCharOrVarcharColumns = false + p.EnableHashJoinEarlyStartProbe = false + p.EnablePipelineXEngine = true + p.EnableMemtableOnSinkNode = false + p.EnableDeleteSubPredicateV2 = false + p.FeProcessUuid = 0 + p.InvertedIndexConjunctionOptThreshold = 1000 + p.EnableProfile = false + p.EnablePageCache = false + p.AnalyzeTimeout = 43200 + p.FasterFloatConvert = false + p.EnableDecimal256 = false + p.EnableLocalShuffle = false + p.SkipMissingVersion = false + p.RuntimeFilterWaitInfinitely = false + p.WaitFullBlockScheduleTimes = 1 + p.InvertedIndexMaxExpansions = 50 + p.InvertedIndexSkipThreshold = 50 + p.EnableParallelScan = false + p.ParallelScanMaxScannersCount = 0 + p.ParallelScanMinRowsPerScanner = 0 + p.SkipBadTablet = false + p.ScannerScaleUpRatio = 0.0 + p.EnableDistinctStreamingAggregation = true + p.EnableJoinSpill = false + p.EnableSortSpill = false + p.EnableAggSpill = false + p.MinRevocableMem = 0 + p.SpillStreamingAggMemLimit = 0 + p.DataQueueMaxBlocks = 0 + p.EnableCommonExprPushdownForInvertedIndex = false + p.EnableForceSpill = false + p.EnableParquetFilterByMinMax = true + p.EnableOrcFilterByMinMax = true + p.MaxColumnReaderNum = 0 + p.EnableLocalMergeSort = false + p.EnableParallelResultSink = false + p.EnableShortCircuitQueryAccessColumnStore = false + p.EnableNoNeedReadDataOpt = true + p.ReadCsvEmptyLineAsNull = false + p.SerdeDialect = TSerdeDialect_DORIS + p.EnableMatchWithoutInvertedIndex = true + p.EnableFallbackOnMissingInvertedIndex = true + p.KeepCarriageReturn = false + p.RuntimeBloomFilterMinSize = 1048576 + p.HiveParquetUseColumnNames = true + p.HiveOrcUseColumnNames = true + p.EnableSegmentCache = true + p.RuntimeBloomFilterMaxSize = 16777216 + p.InListValueCountThreshold = 10 + p.EnableVerboseProfile = false + p.RpcVerboseProfileMaxInstanceCount = 0 + p.EnableAdaptivePipelineTaskSerialReadOnLimit = true + p.AdaptivePipelineTaskSerialReadOnLimit = 10000 + p.ParallelPrepareThreshold = 0 + p.PartitionTopnMaxPartitions = 1024 + p.PartitionTopnPrePartitionRows = 1000 + p.EnableParallelOutfile = false + p.EnablePhraseQuerySequentialOpt = true + p.EnableAutoCreateWhenOverwrite = false + p.OrcTinyStripeThresholdBytes = 8388608 + p.OrcOnceMaxReadBytes = 8388608 + p.OrcMaxMergeDistanceBytes = 1048576 + p.IgnoreRuntimeFilterError = false + p.DisableFileCache = false } var TQueryOptions_AbortOnError_DEFAULT bool = false @@ -2183,7 +2424,7 @@ func (p *TQueryOptions) GetEnableShareHashTableForBroadcastJoin() (v bool) { return *p.EnableShareHashTableForBroadcastJoin } -var TQueryOptions_CheckOverflowForDecimal_DEFAULT bool = false +var TQueryOptions_CheckOverflowForDecimal_DEFAULT bool = true func (p *TQueryOptions) GetCheckOverflowForDecimal() (v bool) { if 
!p.IsSetCheckOverflowForDecimal() { @@ -2201,7 +2442,7 @@ func (p *TQueryOptions) GetSkipDeleteBitmap() (v bool) { return p.SkipDeleteBitmap } -var TQueryOptions_EnablePipelineEngine_DEFAULT bool = false +var TQueryOptions_EnablePipelineEngine_DEFAULT bool = true func (p *TQueryOptions) GetEnablePipelineEngine() (v bool) { if !p.IsSetEnablePipelineEngine() { @@ -2399,7 +2640,7 @@ func (p *TQueryOptions) GetEnableHashJoinEarlyStartProbe() (v bool) { return p.EnableHashJoinEarlyStartProbe } -var TQueryOptions_EnablePipelineXEngine_DEFAULT bool = false +var TQueryOptions_EnablePipelineXEngine_DEFAULT bool = true func (p *TQueryOptions) GetEnablePipelineXEngine() (v bool) { if !p.IsSetEnablePipelineXEngine() { @@ -2470,86 +2711,590 @@ func (p *TQueryOptions) GetAnalyzeTimeout() (v int32) { } return p.AnalyzeTimeout } -func (p *TQueryOptions) SetAbortOnError(val bool) { - p.AbortOnError = val -} -func (p *TQueryOptions) SetMaxErrors(val int32) { - p.MaxErrors = val + +var TQueryOptions_FasterFloatConvert_DEFAULT bool = false + +func (p *TQueryOptions) GetFasterFloatConvert() (v bool) { + if !p.IsSetFasterFloatConvert() { + return TQueryOptions_FasterFloatConvert_DEFAULT + } + return p.FasterFloatConvert } -func (p *TQueryOptions) SetDisableCodegen(val bool) { - p.DisableCodegen = val + +var TQueryOptions_EnableDecimal256_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableDecimal256() (v bool) { + if !p.IsSetEnableDecimal256() { + return TQueryOptions_EnableDecimal256_DEFAULT + } + return p.EnableDecimal256 } -func (p *TQueryOptions) SetBatchSize(val int32) { - p.BatchSize = val + +var TQueryOptions_EnableLocalShuffle_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableLocalShuffle() (v bool) { + if !p.IsSetEnableLocalShuffle() { + return TQueryOptions_EnableLocalShuffle_DEFAULT + } + return p.EnableLocalShuffle } -func (p *TQueryOptions) SetNumNodes(val int32) { - p.NumNodes = val + +var TQueryOptions_SkipMissingVersion_DEFAULT bool = false + +func (p *TQueryOptions) GetSkipMissingVersion() (v bool) { + if !p.IsSetSkipMissingVersion() { + return TQueryOptions_SkipMissingVersion_DEFAULT + } + return p.SkipMissingVersion } -func (p *TQueryOptions) SetMaxScanRangeLength(val int64) { - p.MaxScanRangeLength = val + +var TQueryOptions_RuntimeFilterWaitInfinitely_DEFAULT bool = false + +func (p *TQueryOptions) GetRuntimeFilterWaitInfinitely() (v bool) { + if !p.IsSetRuntimeFilterWaitInfinitely() { + return TQueryOptions_RuntimeFilterWaitInfinitely_DEFAULT + } + return p.RuntimeFilterWaitInfinitely } -func (p *TQueryOptions) SetNumScannerThreads(val int32) { - p.NumScannerThreads = val + +var TQueryOptions_WaitFullBlockScheduleTimes_DEFAULT int32 = 1 + +func (p *TQueryOptions) GetWaitFullBlockScheduleTimes() (v int32) { + if !p.IsSetWaitFullBlockScheduleTimes() { + return TQueryOptions_WaitFullBlockScheduleTimes_DEFAULT + } + return p.WaitFullBlockScheduleTimes } -func (p *TQueryOptions) SetMaxIoBuffers(val int32) { - p.MaxIoBuffers = val + +var TQueryOptions_InvertedIndexMaxExpansions_DEFAULT int32 = 50 + +func (p *TQueryOptions) GetInvertedIndexMaxExpansions() (v int32) { + if !p.IsSetInvertedIndexMaxExpansions() { + return TQueryOptions_InvertedIndexMaxExpansions_DEFAULT + } + return p.InvertedIndexMaxExpansions } -func (p *TQueryOptions) SetAllowUnsupportedFormats(val bool) { - p.AllowUnsupportedFormats = val + +var TQueryOptions_InvertedIndexSkipThreshold_DEFAULT int32 = 50 + +func (p *TQueryOptions) GetInvertedIndexSkipThreshold() (v int32) { + if !p.IsSetInvertedIndexSkipThreshold() { 
+ return TQueryOptions_InvertedIndexSkipThreshold_DEFAULT + } + return p.InvertedIndexSkipThreshold } -func (p *TQueryOptions) SetDefaultOrderByLimit(val int64) { - p.DefaultOrderByLimit = val + +var TQueryOptions_EnableParallelScan_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableParallelScan() (v bool) { + if !p.IsSetEnableParallelScan() { + return TQueryOptions_EnableParallelScan_DEFAULT + } + return p.EnableParallelScan } -func (p *TQueryOptions) SetMemLimit(val int64) { - p.MemLimit = val + +var TQueryOptions_ParallelScanMaxScannersCount_DEFAULT int32 = 0 + +func (p *TQueryOptions) GetParallelScanMaxScannersCount() (v int32) { + if !p.IsSetParallelScanMaxScannersCount() { + return TQueryOptions_ParallelScanMaxScannersCount_DEFAULT + } + return p.ParallelScanMaxScannersCount } -func (p *TQueryOptions) SetAbortOnDefaultLimitExceeded(val bool) { - p.AbortOnDefaultLimitExceeded = val + +var TQueryOptions_ParallelScanMinRowsPerScanner_DEFAULT int64 = 0 + +func (p *TQueryOptions) GetParallelScanMinRowsPerScanner() (v int64) { + if !p.IsSetParallelScanMinRowsPerScanner() { + return TQueryOptions_ParallelScanMinRowsPerScanner_DEFAULT + } + return p.ParallelScanMinRowsPerScanner } -func (p *TQueryOptions) SetQueryTimeout(val int32) { - p.QueryTimeout = val + +var TQueryOptions_SkipBadTablet_DEFAULT bool = false + +func (p *TQueryOptions) GetSkipBadTablet() (v bool) { + if !p.IsSetSkipBadTablet() { + return TQueryOptions_SkipBadTablet_DEFAULT + } + return p.SkipBadTablet } -func (p *TQueryOptions) SetIsReportSuccess(val bool) { - p.IsReportSuccess = val + +var TQueryOptions_ScannerScaleUpRatio_DEFAULT float64 = 0.0 + +func (p *TQueryOptions) GetScannerScaleUpRatio() (v float64) { + if !p.IsSetScannerScaleUpRatio() { + return TQueryOptions_ScannerScaleUpRatio_DEFAULT + } + return p.ScannerScaleUpRatio } -func (p *TQueryOptions) SetCodegenLevel(val int32) { - p.CodegenLevel = val + +var TQueryOptions_EnableDistinctStreamingAggregation_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableDistinctStreamingAggregation() (v bool) { + if !p.IsSetEnableDistinctStreamingAggregation() { + return TQueryOptions_EnableDistinctStreamingAggregation_DEFAULT + } + return p.EnableDistinctStreamingAggregation } -func (p *TQueryOptions) SetKuduLatestObservedTs(val int64) { - p.KuduLatestObservedTs = val + +var TQueryOptions_EnableJoinSpill_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableJoinSpill() (v bool) { + if !p.IsSetEnableJoinSpill() { + return TQueryOptions_EnableJoinSpill_DEFAULT + } + return p.EnableJoinSpill } -func (p *TQueryOptions) SetQueryType(val TQueryType) { - p.QueryType = val + +var TQueryOptions_EnableSortSpill_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableSortSpill() (v bool) { + if !p.IsSetEnableSortSpill() { + return TQueryOptions_EnableSortSpill_DEFAULT + } + return p.EnableSortSpill } -func (p *TQueryOptions) SetMinReservation(val int64) { - p.MinReservation = val + +var TQueryOptions_EnableAggSpill_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableAggSpill() (v bool) { + if !p.IsSetEnableAggSpill() { + return TQueryOptions_EnableAggSpill_DEFAULT + } + return p.EnableAggSpill } -func (p *TQueryOptions) SetMaxReservation(val int64) { - p.MaxReservation = val + +var TQueryOptions_MinRevocableMem_DEFAULT int64 = 0 + +func (p *TQueryOptions) GetMinRevocableMem() (v int64) { + if !p.IsSetMinRevocableMem() { + return TQueryOptions_MinRevocableMem_DEFAULT + } + return p.MinRevocableMem } -func (p *TQueryOptions) SetInitialReservationTotalClaims(val int64) { - 
p.InitialReservationTotalClaims = val + +var TQueryOptions_SpillStreamingAggMemLimit_DEFAULT int64 = 0 + +func (p *TQueryOptions) GetSpillStreamingAggMemLimit() (v int64) { + if !p.IsSetSpillStreamingAggMemLimit() { + return TQueryOptions_SpillStreamingAggMemLimit_DEFAULT + } + return p.SpillStreamingAggMemLimit } -func (p *TQueryOptions) SetBufferPoolLimit(val int64) { - p.BufferPoolLimit = val + +var TQueryOptions_DataQueueMaxBlocks_DEFAULT int64 = 0 + +func (p *TQueryOptions) GetDataQueueMaxBlocks() (v int64) { + if !p.IsSetDataQueueMaxBlocks() { + return TQueryOptions_DataQueueMaxBlocks_DEFAULT + } + return p.DataQueueMaxBlocks } -func (p *TQueryOptions) SetDefaultSpillableBufferSize(val int64) { - p.DefaultSpillableBufferSize = val + +var TQueryOptions_EnableCommonExprPushdownForInvertedIndex_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableCommonExprPushdownForInvertedIndex() (v bool) { + if !p.IsSetEnableCommonExprPushdownForInvertedIndex() { + return TQueryOptions_EnableCommonExprPushdownForInvertedIndex_DEFAULT + } + return p.EnableCommonExprPushdownForInvertedIndex } -func (p *TQueryOptions) SetMinSpillableBufferSize(val int64) { - p.MinSpillableBufferSize = val + +var TQueryOptions_LocalExchangeFreeBlocksLimit_DEFAULT int64 + +func (p *TQueryOptions) GetLocalExchangeFreeBlocksLimit() (v int64) { + if !p.IsSetLocalExchangeFreeBlocksLimit() { + return TQueryOptions_LocalExchangeFreeBlocksLimit_DEFAULT + } + return *p.LocalExchangeFreeBlocksLimit } -func (p *TQueryOptions) SetMaxRowSize(val int64) { - p.MaxRowSize = val + +var TQueryOptions_EnableForceSpill_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableForceSpill() (v bool) { + if !p.IsSetEnableForceSpill() { + return TQueryOptions_EnableForceSpill_DEFAULT + } + return p.EnableForceSpill } -func (p *TQueryOptions) SetDisableStreamPreaggregations(val bool) { - p.DisableStreamPreaggregations = val + +var TQueryOptions_EnableParquetFilterByMinMax_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableParquetFilterByMinMax() (v bool) { + if !p.IsSetEnableParquetFilterByMinMax() { + return TQueryOptions_EnableParquetFilterByMinMax_DEFAULT + } + return p.EnableParquetFilterByMinMax } -func (p *TQueryOptions) SetMtDop(val int32) { - p.MtDop = val + +var TQueryOptions_EnableOrcFilterByMinMax_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableOrcFilterByMinMax() (v bool) { + if !p.IsSetEnableOrcFilterByMinMax() { + return TQueryOptions_EnableOrcFilterByMinMax_DEFAULT + } + return p.EnableOrcFilterByMinMax } -func (p *TQueryOptions) SetLoadMemLimit(val int64) { - p.LoadMemLimit = val + +var TQueryOptions_MaxColumnReaderNum_DEFAULT int32 = 0 + +func (p *TQueryOptions) GetMaxColumnReaderNum() (v int32) { + if !p.IsSetMaxColumnReaderNum() { + return TQueryOptions_MaxColumnReaderNum_DEFAULT + } + return p.MaxColumnReaderNum +} + +var TQueryOptions_EnableLocalMergeSort_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableLocalMergeSort() (v bool) { + if !p.IsSetEnableLocalMergeSort() { + return TQueryOptions_EnableLocalMergeSort_DEFAULT + } + return p.EnableLocalMergeSort +} + +var TQueryOptions_EnableParallelResultSink_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableParallelResultSink() (v bool) { + if !p.IsSetEnableParallelResultSink() { + return TQueryOptions_EnableParallelResultSink_DEFAULT + } + return p.EnableParallelResultSink +} + +var TQueryOptions_EnableShortCircuitQueryAccessColumnStore_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableShortCircuitQueryAccessColumnStore() (v bool) { + if 
!p.IsSetEnableShortCircuitQueryAccessColumnStore() { + return TQueryOptions_EnableShortCircuitQueryAccessColumnStore_DEFAULT + } + return p.EnableShortCircuitQueryAccessColumnStore +} + +var TQueryOptions_EnableNoNeedReadDataOpt_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableNoNeedReadDataOpt() (v bool) { + if !p.IsSetEnableNoNeedReadDataOpt() { + return TQueryOptions_EnableNoNeedReadDataOpt_DEFAULT + } + return p.EnableNoNeedReadDataOpt +} + +var TQueryOptions_ReadCsvEmptyLineAsNull_DEFAULT bool = false + +func (p *TQueryOptions) GetReadCsvEmptyLineAsNull() (v bool) { + if !p.IsSetReadCsvEmptyLineAsNull() { + return TQueryOptions_ReadCsvEmptyLineAsNull_DEFAULT + } + return p.ReadCsvEmptyLineAsNull +} + +var TQueryOptions_SerdeDialect_DEFAULT TSerdeDialect = TSerdeDialect_DORIS + +func (p *TQueryOptions) GetSerdeDialect() (v TSerdeDialect) { + if !p.IsSetSerdeDialect() { + return TQueryOptions_SerdeDialect_DEFAULT + } + return p.SerdeDialect +} + +var TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableMatchWithoutInvertedIndex() (v bool) { + if !p.IsSetEnableMatchWithoutInvertedIndex() { + return TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT + } + return p.EnableMatchWithoutInvertedIndex +} + +var TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableFallbackOnMissingInvertedIndex() (v bool) { + if !p.IsSetEnableFallbackOnMissingInvertedIndex() { + return TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT + } + return p.EnableFallbackOnMissingInvertedIndex +} + +var TQueryOptions_KeepCarriageReturn_DEFAULT bool = false + +func (p *TQueryOptions) GetKeepCarriageReturn() (v bool) { + if !p.IsSetKeepCarriageReturn() { + return TQueryOptions_KeepCarriageReturn_DEFAULT + } + return p.KeepCarriageReturn +} + +var TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT int32 = 1048576 + +func (p *TQueryOptions) GetRuntimeBloomFilterMinSize() (v int32) { + if !p.IsSetRuntimeBloomFilterMinSize() { + return TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT + } + return p.RuntimeBloomFilterMinSize +} + +var TQueryOptions_HiveParquetUseColumnNames_DEFAULT bool = true + +func (p *TQueryOptions) GetHiveParquetUseColumnNames() (v bool) { + if !p.IsSetHiveParquetUseColumnNames() { + return TQueryOptions_HiveParquetUseColumnNames_DEFAULT + } + return p.HiveParquetUseColumnNames +} + +var TQueryOptions_HiveOrcUseColumnNames_DEFAULT bool = true + +func (p *TQueryOptions) GetHiveOrcUseColumnNames() (v bool) { + if !p.IsSetHiveOrcUseColumnNames() { + return TQueryOptions_HiveOrcUseColumnNames_DEFAULT + } + return p.HiveOrcUseColumnNames +} + +var TQueryOptions_EnableSegmentCache_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableSegmentCache() (v bool) { + if !p.IsSetEnableSegmentCache() { + return TQueryOptions_EnableSegmentCache_DEFAULT + } + return p.EnableSegmentCache +} + +var TQueryOptions_RuntimeBloomFilterMaxSize_DEFAULT int32 = 16777216 + +func (p *TQueryOptions) GetRuntimeBloomFilterMaxSize() (v int32) { + if !p.IsSetRuntimeBloomFilterMaxSize() { + return TQueryOptions_RuntimeBloomFilterMaxSize_DEFAULT + } + return p.RuntimeBloomFilterMaxSize +} + +var TQueryOptions_InListValueCountThreshold_DEFAULT int32 = 10 + +func (p *TQueryOptions) GetInListValueCountThreshold() (v int32) { + if !p.IsSetInListValueCountThreshold() { + return TQueryOptions_InListValueCountThreshold_DEFAULT + } + return p.InListValueCountThreshold +} + +var TQueryOptions_EnableVerboseProfile_DEFAULT bool = 
false + +func (p *TQueryOptions) GetEnableVerboseProfile() (v bool) { + if !p.IsSetEnableVerboseProfile() { + return TQueryOptions_EnableVerboseProfile_DEFAULT + } + return p.EnableVerboseProfile +} + +var TQueryOptions_RpcVerboseProfileMaxInstanceCount_DEFAULT int32 = 0 + +func (p *TQueryOptions) GetRpcVerboseProfileMaxInstanceCount() (v int32) { + if !p.IsSetRpcVerboseProfileMaxInstanceCount() { + return TQueryOptions_RpcVerboseProfileMaxInstanceCount_DEFAULT + } + return p.RpcVerboseProfileMaxInstanceCount +} + +var TQueryOptions_EnableAdaptivePipelineTaskSerialReadOnLimit_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableAdaptivePipelineTaskSerialReadOnLimit() (v bool) { + if !p.IsSetEnableAdaptivePipelineTaskSerialReadOnLimit() { + return TQueryOptions_EnableAdaptivePipelineTaskSerialReadOnLimit_DEFAULT + } + return p.EnableAdaptivePipelineTaskSerialReadOnLimit +} + +var TQueryOptions_AdaptivePipelineTaskSerialReadOnLimit_DEFAULT int32 = 10000 + +func (p *TQueryOptions) GetAdaptivePipelineTaskSerialReadOnLimit() (v int32) { + if !p.IsSetAdaptivePipelineTaskSerialReadOnLimit() { + return TQueryOptions_AdaptivePipelineTaskSerialReadOnLimit_DEFAULT + } + return p.AdaptivePipelineTaskSerialReadOnLimit +} + +var TQueryOptions_ParallelPrepareThreshold_DEFAULT int32 = 0 + +func (p *TQueryOptions) GetParallelPrepareThreshold() (v int32) { + if !p.IsSetParallelPrepareThreshold() { + return TQueryOptions_ParallelPrepareThreshold_DEFAULT + } + return p.ParallelPrepareThreshold +} + +var TQueryOptions_PartitionTopnMaxPartitions_DEFAULT int32 = 1024 + +func (p *TQueryOptions) GetPartitionTopnMaxPartitions() (v int32) { + if !p.IsSetPartitionTopnMaxPartitions() { + return TQueryOptions_PartitionTopnMaxPartitions_DEFAULT + } + return p.PartitionTopnMaxPartitions +} + +var TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT int32 = 1000 + +func (p *TQueryOptions) GetPartitionTopnPrePartitionRows() (v int32) { + if !p.IsSetPartitionTopnPrePartitionRows() { + return TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT + } + return p.PartitionTopnPrePartitionRows +} + +var TQueryOptions_EnableParallelOutfile_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableParallelOutfile() (v bool) { + if !p.IsSetEnableParallelOutfile() { + return TQueryOptions_EnableParallelOutfile_DEFAULT + } + return p.EnableParallelOutfile +} + +var TQueryOptions_EnablePhraseQuerySequentialOpt_DEFAULT bool = true + +func (p *TQueryOptions) GetEnablePhraseQuerySequentialOpt() (v bool) { + if !p.IsSetEnablePhraseQuerySequentialOpt() { + return TQueryOptions_EnablePhraseQuerySequentialOpt_DEFAULT + } + return p.EnablePhraseQuerySequentialOpt +} + +var TQueryOptions_EnableAutoCreateWhenOverwrite_DEFAULT bool = false + +func (p *TQueryOptions) GetEnableAutoCreateWhenOverwrite() (v bool) { + if !p.IsSetEnableAutoCreateWhenOverwrite() { + return TQueryOptions_EnableAutoCreateWhenOverwrite_DEFAULT + } + return p.EnableAutoCreateWhenOverwrite +} + +var TQueryOptions_OrcTinyStripeThresholdBytes_DEFAULT int64 = 8388608 + +func (p *TQueryOptions) GetOrcTinyStripeThresholdBytes() (v int64) { + if !p.IsSetOrcTinyStripeThresholdBytes() { + return TQueryOptions_OrcTinyStripeThresholdBytes_DEFAULT + } + return p.OrcTinyStripeThresholdBytes +} + +var TQueryOptions_OrcOnceMaxReadBytes_DEFAULT int64 = 8388608 + +func (p *TQueryOptions) GetOrcOnceMaxReadBytes() (v int64) { + if !p.IsSetOrcOnceMaxReadBytes() { + return TQueryOptions_OrcOnceMaxReadBytes_DEFAULT + } + return p.OrcOnceMaxReadBytes +} + +var 
TQueryOptions_OrcMaxMergeDistanceBytes_DEFAULT int64 = 1048576 + +func (p *TQueryOptions) GetOrcMaxMergeDistanceBytes() (v int64) { + if !p.IsSetOrcMaxMergeDistanceBytes() { + return TQueryOptions_OrcMaxMergeDistanceBytes_DEFAULT + } + return p.OrcMaxMergeDistanceBytes +} + +var TQueryOptions_IgnoreRuntimeFilterError_DEFAULT bool = false + +func (p *TQueryOptions) GetIgnoreRuntimeFilterError() (v bool) { + if !p.IsSetIgnoreRuntimeFilterError() { + return TQueryOptions_IgnoreRuntimeFilterError_DEFAULT + } + return p.IgnoreRuntimeFilterError +} + +var TQueryOptions_DisableFileCache_DEFAULT bool = false + +func (p *TQueryOptions) GetDisableFileCache() (v bool) { + if !p.IsSetDisableFileCache() { + return TQueryOptions_DisableFileCache_DEFAULT + } + return p.DisableFileCache +} +func (p *TQueryOptions) SetAbortOnError(val bool) { + p.AbortOnError = val +} +func (p *TQueryOptions) SetMaxErrors(val int32) { + p.MaxErrors = val +} +func (p *TQueryOptions) SetDisableCodegen(val bool) { + p.DisableCodegen = val +} +func (p *TQueryOptions) SetBatchSize(val int32) { + p.BatchSize = val +} +func (p *TQueryOptions) SetNumNodes(val int32) { + p.NumNodes = val +} +func (p *TQueryOptions) SetMaxScanRangeLength(val int64) { + p.MaxScanRangeLength = val +} +func (p *TQueryOptions) SetNumScannerThreads(val int32) { + p.NumScannerThreads = val +} +func (p *TQueryOptions) SetMaxIoBuffers(val int32) { + p.MaxIoBuffers = val +} +func (p *TQueryOptions) SetAllowUnsupportedFormats(val bool) { + p.AllowUnsupportedFormats = val +} +func (p *TQueryOptions) SetDefaultOrderByLimit(val int64) { + p.DefaultOrderByLimit = val +} +func (p *TQueryOptions) SetMemLimit(val int64) { + p.MemLimit = val +} +func (p *TQueryOptions) SetAbortOnDefaultLimitExceeded(val bool) { + p.AbortOnDefaultLimitExceeded = val +} +func (p *TQueryOptions) SetQueryTimeout(val int32) { + p.QueryTimeout = val +} +func (p *TQueryOptions) SetIsReportSuccess(val bool) { + p.IsReportSuccess = val +} +func (p *TQueryOptions) SetCodegenLevel(val int32) { + p.CodegenLevel = val +} +func (p *TQueryOptions) SetKuduLatestObservedTs(val int64) { + p.KuduLatestObservedTs = val +} +func (p *TQueryOptions) SetQueryType(val TQueryType) { + p.QueryType = val +} +func (p *TQueryOptions) SetMinReservation(val int64) { + p.MinReservation = val +} +func (p *TQueryOptions) SetMaxReservation(val int64) { + p.MaxReservation = val +} +func (p *TQueryOptions) SetInitialReservationTotalClaims(val int64) { + p.InitialReservationTotalClaims = val +} +func (p *TQueryOptions) SetBufferPoolLimit(val int64) { + p.BufferPoolLimit = val +} +func (p *TQueryOptions) SetDefaultSpillableBufferSize(val int64) { + p.DefaultSpillableBufferSize = val +} +func (p *TQueryOptions) SetMinSpillableBufferSize(val int64) { + p.MinSpillableBufferSize = val +} +func (p *TQueryOptions) SetMaxRowSize(val int64) { + p.MaxRowSize = val +} +func (p *TQueryOptions) SetDisableStreamPreaggregations(val bool) { + p.DisableStreamPreaggregations = val +} +func (p *TQueryOptions) SetMtDop(val int32) { + p.MtDop = val +} +func (p *TQueryOptions) SetLoadMemLimit(val int64) { + p.LoadMemLimit = val } func (p *TQueryOptions) SetMaxScanKeyNum(val *int32) { p.MaxScanKeyNum = val @@ -2701,156 +3446,380 @@ func (p *TQueryOptions) SetEnablePageCache(val bool) { func (p *TQueryOptions) SetAnalyzeTimeout(val int32) { p.AnalyzeTimeout = val } - -var fieldIDToName_TQueryOptions = map[int16]string{ - 1: "abort_on_error", - 2: "max_errors", - 3: "disable_codegen", - 4: "batch_size", - 5: "num_nodes", - 6: 
"max_scan_range_length", - 7: "num_scanner_threads", - 8: "max_io_buffers", - 9: "allow_unsupported_formats", - 10: "default_order_by_limit", - 12: "mem_limit", - 13: "abort_on_default_limit_exceeded", - 14: "query_timeout", - 15: "is_report_success", - 16: "codegen_level", - 17: "kudu_latest_observed_ts", - 18: "query_type", - 19: "min_reservation", - 20: "max_reservation", - 21: "initial_reservation_total_claims", - 22: "buffer_pool_limit", - 23: "default_spillable_buffer_size", - 24: "min_spillable_buffer_size", - 25: "max_row_size", - 26: "disable_stream_preaggregations", - 27: "mt_dop", - 28: "load_mem_limit", - 29: "max_scan_key_num", - 30: "max_pushdown_conditions_per_column", - 31: "enable_spilling", - 32: "enable_enable_exchange_node_parallel_merge", - 33: "runtime_filter_wait_time_ms", - 34: "runtime_filter_max_in_num", - 42: "resource_limit", - 43: "return_object_data_as_binary", - 44: "trim_tailing_spaces_for_external_table_query", - 45: "enable_function_pushdown", - 46: "fragment_transmission_compression_codec", - 48: "enable_local_exchange", - 49: "skip_storage_engine_merge", - 50: "skip_delete_predicate", - 51: "enable_new_shuffle_hash_method", - 52: "be_exec_version", - 53: "partitioned_hash_join_rows_threshold", - 54: "enable_share_hash_table_for_broadcast_join", - 55: "check_overflow_for_decimal", - 56: "skip_delete_bitmap", - 57: "enable_pipeline_engine", - 58: "repeat_max_num", - 59: "external_sort_bytes_threshold", - 60: "partitioned_hash_agg_rows_threshold", - 61: "enable_file_cache", - 62: "insert_timeout", - 63: "execution_timeout", - 64: "dry_run_query", - 65: "enable_common_expr_pushdown", - 66: "parallel_instance", - 67: "mysql_row_binary_format", - 68: "external_agg_bytes_threshold", - 69: "external_agg_partition_bits", - 70: "file_cache_base_path", - 71: "enable_parquet_lazy_mat", - 72: "enable_orc_lazy_mat", - 73: "scan_queue_mem_limit", - 74: "enable_scan_node_run_serial", - 75: "enable_insert_strict", - 76: "enable_inverted_index_query", - 77: "truncate_char_or_varchar_columns", - 78: "enable_hash_join_early_start_probe", - 79: "enable_pipeline_x_engine", - 80: "enable_memtable_on_sink_node", - 81: "enable_delete_sub_predicate_v2", - 82: "fe_process_uuid", - 83: "inverted_index_conjunction_opt_threshold", - 84: "enable_profile", - 85: "enable_page_cache", - 86: "analyze_timeout", +func (p *TQueryOptions) SetFasterFloatConvert(val bool) { + p.FasterFloatConvert = val } - -func (p *TQueryOptions) IsSetAbortOnError() bool { - return p.AbortOnError != TQueryOptions_AbortOnError_DEFAULT +func (p *TQueryOptions) SetEnableDecimal256(val bool) { + p.EnableDecimal256 = val } - -func (p *TQueryOptions) IsSetMaxErrors() bool { - return p.MaxErrors != TQueryOptions_MaxErrors_DEFAULT +func (p *TQueryOptions) SetEnableLocalShuffle(val bool) { + p.EnableLocalShuffle = val } - -func (p *TQueryOptions) IsSetDisableCodegen() bool { - return p.DisableCodegen != TQueryOptions_DisableCodegen_DEFAULT +func (p *TQueryOptions) SetSkipMissingVersion(val bool) { + p.SkipMissingVersion = val } - -func (p *TQueryOptions) IsSetBatchSize() bool { - return p.BatchSize != TQueryOptions_BatchSize_DEFAULT +func (p *TQueryOptions) SetRuntimeFilterWaitInfinitely(val bool) { + p.RuntimeFilterWaitInfinitely = val } - -func (p *TQueryOptions) IsSetNumNodes() bool { - return p.NumNodes != TQueryOptions_NumNodes_DEFAULT +func (p *TQueryOptions) SetWaitFullBlockScheduleTimes(val int32) { + p.WaitFullBlockScheduleTimes = val } - -func (p *TQueryOptions) IsSetMaxScanRangeLength() bool { - return 
p.MaxScanRangeLength != TQueryOptions_MaxScanRangeLength_DEFAULT +func (p *TQueryOptions) SetInvertedIndexMaxExpansions(val int32) { + p.InvertedIndexMaxExpansions = val } - -func (p *TQueryOptions) IsSetNumScannerThreads() bool { - return p.NumScannerThreads != TQueryOptions_NumScannerThreads_DEFAULT +func (p *TQueryOptions) SetInvertedIndexSkipThreshold(val int32) { + p.InvertedIndexSkipThreshold = val } - -func (p *TQueryOptions) IsSetMaxIoBuffers() bool { - return p.MaxIoBuffers != TQueryOptions_MaxIoBuffers_DEFAULT +func (p *TQueryOptions) SetEnableParallelScan(val bool) { + p.EnableParallelScan = val } - -func (p *TQueryOptions) IsSetAllowUnsupportedFormats() bool { - return p.AllowUnsupportedFormats != TQueryOptions_AllowUnsupportedFormats_DEFAULT +func (p *TQueryOptions) SetParallelScanMaxScannersCount(val int32) { + p.ParallelScanMaxScannersCount = val } - -func (p *TQueryOptions) IsSetDefaultOrderByLimit() bool { - return p.DefaultOrderByLimit != TQueryOptions_DefaultOrderByLimit_DEFAULT +func (p *TQueryOptions) SetParallelScanMinRowsPerScanner(val int64) { + p.ParallelScanMinRowsPerScanner = val } - -func (p *TQueryOptions) IsSetMemLimit() bool { - return p.MemLimit != TQueryOptions_MemLimit_DEFAULT +func (p *TQueryOptions) SetSkipBadTablet(val bool) { + p.SkipBadTablet = val } - -func (p *TQueryOptions) IsSetAbortOnDefaultLimitExceeded() bool { - return p.AbortOnDefaultLimitExceeded != TQueryOptions_AbortOnDefaultLimitExceeded_DEFAULT +func (p *TQueryOptions) SetScannerScaleUpRatio(val float64) { + p.ScannerScaleUpRatio = val } - -func (p *TQueryOptions) IsSetQueryTimeout() bool { - return p.QueryTimeout != TQueryOptions_QueryTimeout_DEFAULT +func (p *TQueryOptions) SetEnableDistinctStreamingAggregation(val bool) { + p.EnableDistinctStreamingAggregation = val } - -func (p *TQueryOptions) IsSetIsReportSuccess() bool { - return p.IsReportSuccess != TQueryOptions_IsReportSuccess_DEFAULT +func (p *TQueryOptions) SetEnableJoinSpill(val bool) { + p.EnableJoinSpill = val } - -func (p *TQueryOptions) IsSetCodegenLevel() bool { - return p.CodegenLevel != TQueryOptions_CodegenLevel_DEFAULT +func (p *TQueryOptions) SetEnableSortSpill(val bool) { + p.EnableSortSpill = val } - -func (p *TQueryOptions) IsSetKuduLatestObservedTs() bool { - return p.KuduLatestObservedTs != TQueryOptions_KuduLatestObservedTs_DEFAULT +func (p *TQueryOptions) SetEnableAggSpill(val bool) { + p.EnableAggSpill = val } - -func (p *TQueryOptions) IsSetQueryType() bool { - return p.QueryType != TQueryOptions_QueryType_DEFAULT +func (p *TQueryOptions) SetMinRevocableMem(val int64) { + p.MinRevocableMem = val } - -func (p *TQueryOptions) IsSetMinReservation() bool { +func (p *TQueryOptions) SetSpillStreamingAggMemLimit(val int64) { + p.SpillStreamingAggMemLimit = val +} +func (p *TQueryOptions) SetDataQueueMaxBlocks(val int64) { + p.DataQueueMaxBlocks = val +} +func (p *TQueryOptions) SetEnableCommonExprPushdownForInvertedIndex(val bool) { + p.EnableCommonExprPushdownForInvertedIndex = val +} +func (p *TQueryOptions) SetLocalExchangeFreeBlocksLimit(val *int64) { + p.LocalExchangeFreeBlocksLimit = val +} +func (p *TQueryOptions) SetEnableForceSpill(val bool) { + p.EnableForceSpill = val +} +func (p *TQueryOptions) SetEnableParquetFilterByMinMax(val bool) { + p.EnableParquetFilterByMinMax = val +} +func (p *TQueryOptions) SetEnableOrcFilterByMinMax(val bool) { + p.EnableOrcFilterByMinMax = val +} +func (p *TQueryOptions) SetMaxColumnReaderNum(val int32) { + p.MaxColumnReaderNum = val +} +func (p *TQueryOptions) 
SetEnableLocalMergeSort(val bool) { + p.EnableLocalMergeSort = val +} +func (p *TQueryOptions) SetEnableParallelResultSink(val bool) { + p.EnableParallelResultSink = val +} +func (p *TQueryOptions) SetEnableShortCircuitQueryAccessColumnStore(val bool) { + p.EnableShortCircuitQueryAccessColumnStore = val +} +func (p *TQueryOptions) SetEnableNoNeedReadDataOpt(val bool) { + p.EnableNoNeedReadDataOpt = val +} +func (p *TQueryOptions) SetReadCsvEmptyLineAsNull(val bool) { + p.ReadCsvEmptyLineAsNull = val +} +func (p *TQueryOptions) SetSerdeDialect(val TSerdeDialect) { + p.SerdeDialect = val +} +func (p *TQueryOptions) SetEnableMatchWithoutInvertedIndex(val bool) { + p.EnableMatchWithoutInvertedIndex = val +} +func (p *TQueryOptions) SetEnableFallbackOnMissingInvertedIndex(val bool) { + p.EnableFallbackOnMissingInvertedIndex = val +} +func (p *TQueryOptions) SetKeepCarriageReturn(val bool) { + p.KeepCarriageReturn = val +} +func (p *TQueryOptions) SetRuntimeBloomFilterMinSize(val int32) { + p.RuntimeBloomFilterMinSize = val +} +func (p *TQueryOptions) SetHiveParquetUseColumnNames(val bool) { + p.HiveParquetUseColumnNames = val +} +func (p *TQueryOptions) SetHiveOrcUseColumnNames(val bool) { + p.HiveOrcUseColumnNames = val +} +func (p *TQueryOptions) SetEnableSegmentCache(val bool) { + p.EnableSegmentCache = val +} +func (p *TQueryOptions) SetRuntimeBloomFilterMaxSize(val int32) { + p.RuntimeBloomFilterMaxSize = val +} +func (p *TQueryOptions) SetInListValueCountThreshold(val int32) { + p.InListValueCountThreshold = val +} +func (p *TQueryOptions) SetEnableVerboseProfile(val bool) { + p.EnableVerboseProfile = val +} +func (p *TQueryOptions) SetRpcVerboseProfileMaxInstanceCount(val int32) { + p.RpcVerboseProfileMaxInstanceCount = val +} +func (p *TQueryOptions) SetEnableAdaptivePipelineTaskSerialReadOnLimit(val bool) { + p.EnableAdaptivePipelineTaskSerialReadOnLimit = val +} +func (p *TQueryOptions) SetAdaptivePipelineTaskSerialReadOnLimit(val int32) { + p.AdaptivePipelineTaskSerialReadOnLimit = val +} +func (p *TQueryOptions) SetParallelPrepareThreshold(val int32) { + p.ParallelPrepareThreshold = val +} +func (p *TQueryOptions) SetPartitionTopnMaxPartitions(val int32) { + p.PartitionTopnMaxPartitions = val +} +func (p *TQueryOptions) SetPartitionTopnPrePartitionRows(val int32) { + p.PartitionTopnPrePartitionRows = val +} +func (p *TQueryOptions) SetEnableParallelOutfile(val bool) { + p.EnableParallelOutfile = val +} +func (p *TQueryOptions) SetEnablePhraseQuerySequentialOpt(val bool) { + p.EnablePhraseQuerySequentialOpt = val +} +func (p *TQueryOptions) SetEnableAutoCreateWhenOverwrite(val bool) { + p.EnableAutoCreateWhenOverwrite = val +} +func (p *TQueryOptions) SetOrcTinyStripeThresholdBytes(val int64) { + p.OrcTinyStripeThresholdBytes = val +} +func (p *TQueryOptions) SetOrcOnceMaxReadBytes(val int64) { + p.OrcOnceMaxReadBytes = val +} +func (p *TQueryOptions) SetOrcMaxMergeDistanceBytes(val int64) { + p.OrcMaxMergeDistanceBytes = val +} +func (p *TQueryOptions) SetIgnoreRuntimeFilterError(val bool) { + p.IgnoreRuntimeFilterError = val +} +func (p *TQueryOptions) SetDisableFileCache(val bool) { + p.DisableFileCache = val +} + +var fieldIDToName_TQueryOptions = map[int16]string{ + 1: "abort_on_error", + 2: "max_errors", + 3: "disable_codegen", + 4: "batch_size", + 5: "num_nodes", + 6: "max_scan_range_length", + 7: "num_scanner_threads", + 8: "max_io_buffers", + 9: "allow_unsupported_formats", + 10: "default_order_by_limit", + 12: "mem_limit", + 13: "abort_on_default_limit_exceeded", + 14: 
"query_timeout", + 15: "is_report_success", + 16: "codegen_level", + 17: "kudu_latest_observed_ts", + 18: "query_type", + 19: "min_reservation", + 20: "max_reservation", + 21: "initial_reservation_total_claims", + 22: "buffer_pool_limit", + 23: "default_spillable_buffer_size", + 24: "min_spillable_buffer_size", + 25: "max_row_size", + 26: "disable_stream_preaggregations", + 27: "mt_dop", + 28: "load_mem_limit", + 29: "max_scan_key_num", + 30: "max_pushdown_conditions_per_column", + 31: "enable_spilling", + 32: "enable_enable_exchange_node_parallel_merge", + 33: "runtime_filter_wait_time_ms", + 34: "runtime_filter_max_in_num", + 42: "resource_limit", + 43: "return_object_data_as_binary", + 44: "trim_tailing_spaces_for_external_table_query", + 45: "enable_function_pushdown", + 46: "fragment_transmission_compression_codec", + 48: "enable_local_exchange", + 49: "skip_storage_engine_merge", + 50: "skip_delete_predicate", + 51: "enable_new_shuffle_hash_method", + 52: "be_exec_version", + 53: "partitioned_hash_join_rows_threshold", + 54: "enable_share_hash_table_for_broadcast_join", + 55: "check_overflow_for_decimal", + 56: "skip_delete_bitmap", + 57: "enable_pipeline_engine", + 58: "repeat_max_num", + 59: "external_sort_bytes_threshold", + 60: "partitioned_hash_agg_rows_threshold", + 61: "enable_file_cache", + 62: "insert_timeout", + 63: "execution_timeout", + 64: "dry_run_query", + 65: "enable_common_expr_pushdown", + 66: "parallel_instance", + 67: "mysql_row_binary_format", + 68: "external_agg_bytes_threshold", + 69: "external_agg_partition_bits", + 70: "file_cache_base_path", + 71: "enable_parquet_lazy_mat", + 72: "enable_orc_lazy_mat", + 73: "scan_queue_mem_limit", + 74: "enable_scan_node_run_serial", + 75: "enable_insert_strict", + 76: "enable_inverted_index_query", + 77: "truncate_char_or_varchar_columns", + 78: "enable_hash_join_early_start_probe", + 79: "enable_pipeline_x_engine", + 80: "enable_memtable_on_sink_node", + 81: "enable_delete_sub_predicate_v2", + 82: "fe_process_uuid", + 83: "inverted_index_conjunction_opt_threshold", + 84: "enable_profile", + 85: "enable_page_cache", + 86: "analyze_timeout", + 87: "faster_float_convert", + 88: "enable_decimal256", + 89: "enable_local_shuffle", + 90: "skip_missing_version", + 91: "runtime_filter_wait_infinitely", + 92: "wait_full_block_schedule_times", + 93: "inverted_index_max_expansions", + 94: "inverted_index_skip_threshold", + 95: "enable_parallel_scan", + 96: "parallel_scan_max_scanners_count", + 97: "parallel_scan_min_rows_per_scanner", + 98: "skip_bad_tablet", + 99: "scanner_scale_up_ratio", + 100: "enable_distinct_streaming_aggregation", + 101: "enable_join_spill", + 102: "enable_sort_spill", + 103: "enable_agg_spill", + 104: "min_revocable_mem", + 105: "spill_streaming_agg_mem_limit", + 106: "data_queue_max_blocks", + 107: "enable_common_expr_pushdown_for_inverted_index", + 108: "local_exchange_free_blocks_limit", + 109: "enable_force_spill", + 110: "enable_parquet_filter_by_min_max", + 111: "enable_orc_filter_by_min_max", + 112: "max_column_reader_num", + 113: "enable_local_merge_sort", + 114: "enable_parallel_result_sink", + 115: "enable_short_circuit_query_access_column_store", + 116: "enable_no_need_read_data_opt", + 117: "read_csv_empty_line_as_null", + 118: "serde_dialect", + 119: "enable_match_without_inverted_index", + 120: "enable_fallback_on_missing_inverted_index", + 121: "keep_carriage_return", + 122: "runtime_bloom_filter_min_size", + 123: "hive_parquet_use_column_names", + 124: "hive_orc_use_column_names", + 125: 
"enable_segment_cache", + 126: "runtime_bloom_filter_max_size", + 127: "in_list_value_count_threshold", + 128: "enable_verbose_profile", + 129: "rpc_verbose_profile_max_instance_count", + 130: "enable_adaptive_pipeline_task_serial_read_on_limit", + 131: "adaptive_pipeline_task_serial_read_on_limit", + 132: "parallel_prepare_threshold", + 133: "partition_topn_max_partitions", + 134: "partition_topn_pre_partition_rows", + 135: "enable_parallel_outfile", + 136: "enable_phrase_query_sequential_opt", + 137: "enable_auto_create_when_overwrite", + 138: "orc_tiny_stripe_threshold_bytes", + 139: "orc_once_max_read_bytes", + 140: "orc_max_merge_distance_bytes", + 141: "ignore_runtime_filter_error", + 1000: "disable_file_cache", +} + +func (p *TQueryOptions) IsSetAbortOnError() bool { + return p.AbortOnError != TQueryOptions_AbortOnError_DEFAULT +} + +func (p *TQueryOptions) IsSetMaxErrors() bool { + return p.MaxErrors != TQueryOptions_MaxErrors_DEFAULT +} + +func (p *TQueryOptions) IsSetDisableCodegen() bool { + return p.DisableCodegen != TQueryOptions_DisableCodegen_DEFAULT +} + +func (p *TQueryOptions) IsSetBatchSize() bool { + return p.BatchSize != TQueryOptions_BatchSize_DEFAULT +} + +func (p *TQueryOptions) IsSetNumNodes() bool { + return p.NumNodes != TQueryOptions_NumNodes_DEFAULT +} + +func (p *TQueryOptions) IsSetMaxScanRangeLength() bool { + return p.MaxScanRangeLength != TQueryOptions_MaxScanRangeLength_DEFAULT +} + +func (p *TQueryOptions) IsSetNumScannerThreads() bool { + return p.NumScannerThreads != TQueryOptions_NumScannerThreads_DEFAULT +} + +func (p *TQueryOptions) IsSetMaxIoBuffers() bool { + return p.MaxIoBuffers != TQueryOptions_MaxIoBuffers_DEFAULT +} + +func (p *TQueryOptions) IsSetAllowUnsupportedFormats() bool { + return p.AllowUnsupportedFormats != TQueryOptions_AllowUnsupportedFormats_DEFAULT +} + +func (p *TQueryOptions) IsSetDefaultOrderByLimit() bool { + return p.DefaultOrderByLimit != TQueryOptions_DefaultOrderByLimit_DEFAULT +} + +func (p *TQueryOptions) IsSetMemLimit() bool { + return p.MemLimit != TQueryOptions_MemLimit_DEFAULT +} + +func (p *TQueryOptions) IsSetAbortOnDefaultLimitExceeded() bool { + return p.AbortOnDefaultLimitExceeded != TQueryOptions_AbortOnDefaultLimitExceeded_DEFAULT +} + +func (p *TQueryOptions) IsSetQueryTimeout() bool { + return p.QueryTimeout != TQueryOptions_QueryTimeout_DEFAULT +} + +func (p *TQueryOptions) IsSetIsReportSuccess() bool { + return p.IsReportSuccess != TQueryOptions_IsReportSuccess_DEFAULT +} + +func (p *TQueryOptions) IsSetCodegenLevel() bool { + return p.CodegenLevel != TQueryOptions_CodegenLevel_DEFAULT +} + +func (p *TQueryOptions) IsSetKuduLatestObservedTs() bool { + return p.KuduLatestObservedTs != TQueryOptions_KuduLatestObservedTs_DEFAULT +} + +func (p *TQueryOptions) IsSetQueryType() bool { + return p.QueryType != TQueryOptions_QueryType_DEFAULT +} + +func (p *TQueryOptions) IsSetMinReservation() bool { return p.MinReservation != TQueryOptions_MinReservation_DEFAULT } @@ -3090,6 +4059,230 @@ func (p *TQueryOptions) IsSetAnalyzeTimeout() bool { return p.AnalyzeTimeout != TQueryOptions_AnalyzeTimeout_DEFAULT } +func (p *TQueryOptions) IsSetFasterFloatConvert() bool { + return p.FasterFloatConvert != TQueryOptions_FasterFloatConvert_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableDecimal256() bool { + return p.EnableDecimal256 != TQueryOptions_EnableDecimal256_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableLocalShuffle() bool { + return p.EnableLocalShuffle != TQueryOptions_EnableLocalShuffle_DEFAULT +} + +func (p 
*TQueryOptions) IsSetSkipMissingVersion() bool { + return p.SkipMissingVersion != TQueryOptions_SkipMissingVersion_DEFAULT +} + +func (p *TQueryOptions) IsSetRuntimeFilterWaitInfinitely() bool { + return p.RuntimeFilterWaitInfinitely != TQueryOptions_RuntimeFilterWaitInfinitely_DEFAULT +} + +func (p *TQueryOptions) IsSetWaitFullBlockScheduleTimes() bool { + return p.WaitFullBlockScheduleTimes != TQueryOptions_WaitFullBlockScheduleTimes_DEFAULT +} + +func (p *TQueryOptions) IsSetInvertedIndexMaxExpansions() bool { + return p.InvertedIndexMaxExpansions != TQueryOptions_InvertedIndexMaxExpansions_DEFAULT +} + +func (p *TQueryOptions) IsSetInvertedIndexSkipThreshold() bool { + return p.InvertedIndexSkipThreshold != TQueryOptions_InvertedIndexSkipThreshold_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableParallelScan() bool { + return p.EnableParallelScan != TQueryOptions_EnableParallelScan_DEFAULT +} + +func (p *TQueryOptions) IsSetParallelScanMaxScannersCount() bool { + return p.ParallelScanMaxScannersCount != TQueryOptions_ParallelScanMaxScannersCount_DEFAULT +} + +func (p *TQueryOptions) IsSetParallelScanMinRowsPerScanner() bool { + return p.ParallelScanMinRowsPerScanner != TQueryOptions_ParallelScanMinRowsPerScanner_DEFAULT +} + +func (p *TQueryOptions) IsSetSkipBadTablet() bool { + return p.SkipBadTablet != TQueryOptions_SkipBadTablet_DEFAULT +} + +func (p *TQueryOptions) IsSetScannerScaleUpRatio() bool { + return p.ScannerScaleUpRatio != TQueryOptions_ScannerScaleUpRatio_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableDistinctStreamingAggregation() bool { + return p.EnableDistinctStreamingAggregation != TQueryOptions_EnableDistinctStreamingAggregation_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableJoinSpill() bool { + return p.EnableJoinSpill != TQueryOptions_EnableJoinSpill_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableSortSpill() bool { + return p.EnableSortSpill != TQueryOptions_EnableSortSpill_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableAggSpill() bool { + return p.EnableAggSpill != TQueryOptions_EnableAggSpill_DEFAULT +} + +func (p *TQueryOptions) IsSetMinRevocableMem() bool { + return p.MinRevocableMem != TQueryOptions_MinRevocableMem_DEFAULT +} + +func (p *TQueryOptions) IsSetSpillStreamingAggMemLimit() bool { + return p.SpillStreamingAggMemLimit != TQueryOptions_SpillStreamingAggMemLimit_DEFAULT +} + +func (p *TQueryOptions) IsSetDataQueueMaxBlocks() bool { + return p.DataQueueMaxBlocks != TQueryOptions_DataQueueMaxBlocks_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableCommonExprPushdownForInvertedIndex() bool { + return p.EnableCommonExprPushdownForInvertedIndex != TQueryOptions_EnableCommonExprPushdownForInvertedIndex_DEFAULT +} + +func (p *TQueryOptions) IsSetLocalExchangeFreeBlocksLimit() bool { + return p.LocalExchangeFreeBlocksLimit != nil +} + +func (p *TQueryOptions) IsSetEnableForceSpill() bool { + return p.EnableForceSpill != TQueryOptions_EnableForceSpill_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableParquetFilterByMinMax() bool { + return p.EnableParquetFilterByMinMax != TQueryOptions_EnableParquetFilterByMinMax_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableOrcFilterByMinMax() bool { + return p.EnableOrcFilterByMinMax != TQueryOptions_EnableOrcFilterByMinMax_DEFAULT +} + +func (p *TQueryOptions) IsSetMaxColumnReaderNum() bool { + return p.MaxColumnReaderNum != TQueryOptions_MaxColumnReaderNum_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableLocalMergeSort() bool { + return p.EnableLocalMergeSort != TQueryOptions_EnableLocalMergeSort_DEFAULT +} + 
+func (p *TQueryOptions) IsSetEnableParallelResultSink() bool { + return p.EnableParallelResultSink != TQueryOptions_EnableParallelResultSink_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableShortCircuitQueryAccessColumnStore() bool { + return p.EnableShortCircuitQueryAccessColumnStore != TQueryOptions_EnableShortCircuitQueryAccessColumnStore_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableNoNeedReadDataOpt() bool { + return p.EnableNoNeedReadDataOpt != TQueryOptions_EnableNoNeedReadDataOpt_DEFAULT +} + +func (p *TQueryOptions) IsSetReadCsvEmptyLineAsNull() bool { + return p.ReadCsvEmptyLineAsNull != TQueryOptions_ReadCsvEmptyLineAsNull_DEFAULT +} + +func (p *TQueryOptions) IsSetSerdeDialect() bool { + return p.SerdeDialect != TQueryOptions_SerdeDialect_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableMatchWithoutInvertedIndex() bool { + return p.EnableMatchWithoutInvertedIndex != TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableFallbackOnMissingInvertedIndex() bool { + return p.EnableFallbackOnMissingInvertedIndex != TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT +} + +func (p *TQueryOptions) IsSetKeepCarriageReturn() bool { + return p.KeepCarriageReturn != TQueryOptions_KeepCarriageReturn_DEFAULT +} + +func (p *TQueryOptions) IsSetRuntimeBloomFilterMinSize() bool { + return p.RuntimeBloomFilterMinSize != TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT +} + +func (p *TQueryOptions) IsSetHiveParquetUseColumnNames() bool { + return p.HiveParquetUseColumnNames != TQueryOptions_HiveParquetUseColumnNames_DEFAULT +} + +func (p *TQueryOptions) IsSetHiveOrcUseColumnNames() bool { + return p.HiveOrcUseColumnNames != TQueryOptions_HiveOrcUseColumnNames_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableSegmentCache() bool { + return p.EnableSegmentCache != TQueryOptions_EnableSegmentCache_DEFAULT +} + +func (p *TQueryOptions) IsSetRuntimeBloomFilterMaxSize() bool { + return p.RuntimeBloomFilterMaxSize != TQueryOptions_RuntimeBloomFilterMaxSize_DEFAULT +} + +func (p *TQueryOptions) IsSetInListValueCountThreshold() bool { + return p.InListValueCountThreshold != TQueryOptions_InListValueCountThreshold_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableVerboseProfile() bool { + return p.EnableVerboseProfile != TQueryOptions_EnableVerboseProfile_DEFAULT +} + +func (p *TQueryOptions) IsSetRpcVerboseProfileMaxInstanceCount() bool { + return p.RpcVerboseProfileMaxInstanceCount != TQueryOptions_RpcVerboseProfileMaxInstanceCount_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableAdaptivePipelineTaskSerialReadOnLimit() bool { + return p.EnableAdaptivePipelineTaskSerialReadOnLimit != TQueryOptions_EnableAdaptivePipelineTaskSerialReadOnLimit_DEFAULT +} + +func (p *TQueryOptions) IsSetAdaptivePipelineTaskSerialReadOnLimit() bool { + return p.AdaptivePipelineTaskSerialReadOnLimit != TQueryOptions_AdaptivePipelineTaskSerialReadOnLimit_DEFAULT +} + +func (p *TQueryOptions) IsSetParallelPrepareThreshold() bool { + return p.ParallelPrepareThreshold != TQueryOptions_ParallelPrepareThreshold_DEFAULT +} + +func (p *TQueryOptions) IsSetPartitionTopnMaxPartitions() bool { + return p.PartitionTopnMaxPartitions != TQueryOptions_PartitionTopnMaxPartitions_DEFAULT +} + +func (p *TQueryOptions) IsSetPartitionTopnPrePartitionRows() bool { + return p.PartitionTopnPrePartitionRows != TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableParallelOutfile() bool { + return p.EnableParallelOutfile != TQueryOptions_EnableParallelOutfile_DEFAULT 
+} + +func (p *TQueryOptions) IsSetEnablePhraseQuerySequentialOpt() bool { + return p.EnablePhraseQuerySequentialOpt != TQueryOptions_EnablePhraseQuerySequentialOpt_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableAutoCreateWhenOverwrite() bool { + return p.EnableAutoCreateWhenOverwrite != TQueryOptions_EnableAutoCreateWhenOverwrite_DEFAULT +} + +func (p *TQueryOptions) IsSetOrcTinyStripeThresholdBytes() bool { + return p.OrcTinyStripeThresholdBytes != TQueryOptions_OrcTinyStripeThresholdBytes_DEFAULT +} + +func (p *TQueryOptions) IsSetOrcOnceMaxReadBytes() bool { + return p.OrcOnceMaxReadBytes != TQueryOptions_OrcOnceMaxReadBytes_DEFAULT +} + +func (p *TQueryOptions) IsSetOrcMaxMergeDistanceBytes() bool { + return p.OrcMaxMergeDistanceBytes != TQueryOptions_OrcMaxMergeDistanceBytes_DEFAULT +} + +func (p *TQueryOptions) IsSetIgnoreRuntimeFilterError() bool { + return p.IgnoreRuntimeFilterError != TQueryOptions_IgnoreRuntimeFilterError_DEFAULT +} + +func (p *TQueryOptions) IsSetDisableFileCache() bool { + return p.DisableFileCache != TQueryOptions_DisableFileCache_DEFAULT +} + func (p *TQueryOptions) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3114,4182 +4307,8008 @@ func (p *TQueryOptions) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.BOOL { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I32 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.BOOL { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.I32 { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.I64 { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.I32 { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.I64 { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.I64 { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.I64 { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: if fieldTypeId == thrift.I64 { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: if fieldTypeId == thrift.I64 { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: if fieldTypeId == thrift.I64 { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: if fieldTypeId == thrift.I64 { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 26: if fieldTypeId == thrift.BOOL { if err = p.ReadField26(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 27: if fieldTypeId == thrift.I32 { if err = p.ReadField27(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 28: if fieldTypeId == thrift.I64 { if err = p.ReadField28(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 29: if fieldTypeId == thrift.I32 { if err = p.ReadField29(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 30: if fieldTypeId == thrift.I32 { if err = p.ReadField30(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 31: if fieldTypeId == thrift.BOOL { if err = p.ReadField31(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 32: if fieldTypeId == thrift.BOOL { if err = p.ReadField32(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 33: if fieldTypeId == thrift.I32 { if err = p.ReadField33(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 34: if fieldTypeId == thrift.I32 { if err = p.ReadField34(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 42: if fieldTypeId == thrift.STRUCT { if err = p.ReadField42(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 43: if fieldTypeId == thrift.BOOL { if err = p.ReadField43(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 44: if fieldTypeId == thrift.BOOL { if err = p.ReadField44(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 45: if fieldTypeId == thrift.BOOL { if err = p.ReadField45(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 46: if fieldTypeId == thrift.STRING { if err = p.ReadField46(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 48: if fieldTypeId == thrift.BOOL { if err = p.ReadField48(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 49: if fieldTypeId == thrift.BOOL { if err = p.ReadField49(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 50: if fieldTypeId == thrift.BOOL { if err = p.ReadField50(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 51: if fieldTypeId == thrift.BOOL { if err = p.ReadField51(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 52: if fieldTypeId == thrift.I32 { if err = p.ReadField52(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 53: if fieldTypeId == thrift.I32 { if err = p.ReadField53(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 54: if fieldTypeId == thrift.BOOL { if err = p.ReadField54(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 55: if fieldTypeId == thrift.BOOL { if err = p.ReadField55(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 56: if fieldTypeId == thrift.BOOL { if err = p.ReadField56(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 57: if fieldTypeId == thrift.BOOL { if err = p.ReadField57(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 58: if fieldTypeId == thrift.I32 { if err = p.ReadField58(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 59: if fieldTypeId == thrift.I64 { if err = p.ReadField59(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 60: if fieldTypeId == thrift.I32 { if err = p.ReadField60(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 61: if fieldTypeId == thrift.BOOL { if err = p.ReadField61(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 62: if fieldTypeId == thrift.I32 { if err = p.ReadField62(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 63: if fieldTypeId == thrift.I32 { if err = p.ReadField63(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 64: if fieldTypeId == thrift.BOOL { if err = p.ReadField64(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 65: if fieldTypeId == thrift.BOOL { if err = p.ReadField65(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 66: if fieldTypeId == thrift.I32 { if err = p.ReadField66(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 67: if fieldTypeId == thrift.BOOL { if err = p.ReadField67(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 68: if fieldTypeId == thrift.I64 { if err = p.ReadField68(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 69: if fieldTypeId == thrift.I32 { if err = p.ReadField69(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 70: if fieldTypeId == thrift.STRING { if err = p.ReadField70(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 71: if fieldTypeId == thrift.BOOL { if err = p.ReadField71(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 72: if fieldTypeId == thrift.BOOL { if err = p.ReadField72(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 73: if fieldTypeId == thrift.I64 { if err = p.ReadField73(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 74: if fieldTypeId == thrift.BOOL { if err = p.ReadField74(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 75: if fieldTypeId == thrift.BOOL { if err = p.ReadField75(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 76: if fieldTypeId == thrift.BOOL { if err = p.ReadField76(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 77: if fieldTypeId == thrift.BOOL { if err = p.ReadField77(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 78: if fieldTypeId == thrift.BOOL { if err = p.ReadField78(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 79: if fieldTypeId == thrift.BOOL { if err = p.ReadField79(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 80: if fieldTypeId == thrift.BOOL { if err = p.ReadField80(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 81: if fieldTypeId == thrift.BOOL { if err = p.ReadField81(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 82: if fieldTypeId == thrift.I64 { if err = p.ReadField82(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 83: if fieldTypeId == thrift.I32 { if err = p.ReadField83(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 84: if fieldTypeId == thrift.BOOL { if err = p.ReadField84(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 85: if fieldTypeId == thrift.BOOL { if err = p.ReadField85(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 86: if fieldTypeId == thrift.I32 { if err = p.ReadField86(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 87: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField87(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - default: - if err = iprot.Skip(fieldTypeId); err != nil { + case 88: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField88(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryOptions[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TQueryOptions) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.AbortOnError = v - } - return nil -} - -func (p *TQueryOptions) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.MaxErrors = v - } - return nil -} - -func (p *TQueryOptions) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.DisableCodegen = v - } - return nil -} - + case 89: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField89(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 90: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField90(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 91: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField91(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 92: + if fieldTypeId == thrift.I32 { + if err = p.ReadField92(iprot); err != nil { + goto ReadFieldError + } + } else if err 
= iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 93: + if fieldTypeId == thrift.I32 { + if err = p.ReadField93(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 94: + if fieldTypeId == thrift.I32 { + if err = p.ReadField94(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 95: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField95(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 96: + if fieldTypeId == thrift.I32 { + if err = p.ReadField96(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 97: + if fieldTypeId == thrift.I64 { + if err = p.ReadField97(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 98: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField98(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 99: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField99(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 100: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField100(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 101: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField101(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 102: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField102(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 103: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField103(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 104: + if fieldTypeId == thrift.I64 { + if err = p.ReadField104(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 105: + if fieldTypeId == thrift.I64 { + if err = p.ReadField105(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 106: + if fieldTypeId == thrift.I64 { + if err = p.ReadField106(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 107: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField107(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 108: + if fieldTypeId == thrift.I64 { + if err = p.ReadField108(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 109: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField109(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 110: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField110(iprot); err != nil 
{ + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 111: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField111(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 112: + if fieldTypeId == thrift.I32 { + if err = p.ReadField112(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 113: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField113(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 114: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField114(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 115: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField115(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 116: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField116(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 117: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField117(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 118: + if fieldTypeId == thrift.I32 { + if err = p.ReadField118(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 119: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField119(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 120: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField120(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 121: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField121(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 122: + if fieldTypeId == thrift.I32 { + if err = p.ReadField122(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 123: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField123(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 124: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField124(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 125: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField125(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 126: + if fieldTypeId == thrift.I32 { + if err = p.ReadField126(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 127: + if fieldTypeId == thrift.I32 { + if err = p.ReadField127(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 128: + if fieldTypeId == 
thrift.BOOL { + if err = p.ReadField128(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 129: + if fieldTypeId == thrift.I32 { + if err = p.ReadField129(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 130: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField130(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 131: + if fieldTypeId == thrift.I32 { + if err = p.ReadField131(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 132: + if fieldTypeId == thrift.I32 { + if err = p.ReadField132(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 133: + if fieldTypeId == thrift.I32 { + if err = p.ReadField133(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 134: + if fieldTypeId == thrift.I32 { + if err = p.ReadField134(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 135: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField135(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 136: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField136(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 137: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField137(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 138: + if fieldTypeId == thrift.I64 { + if err = p.ReadField138(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 139: + if fieldTypeId == thrift.I64 { + if err = p.ReadField139(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 140: + if fieldTypeId == thrift.I64 { + if err = p.ReadField140(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 141: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField141(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, 
fieldId, fieldIDToName_TQueryOptions[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryOptions) ReadField1(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.AbortOnError = _field + return nil +} +func (p *TQueryOptions) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.MaxErrors = _field + return nil +} +func (p *TQueryOptions) ReadField3(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.DisableCodegen = _field + return nil +} func (p *TQueryOptions) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.BatchSize = _field + return nil +} +func (p *TQueryOptions) ReadField5(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.NumNodes = _field + return nil +} +func (p *TQueryOptions) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MaxScanRangeLength = _field + return nil +} +func (p *TQueryOptions) ReadField7(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.NumScannerThreads = _field + return nil +} +func (p *TQueryOptions) ReadField8(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.MaxIoBuffers = _field + return nil +} +func (p *TQueryOptions) ReadField9(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.AllowUnsupportedFormats = _field + return nil +} +func (p *TQueryOptions) ReadField10(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.DefaultOrderByLimit = _field + return nil +} +func (p *TQueryOptions) ReadField12(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MemLimit = _field + return nil +} +func (p *TQueryOptions) ReadField13(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.AbortOnDefaultLimitExceeded = _field + return nil +} +func (p *TQueryOptions) ReadField14(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.QueryTimeout = _field + return nil +} +func (p *TQueryOptions) ReadField15(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsReportSuccess = _field + return nil +} +func (p *TQueryOptions) ReadField16(iprot thrift.TProtocol) error { + + var _field int32 + if 
v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.CodegenLevel = _field + return nil +} +func (p *TQueryOptions) ReadField17(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.KuduLatestObservedTs = _field + return nil +} +func (p *TQueryOptions) ReadField18(iprot thrift.TProtocol) error { + + var _field TQueryType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TQueryType(v) + } + p.QueryType = _field + return nil +} +func (p *TQueryOptions) ReadField19(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MinReservation = _field + return nil +} +func (p *TQueryOptions) ReadField20(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MaxReservation = _field + return nil +} +func (p *TQueryOptions) ReadField21(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.InitialReservationTotalClaims = _field + return nil +} +func (p *TQueryOptions) ReadField22(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.BufferPoolLimit = _field + return nil +} +func (p *TQueryOptions) ReadField23(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.DefaultSpillableBufferSize = _field + return nil +} +func (p *TQueryOptions) ReadField24(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MinSpillableBufferSize = _field + return nil +} +func (p *TQueryOptions) ReadField25(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MaxRowSize = _field + return nil +} +func (p *TQueryOptions) ReadField26(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.DisableStreamPreaggregations = _field + return nil +} +func (p *TQueryOptions) ReadField27(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.MtDop = _field + return nil +} +func (p *TQueryOptions) ReadField28(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.LoadMemLimit = _field + return nil +} +func (p *TQueryOptions) ReadField29(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MaxScanKeyNum = _field + return nil +} +func (p *TQueryOptions) ReadField30(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.MaxPushdownConditionsPerColumn = _field + return nil +} +func (p *TQueryOptions) ReadField31(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableSpilling = _field + return nil +} +func (p *TQueryOptions) ReadField32(iprot 
thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableEnableExchangeNodeParallelMerge = _field + return nil +} +func (p *TQueryOptions) ReadField33(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RuntimeFilterWaitTimeMs = _field + return nil +} +func (p *TQueryOptions) ReadField34(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RuntimeFilterMaxInNum = _field + return nil +} +func (p *TQueryOptions) ReadField42(iprot thrift.TProtocol) error { + _field := NewTResourceLimit() + if err := _field.Read(iprot); err != nil { + return err + } + p.ResourceLimit = _field + return nil +} +func (p *TQueryOptions) ReadField43(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.ReturnObjectDataAsBinary = _field + return nil +} +func (p *TQueryOptions) ReadField44(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.TrimTailingSpacesForExternalTableQuery = _field + return nil +} +func (p *TQueryOptions) ReadField45(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableFunctionPushdown = _field + return nil +} +func (p *TQueryOptions) ReadField46(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FragmentTransmissionCompressionCodec = _field + return nil +} +func (p *TQueryOptions) ReadField48(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableLocalExchange = _field + return nil +} +func (p *TQueryOptions) ReadField49(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipStorageEngineMerge = _field + return nil +} +func (p *TQueryOptions) ReadField50(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipDeletePredicate = _field + return nil +} +func (p *TQueryOptions) ReadField51(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableNewShuffleHashMethod = _field + return nil +} +func (p *TQueryOptions) ReadField52(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.BeExecVersion = _field + return nil +} +func (p *TQueryOptions) ReadField53(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PartitionedHashJoinRowsThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField54(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.EnableShareHashTableForBroadcastJoin = _field + return nil +} +func (p *TQueryOptions) ReadField55(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err 
!= nil { + return err + } else { + _field = v + } + p.CheckOverflowForDecimal = _field + return nil +} +func (p *TQueryOptions) ReadField56(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipDeleteBitmap = _field + return nil +} +func (p *TQueryOptions) ReadField57(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnablePipelineEngine = _field + return nil +} +func (p *TQueryOptions) ReadField58(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RepeatMaxNum = _field + return nil +} +func (p *TQueryOptions) ReadField59(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ExternalSortBytesThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField60(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PartitionedHashAggRowsThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField61(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableFileCache = _field + return nil +} +func (p *TQueryOptions) ReadField62(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.InsertTimeout = _field + return nil +} +func (p *TQueryOptions) ReadField63(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ExecutionTimeout = _field + return nil +} +func (p *TQueryOptions) ReadField64(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.DryRunQuery = _field + return nil +} +func (p *TQueryOptions) ReadField65(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableCommonExprPushdown = _field + return nil +} +func (p *TQueryOptions) ReadField66(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ParallelInstance = _field + return nil +} +func (p *TQueryOptions) ReadField67(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.MysqlRowBinaryFormat = _field + return nil +} +func (p *TQueryOptions) ReadField68(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ExternalAggBytesThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField69(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ExternalAggPartitionBits = _field + return nil +} +func (p *TQueryOptions) ReadField70(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FileCacheBasePath = _field + return nil +} +func (p *TQueryOptions) ReadField71(iprot thrift.TProtocol) error { 
+ + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableParquetLazyMat = _field + return nil +} +func (p *TQueryOptions) ReadField72(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableOrcLazyMat = _field + return nil +} +func (p *TQueryOptions) ReadField73(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ScanQueueMemLimit = _field + return nil +} +func (p *TQueryOptions) ReadField74(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableScanNodeRunSerial = _field + return nil +} +func (p *TQueryOptions) ReadField75(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableInsertStrict = _field + return nil +} +func (p *TQueryOptions) ReadField76(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableInvertedIndexQuery = _field + return nil +} +func (p *TQueryOptions) ReadField77(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.TruncateCharOrVarcharColumns = _field + return nil +} +func (p *TQueryOptions) ReadField78(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableHashJoinEarlyStartProbe = _field + return nil +} +func (p *TQueryOptions) ReadField79(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnablePipelineXEngine = _field + return nil +} +func (p *TQueryOptions) ReadField80(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableMemtableOnSinkNode = _field + return nil +} +func (p *TQueryOptions) ReadField81(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableDeleteSubPredicateV2 = _field + return nil +} +func (p *TQueryOptions) ReadField82(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.FeProcessUuid = _field + return nil +} +func (p *TQueryOptions) ReadField83(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.InvertedIndexConjunctionOptThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField84(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableProfile = _field + return nil +} +func (p *TQueryOptions) ReadField85(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnablePageCache = _field + return nil +} +func (p *TQueryOptions) ReadField86(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.AnalyzeTimeout = _field + 
return nil +} +func (p *TQueryOptions) ReadField87(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.FasterFloatConvert = _field + return nil +} +func (p *TQueryOptions) ReadField88(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableDecimal256 = _field + return nil +} +func (p *TQueryOptions) ReadField89(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableLocalShuffle = _field + return nil +} +func (p *TQueryOptions) ReadField90(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipMissingVersion = _field + return nil +} +func (p *TQueryOptions) ReadField91(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.RuntimeFilterWaitInfinitely = _field + return nil +} +func (p *TQueryOptions) ReadField92(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.WaitFullBlockScheduleTimes = _field + return nil +} +func (p *TQueryOptions) ReadField93(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.InvertedIndexMaxExpansions = _field + return nil +} +func (p *TQueryOptions) ReadField94(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.InvertedIndexSkipThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField95(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableParallelScan = _field + return nil +} +func (p *TQueryOptions) ReadField96(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ParallelScanMaxScannersCount = _field + return nil +} +func (p *TQueryOptions) ReadField97(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ParallelScanMinRowsPerScanner = _field + return nil +} +func (p *TQueryOptions) ReadField98(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.SkipBadTablet = _field + return nil +} +func (p *TQueryOptions) ReadField99(iprot thrift.TProtocol) error { + + var _field float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = v + } + p.ScannerScaleUpRatio = _field + return nil +} +func (p *TQueryOptions) ReadField100(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableDistinctStreamingAggregation = _field + return nil +} +func (p *TQueryOptions) ReadField101(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableJoinSpill = _field + return nil +} +func (p *TQueryOptions) ReadField102(iprot thrift.TProtocol) error { + + var _field bool + if v, err := 
iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableSortSpill = _field + return nil +} +func (p *TQueryOptions) ReadField103(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableAggSpill = _field + return nil +} +func (p *TQueryOptions) ReadField104(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.MinRevocableMem = _field + return nil +} +func (p *TQueryOptions) ReadField105(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.SpillStreamingAggMemLimit = _field + return nil +} +func (p *TQueryOptions) ReadField106(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.DataQueueMaxBlocks = _field + return nil +} +func (p *TQueryOptions) ReadField107(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableCommonExprPushdownForInvertedIndex = _field + return nil +} +func (p *TQueryOptions) ReadField108(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.LocalExchangeFreeBlocksLimit = _field + return nil +} +func (p *TQueryOptions) ReadField109(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableForceSpill = _field + return nil +} +func (p *TQueryOptions) ReadField110(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableParquetFilterByMinMax = _field + return nil +} +func (p *TQueryOptions) ReadField111(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableOrcFilterByMinMax = _field + return nil +} +func (p *TQueryOptions) ReadField112(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.MaxColumnReaderNum = _field + return nil +} +func (p *TQueryOptions) ReadField113(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableLocalMergeSort = _field + return nil +} +func (p *TQueryOptions) ReadField114(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableParallelResultSink = _field + return nil +} +func (p *TQueryOptions) ReadField115(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableShortCircuitQueryAccessColumnStore = _field + return nil +} +func (p *TQueryOptions) ReadField116(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableNoNeedReadDataOpt = _field + return nil +} +func (p *TQueryOptions) ReadField117(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.ReadCsvEmptyLineAsNull = 
_field + return nil +} +func (p *TQueryOptions) ReadField118(iprot thrift.TProtocol) error { + + var _field TSerdeDialect + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TSerdeDialect(v) + } + p.SerdeDialect = _field + return nil +} +func (p *TQueryOptions) ReadField119(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableMatchWithoutInvertedIndex = _field + return nil +} +func (p *TQueryOptions) ReadField120(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableFallbackOnMissingInvertedIndex = _field + return nil +} +func (p *TQueryOptions) ReadField121(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.KeepCarriageReturn = _field + return nil +} +func (p *TQueryOptions) ReadField122(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RuntimeBloomFilterMinSize = _field + return nil +} +func (p *TQueryOptions) ReadField123(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.HiveParquetUseColumnNames = _field + return nil +} +func (p *TQueryOptions) ReadField124(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.HiveOrcUseColumnNames = _field + return nil +} +func (p *TQueryOptions) ReadField125(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableSegmentCache = _field + return nil +} +func (p *TQueryOptions) ReadField126(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RuntimeBloomFilterMaxSize = _field + return nil +} +func (p *TQueryOptions) ReadField127(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.InListValueCountThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField128(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableVerboseProfile = _field + return nil +} +func (p *TQueryOptions) ReadField129(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RpcVerboseProfileMaxInstanceCount = _field + return nil +} +func (p *TQueryOptions) ReadField130(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableAdaptivePipelineTaskSerialReadOnLimit = _field + return nil +} +func (p *TQueryOptions) ReadField131(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.AdaptivePipelineTaskSerialReadOnLimit = _field + return nil +} +func (p *TQueryOptions) ReadField132(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ParallelPrepareThreshold = _field + return nil +} +func (p 
*TQueryOptions) ReadField133(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PartitionTopnMaxPartitions = _field + return nil +} +func (p *TQueryOptions) ReadField134(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BatchSize = v + _field = v + } + p.PartitionTopnPrePartitionRows = _field + return nil +} +func (p *TQueryOptions) ReadField135(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableParallelOutfile = _field + return nil +} +func (p *TQueryOptions) ReadField136(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnablePhraseQuerySequentialOpt = _field + return nil +} +func (p *TQueryOptions) ReadField137(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableAutoCreateWhenOverwrite = _field + return nil +} +func (p *TQueryOptions) ReadField138(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.OrcTinyStripeThresholdBytes = _field + return nil +} +func (p *TQueryOptions) ReadField139(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.OrcOnceMaxReadBytes = _field + return nil +} +func (p *TQueryOptions) ReadField140(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.OrcMaxMergeDistanceBytes = _field + return nil +} +func (p *TQueryOptions) ReadField141(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IgnoreRuntimeFilterError = _field + return nil +} +func (p *TQueryOptions) ReadField1000(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.DisableFileCache = _field + return nil +} + +func (p *TQueryOptions) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TQueryOptions"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto 
WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } + if err = p.writeField25(oprot); err != nil { + fieldId = 25 + goto WriteFieldError + } + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } + if err = p.writeField31(oprot); err != nil { + fieldId = 31 + goto WriteFieldError + } + if err = p.writeField32(oprot); err != nil { + fieldId = 32 + goto WriteFieldError + } + if err = p.writeField33(oprot); err != nil { + fieldId = 33 + goto WriteFieldError + } + if err = p.writeField34(oprot); err != nil { + fieldId = 34 + goto WriteFieldError + } + if err = p.writeField42(oprot); err != nil { + fieldId = 42 + goto WriteFieldError + } + if err = p.writeField43(oprot); err != nil { + fieldId = 43 + goto WriteFieldError + } + if err = p.writeField44(oprot); err != nil { + fieldId = 44 + goto WriteFieldError + } + if err = p.writeField45(oprot); err != nil { + fieldId = 45 + goto WriteFieldError + } + if err = p.writeField46(oprot); err != nil { + fieldId = 46 + goto WriteFieldError + } + if err = p.writeField48(oprot); err != nil { + fieldId = 48 + goto WriteFieldError + } + if err = p.writeField49(oprot); err != nil { + fieldId = 49 + goto WriteFieldError + } + if err = p.writeField50(oprot); err != nil { + fieldId = 50 + goto WriteFieldError + } + if err = p.writeField51(oprot); err != nil { + fieldId = 51 + goto WriteFieldError + } + if err = p.writeField52(oprot); err != nil { + fieldId = 52 + goto WriteFieldError + } + if err = p.writeField53(oprot); err != nil { + fieldId = 53 + goto WriteFieldError + } + if err = p.writeField54(oprot); err != nil { + fieldId = 54 + goto WriteFieldError + } + if err = p.writeField55(oprot); err != nil { + fieldId = 55 + goto WriteFieldError + } + if err = p.writeField56(oprot); err != nil { + fieldId = 56 + goto WriteFieldError + } + if err = p.writeField57(oprot); err != nil { + fieldId = 57 + goto WriteFieldError + } + if err = p.writeField58(oprot); err != nil { + fieldId = 58 + goto WriteFieldError + } + if err = p.writeField59(oprot); err != nil { + fieldId = 59 + goto WriteFieldError + } + if err = p.writeField60(oprot); err != nil { + fieldId = 60 + goto WriteFieldError + } + if err = p.writeField61(oprot); err != nil { + fieldId = 61 + 
goto WriteFieldError + } + if err = p.writeField62(oprot); err != nil { + fieldId = 62 + goto WriteFieldError + } + if err = p.writeField63(oprot); err != nil { + fieldId = 63 + goto WriteFieldError + } + if err = p.writeField64(oprot); err != nil { + fieldId = 64 + goto WriteFieldError + } + if err = p.writeField65(oprot); err != nil { + fieldId = 65 + goto WriteFieldError + } + if err = p.writeField66(oprot); err != nil { + fieldId = 66 + goto WriteFieldError + } + if err = p.writeField67(oprot); err != nil { + fieldId = 67 + goto WriteFieldError + } + if err = p.writeField68(oprot); err != nil { + fieldId = 68 + goto WriteFieldError + } + if err = p.writeField69(oprot); err != nil { + fieldId = 69 + goto WriteFieldError + } + if err = p.writeField70(oprot); err != nil { + fieldId = 70 + goto WriteFieldError + } + if err = p.writeField71(oprot); err != nil { + fieldId = 71 + goto WriteFieldError + } + if err = p.writeField72(oprot); err != nil { + fieldId = 72 + goto WriteFieldError + } + if err = p.writeField73(oprot); err != nil { + fieldId = 73 + goto WriteFieldError + } + if err = p.writeField74(oprot); err != nil { + fieldId = 74 + goto WriteFieldError + } + if err = p.writeField75(oprot); err != nil { + fieldId = 75 + goto WriteFieldError + } + if err = p.writeField76(oprot); err != nil { + fieldId = 76 + goto WriteFieldError + } + if err = p.writeField77(oprot); err != nil { + fieldId = 77 + goto WriteFieldError + } + if err = p.writeField78(oprot); err != nil { + fieldId = 78 + goto WriteFieldError + } + if err = p.writeField79(oprot); err != nil { + fieldId = 79 + goto WriteFieldError + } + if err = p.writeField80(oprot); err != nil { + fieldId = 80 + goto WriteFieldError + } + if err = p.writeField81(oprot); err != nil { + fieldId = 81 + goto WriteFieldError + } + if err = p.writeField82(oprot); err != nil { + fieldId = 82 + goto WriteFieldError + } + if err = p.writeField83(oprot); err != nil { + fieldId = 83 + goto WriteFieldError + } + if err = p.writeField84(oprot); err != nil { + fieldId = 84 + goto WriteFieldError + } + if err = p.writeField85(oprot); err != nil { + fieldId = 85 + goto WriteFieldError + } + if err = p.writeField86(oprot); err != nil { + fieldId = 86 + goto WriteFieldError + } + if err = p.writeField87(oprot); err != nil { + fieldId = 87 + goto WriteFieldError + } + if err = p.writeField88(oprot); err != nil { + fieldId = 88 + goto WriteFieldError + } + if err = p.writeField89(oprot); err != nil { + fieldId = 89 + goto WriteFieldError + } + if err = p.writeField90(oprot); err != nil { + fieldId = 90 + goto WriteFieldError + } + if err = p.writeField91(oprot); err != nil { + fieldId = 91 + goto WriteFieldError + } + if err = p.writeField92(oprot); err != nil { + fieldId = 92 + goto WriteFieldError + } + if err = p.writeField93(oprot); err != nil { + fieldId = 93 + goto WriteFieldError + } + if err = p.writeField94(oprot); err != nil { + fieldId = 94 + goto WriteFieldError + } + if err = p.writeField95(oprot); err != nil { + fieldId = 95 + goto WriteFieldError + } + if err = p.writeField96(oprot); err != nil { + fieldId = 96 + goto WriteFieldError + } + if err = p.writeField97(oprot); err != nil { + fieldId = 97 + goto WriteFieldError + } + if err = p.writeField98(oprot); err != nil { + fieldId = 98 + goto WriteFieldError + } + if err = p.writeField99(oprot); err != nil { + fieldId = 99 + goto WriteFieldError + } + if err = p.writeField100(oprot); err != nil { + fieldId = 100 + goto WriteFieldError + } + if err = p.writeField101(oprot); err != nil { + 
fieldId = 101 + goto WriteFieldError + } + if err = p.writeField102(oprot); err != nil { + fieldId = 102 + goto WriteFieldError + } + if err = p.writeField103(oprot); err != nil { + fieldId = 103 + goto WriteFieldError + } + if err = p.writeField104(oprot); err != nil { + fieldId = 104 + goto WriteFieldError + } + if err = p.writeField105(oprot); err != nil { + fieldId = 105 + goto WriteFieldError + } + if err = p.writeField106(oprot); err != nil { + fieldId = 106 + goto WriteFieldError + } + if err = p.writeField107(oprot); err != nil { + fieldId = 107 + goto WriteFieldError + } + if err = p.writeField108(oprot); err != nil { + fieldId = 108 + goto WriteFieldError + } + if err = p.writeField109(oprot); err != nil { + fieldId = 109 + goto WriteFieldError + } + if err = p.writeField110(oprot); err != nil { + fieldId = 110 + goto WriteFieldError + } + if err = p.writeField111(oprot); err != nil { + fieldId = 111 + goto WriteFieldError + } + if err = p.writeField112(oprot); err != nil { + fieldId = 112 + goto WriteFieldError + } + if err = p.writeField113(oprot); err != nil { + fieldId = 113 + goto WriteFieldError + } + if err = p.writeField114(oprot); err != nil { + fieldId = 114 + goto WriteFieldError + } + if err = p.writeField115(oprot); err != nil { + fieldId = 115 + goto WriteFieldError + } + if err = p.writeField116(oprot); err != nil { + fieldId = 116 + goto WriteFieldError + } + if err = p.writeField117(oprot); err != nil { + fieldId = 117 + goto WriteFieldError + } + if err = p.writeField118(oprot); err != nil { + fieldId = 118 + goto WriteFieldError + } + if err = p.writeField119(oprot); err != nil { + fieldId = 119 + goto WriteFieldError + } + if err = p.writeField120(oprot); err != nil { + fieldId = 120 + goto WriteFieldError + } + if err = p.writeField121(oprot); err != nil { + fieldId = 121 + goto WriteFieldError + } + if err = p.writeField122(oprot); err != nil { + fieldId = 122 + goto WriteFieldError + } + if err = p.writeField123(oprot); err != nil { + fieldId = 123 + goto WriteFieldError + } + if err = p.writeField124(oprot); err != nil { + fieldId = 124 + goto WriteFieldError + } + if err = p.writeField125(oprot); err != nil { + fieldId = 125 + goto WriteFieldError + } + if err = p.writeField126(oprot); err != nil { + fieldId = 126 + goto WriteFieldError + } + if err = p.writeField127(oprot); err != nil { + fieldId = 127 + goto WriteFieldError + } + if err = p.writeField128(oprot); err != nil { + fieldId = 128 + goto WriteFieldError + } + if err = p.writeField129(oprot); err != nil { + fieldId = 129 + goto WriteFieldError + } + if err = p.writeField130(oprot); err != nil { + fieldId = 130 + goto WriteFieldError + } + if err = p.writeField131(oprot); err != nil { + fieldId = 131 + goto WriteFieldError + } + if err = p.writeField132(oprot); err != nil { + fieldId = 132 + goto WriteFieldError + } + if err = p.writeField133(oprot); err != nil { + fieldId = 133 + goto WriteFieldError + } + if err = p.writeField134(oprot); err != nil { + fieldId = 134 + goto WriteFieldError + } + if err = p.writeField135(oprot); err != nil { + fieldId = 135 + goto WriteFieldError + } + if err = p.writeField136(oprot); err != nil { + fieldId = 136 + goto WriteFieldError + } + if err = p.writeField137(oprot); err != nil { + fieldId = 137 + goto WriteFieldError + } + if err = p.writeField138(oprot); err != nil { + fieldId = 138 + goto WriteFieldError + } + if err = p.writeField139(oprot); err != nil { + fieldId = 139 + goto WriteFieldError + } + if err = p.writeField140(oprot); err != nil { + 
fieldId = 140 + goto WriteFieldError + } + if err = p.writeField141(oprot); err != nil { + fieldId = 141 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TQueryOptions) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetAbortOnError() { + if err = oprot.WriteFieldBegin("abort_on_error", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.AbortOnError); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TQueryOptions) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxErrors() { + if err = oprot.WriteFieldBegin("max_errors", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.MaxErrors); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueryOptions) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDisableCodegen() { + if err = oprot.WriteFieldBegin("disable_codegen", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.DisableCodegen); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueryOptions) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBatchSize() { + if err = oprot.WriteFieldBegin("batch_size", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.BatchSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryOptions) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetNumNodes() { + if err = oprot.WriteFieldBegin("num_nodes", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.NumNodes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TQueryOptions) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxScanRangeLength() { + if err = oprot.WriteFieldBegin("max_scan_range_length", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MaxScanRangeLength); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TQueryOptions) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetNumScannerThreads() { + if err = oprot.WriteFieldBegin("num_scanner_threads", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.NumScannerThreads); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TQueryOptions) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxIoBuffers() { + if err = oprot.WriteFieldBegin("max_io_buffers", thrift.I32, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.MaxIoBuffers); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TQueryOptions) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetAllowUnsupportedFormats() { + if err = oprot.WriteFieldBegin("allow_unsupported_formats", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.AllowUnsupportedFormats); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TQueryOptions) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultOrderByLimit() { + if err = oprot.WriteFieldBegin("default_order_by_limit", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.DefaultOrderByLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TQueryOptions) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetMemLimit() { + if err = oprot.WriteFieldBegin("mem_limit", thrift.I64, 12); err != nil { + goto WriteFieldBeginError + } 
+ if err := oprot.WriteI64(p.MemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TQueryOptions) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetAbortOnDefaultLimitExceeded() { + if err = oprot.WriteFieldBegin("abort_on_default_limit_exceeded", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.AbortOnDefaultLimitExceeded); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TQueryOptions) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryTimeout() { + if err = oprot.WriteFieldBegin("query_timeout", thrift.I32, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.QueryTimeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TQueryOptions) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetIsReportSuccess() { + if err = oprot.WriteFieldBegin("is_report_success", thrift.BOOL, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsReportSuccess); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TQueryOptions) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetCodegenLevel() { + if err = oprot.WriteFieldBegin("codegen_level", thrift.I32, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.CodegenLevel); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TQueryOptions) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetKuduLatestObservedTs() { + if err = oprot.WriteFieldBegin("kudu_latest_observed_ts", thrift.I64, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.KuduLatestObservedTs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TQueryOptions) writeField18(oprot thrift.TProtocol) (err error) { 
+ if p.IsSetQueryType() { + if err = oprot.WriteFieldBegin("query_type", thrift.I32, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.QueryType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TQueryOptions) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetMinReservation() { + if err = oprot.WriteFieldBegin("min_reservation", thrift.I64, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MinReservation); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TQueryOptions) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxReservation() { + if err = oprot.WriteFieldBegin("max_reservation", thrift.I64, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MaxReservation); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + +func (p *TQueryOptions) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetInitialReservationTotalClaims() { + if err = oprot.WriteFieldBegin("initial_reservation_total_claims", thrift.I64, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.InitialReservationTotalClaims); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) +} + +func (p *TQueryOptions) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetBufferPoolLimit() { + if err = oprot.WriteFieldBegin("buffer_pool_limit", thrift.I64, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.BufferPoolLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TQueryOptions) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultSpillableBufferSize() { + if err = oprot.WriteFieldBegin("default_spillable_buffer_size", thrift.I64, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.DefaultSpillableBufferSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + +func (p *TQueryOptions) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetMinSpillableBufferSize() { + if err = oprot.WriteFieldBegin("min_spillable_buffer_size", thrift.I64, 24); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MinSpillableBufferSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) } -func (p *TQueryOptions) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.NumNodes = v +func (p *TQueryOptions) writeField25(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxRowSize() { + if err = oprot.WriteFieldBegin("max_row_size", thrift.I64, 25); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MaxRowSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) +} + +func (p *TQueryOptions) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetDisableStreamPreaggregations() { + if err = oprot.WriteFieldBegin("disable_stream_preaggregations", thrift.BOOL, 26); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.DisableStreamPreaggregations); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) +} + +func (p *TQueryOptions) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetMtDop() { + if err = oprot.WriteFieldBegin("mt_dop", thrift.I32, 27); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.MtDop); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) +} + +func (p *TQueryOptions) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadMemLimit() { + if err = oprot.WriteFieldBegin("load_mem_limit", thrift.I64, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.LoadMemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) +} + +func (p *TQueryOptions) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxScanKeyNum() { + if err = oprot.WriteFieldBegin("max_scan_key_num", thrift.I32, 29); err != nil { + goto WriteFieldBeginError + } + if err 
:= oprot.WriteI32(*p.MaxScanKeyNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) +} + +func (p *TQueryOptions) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxPushdownConditionsPerColumn() { + if err = oprot.WriteFieldBegin("max_pushdown_conditions_per_column", thrift.I32, 30); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.MaxPushdownConditionsPerColumn); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) +} + +func (p *TQueryOptions) writeField31(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableSpilling() { + if err = oprot.WriteFieldBegin("enable_spilling", thrift.BOOL, 31); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableSpilling); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) +} + +func (p *TQueryOptions) writeField32(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableEnableExchangeNodeParallelMerge() { + if err = oprot.WriteFieldBegin("enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableEnableExchangeNodeParallelMerge); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) +} + +func (p *TQueryOptions) writeField33(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterWaitTimeMs() { + if err = oprot.WriteFieldBegin("runtime_filter_wait_time_ms", thrift.I32, 33); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.RuntimeFilterWaitTimeMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) +} + +func (p *TQueryOptions) writeField34(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterMaxInNum() { + if err = oprot.WriteFieldBegin("runtime_filter_max_in_num", thrift.I32, 34); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.RuntimeFilterMaxInNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) +} + +func (p *TQueryOptions) writeField42(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceLimit() { + if err = oprot.WriteFieldBegin("resource_limit", thrift.STRUCT, 42); err != nil { + goto WriteFieldBeginError + } + if err := p.ResourceLimit.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 end error: ", p), err) +} + +func (p *TQueryOptions) writeField43(oprot thrift.TProtocol) (err error) { + if p.IsSetReturnObjectDataAsBinary() { + if err = oprot.WriteFieldBegin("return_object_data_as_binary", thrift.BOOL, 43); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.ReturnObjectDataAsBinary); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 43 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) +} + +func (p *TQueryOptions) writeField44(oprot thrift.TProtocol) (err error) { + if p.IsSetTrimTailingSpacesForExternalTableQuery() { + if err = oprot.WriteFieldBegin("trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.TrimTailingSpacesForExternalTableQuery); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) } -func (p *TQueryOptions) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MaxScanRangeLength = v +func (p *TQueryOptions) writeField45(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableFunctionPushdown() { + if err = oprot.WriteFieldBegin("enable_function_pushdown", thrift.BOOL, 45); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableFunctionPushdown); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 45 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 45 end error: ", p), err) } -func (p *TQueryOptions) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.NumScannerThreads = v +func (p *TQueryOptions) writeField46(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentTransmissionCompressionCodec() { + if err = oprot.WriteFieldBegin("fragment_transmission_compression_codec", thrift.STRING, 46); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FragmentTransmissionCompressionCodec); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 46 begin error: ", p), err) +WriteFieldEndError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field 46 end error: ", p), err) } -func (p *TQueryOptions) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.MaxIoBuffers = v +func (p *TQueryOptions) writeField48(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableLocalExchange() { + if err = oprot.WriteFieldBegin("enable_local_exchange", thrift.BOOL, 48); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableLocalExchange); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 48 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 48 end error: ", p), err) } -func (p *TQueryOptions) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.AllowUnsupportedFormats = v +func (p *TQueryOptions) writeField49(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipStorageEngineMerge() { + if err = oprot.WriteFieldBegin("skip_storage_engine_merge", thrift.BOOL, 49); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.SkipStorageEngineMerge); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 49 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 49 end error: ", p), err) } -func (p *TQueryOptions) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DefaultOrderByLimit = v +func (p *TQueryOptions) writeField50(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipDeletePredicate() { + if err = oprot.WriteFieldBegin("skip_delete_predicate", thrift.BOOL, 50); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.SkipDeletePredicate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 end error: ", p), err) } -func (p *TQueryOptions) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MemLimit = v +func (p *TQueryOptions) writeField51(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableNewShuffleHashMethod() { + if err = oprot.WriteFieldBegin("enable_new_shuffle_hash_method", thrift.BOOL, 51); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableNewShuffleHashMethod); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 end error: ", p), err) } -func (p *TQueryOptions) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.AbortOnDefaultLimitExceeded = v +func (p *TQueryOptions) writeField52(oprot thrift.TProtocol) (err error) { + if p.IsSetBeExecVersion() { + if err = 
oprot.WriteFieldBegin("be_exec_version", thrift.I32, 52); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.BeExecVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 52 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 52 end error: ", p), err) } -func (p *TQueryOptions) ReadField14(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.QueryTimeout = v +func (p *TQueryOptions) writeField53(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionedHashJoinRowsThreshold() { + if err = oprot.WriteFieldBegin("partitioned_hash_join_rows_threshold", thrift.I32, 53); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.PartitionedHashJoinRowsThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 53 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 53 end error: ", p), err) } -func (p *TQueryOptions) ReadField15(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.IsReportSuccess = v +func (p *TQueryOptions) writeField54(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableShareHashTableForBroadcastJoin() { + if err = oprot.WriteFieldBegin("enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.EnableShareHashTableForBroadcastJoin); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 54 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 54 end error: ", p), err) } -func (p *TQueryOptions) ReadField16(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.CodegenLevel = v +func (p *TQueryOptions) writeField55(oprot thrift.TProtocol) (err error) { + if p.IsSetCheckOverflowForDecimal() { + if err = oprot.WriteFieldBegin("check_overflow_for_decimal", thrift.BOOL, 55); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.CheckOverflowForDecimal); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 55 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 55 end error: ", p), err) } -func (p *TQueryOptions) ReadField17(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.KuduLatestObservedTs = v +func (p *TQueryOptions) writeField56(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipDeleteBitmap() { + if err = oprot.WriteFieldBegin("skip_delete_bitmap", thrift.BOOL, 56); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.SkipDeleteBitmap); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 56 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 56 end error: ", p), err) } -func (p *TQueryOptions) ReadField18(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.QueryType = TQueryType(v) +func (p *TQueryOptions) writeField57(oprot thrift.TProtocol) (err error) { + if p.IsSetEnablePipelineEngine() { + if err = oprot.WriteFieldBegin("enable_pipeline_engine", thrift.BOOL, 57); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnablePipelineEngine); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 57 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 57 end error: ", p), err) } -func (p *TQueryOptions) ReadField19(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MinReservation = v +func (p *TQueryOptions) writeField58(oprot thrift.TProtocol) (err error) { + if p.IsSetRepeatMaxNum() { + if err = oprot.WriteFieldBegin("repeat_max_num", thrift.I32, 58); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.RepeatMaxNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 58 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 58 end error: ", p), err) } -func (p *TQueryOptions) ReadField20(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MaxReservation = v +func (p *TQueryOptions) writeField59(oprot thrift.TProtocol) (err error) { + if p.IsSetExternalSortBytesThreshold() { + if err = oprot.WriteFieldBegin("external_sort_bytes_threshold", thrift.I64, 59); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ExternalSortBytesThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 59 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 59 end error: ", p), err) } -func (p *TQueryOptions) ReadField21(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.InitialReservationTotalClaims = v +func (p *TQueryOptions) writeField60(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionedHashAggRowsThreshold() { + if err = oprot.WriteFieldBegin("partitioned_hash_agg_rows_threshold", thrift.I32, 60); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.PartitionedHashAggRowsThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 60 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 60 end error: ", p), err) } -func (p *TQueryOptions) ReadField22(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.BufferPoolLimit = v +func (p *TQueryOptions) 
writeField61(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableFileCache() { + if err = oprot.WriteFieldBegin("enable_file_cache", thrift.BOOL, 61); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableFileCache); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 61 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 61 end error: ", p), err) } -func (p *TQueryOptions) ReadField23(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.DefaultSpillableBufferSize = v +func (p *TQueryOptions) writeField62(oprot thrift.TProtocol) (err error) { + if p.IsSetInsertTimeout() { + if err = oprot.WriteFieldBegin("insert_timeout", thrift.I32, 62); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.InsertTimeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 62 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 62 end error: ", p), err) } -func (p *TQueryOptions) ReadField24(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MinSpillableBufferSize = v +func (p *TQueryOptions) writeField63(oprot thrift.TProtocol) (err error) { + if p.IsSetExecutionTimeout() { + if err = oprot.WriteFieldBegin("execution_timeout", thrift.I32, 63); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ExecutionTimeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 63 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 63 end error: ", p), err) } -func (p *TQueryOptions) ReadField25(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.MaxRowSize = v +func (p *TQueryOptions) writeField64(oprot thrift.TProtocol) (err error) { + if p.IsSetDryRunQuery() { + if err = oprot.WriteFieldBegin("dry_run_query", thrift.BOOL, 64); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.DryRunQuery); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 64 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 64 end error: ", p), err) } -func (p *TQueryOptions) ReadField26(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.DisableStreamPreaggregations = v +func (p *TQueryOptions) writeField65(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableCommonExprPushdown() { + if err = oprot.WriteFieldBegin("enable_common_expr_pushdown", thrift.BOOL, 65); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableCommonExprPushdown); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write 
field 65 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 65 end error: ", p), err) } -func (p *TQueryOptions) ReadField27(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.MtDop = v +func (p *TQueryOptions) writeField66(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelInstance() { + if err = oprot.WriteFieldBegin("parallel_instance", thrift.I32, 66); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ParallelInstance); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 66 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 66 end error: ", p), err) } -func (p *TQueryOptions) ReadField28(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadMemLimit = v +func (p *TQueryOptions) writeField67(oprot thrift.TProtocol) (err error) { + if p.IsSetMysqlRowBinaryFormat() { + if err = oprot.WriteFieldBegin("mysql_row_binary_format", thrift.BOOL, 67); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.MysqlRowBinaryFormat); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 67 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 67 end error: ", p), err) } -func (p *TQueryOptions) ReadField29(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.MaxScanKeyNum = &v +func (p *TQueryOptions) writeField68(oprot thrift.TProtocol) (err error) { + if p.IsSetExternalAggBytesThreshold() { + if err = oprot.WriteFieldBegin("external_agg_bytes_threshold", thrift.I64, 68); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ExternalAggBytesThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 68 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 68 end error: ", p), err) } -func (p *TQueryOptions) ReadField30(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.MaxPushdownConditionsPerColumn = &v +func (p *TQueryOptions) writeField69(oprot thrift.TProtocol) (err error) { + if p.IsSetExternalAggPartitionBits() { + if err = oprot.WriteFieldBegin("external_agg_partition_bits", thrift.I32, 69); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ExternalAggPartitionBits); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 69 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 69 end error: ", p), err) } -func (p *TQueryOptions) ReadField31(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableSpilling = v +func (p *TQueryOptions) writeField70(oprot thrift.TProtocol) (err error) { + if 
p.IsSetFileCacheBasePath() { + if err = oprot.WriteFieldBegin("file_cache_base_path", thrift.STRING, 70); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FileCacheBasePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 70 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 70 end error: ", p), err) } -func (p *TQueryOptions) ReadField32(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableEnableExchangeNodeParallelMerge = v +func (p *TQueryOptions) writeField71(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableParquetLazyMat() { + if err = oprot.WriteFieldBegin("enable_parquet_lazy_mat", thrift.BOOL, 71); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableParquetLazyMat); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil -} - -func (p *TQueryOptions) ReadField33(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.RuntimeFilterWaitTimeMs = v +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 71 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 71 end error: ", p), err) +} + +func (p *TQueryOptions) writeField72(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableOrcLazyMat() { + if err = oprot.WriteFieldBegin("enable_orc_lazy_mat", thrift.BOOL, 72); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableOrcLazyMat); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 72 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 72 end error: ", p), err) } -func (p *TQueryOptions) ReadField34(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.RuntimeFilterMaxInNum = v +func (p *TQueryOptions) writeField73(oprot thrift.TProtocol) (err error) { + if p.IsSetScanQueueMemLimit() { + if err = oprot.WriteFieldBegin("scan_queue_mem_limit", thrift.I64, 73); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ScanQueueMemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 73 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 73 end error: ", p), err) } -func (p *TQueryOptions) ReadField42(iprot thrift.TProtocol) error { - p.ResourceLimit = NewTResourceLimit() - if err := p.ResourceLimit.Read(iprot); err != nil { - return err +func (p *TQueryOptions) writeField74(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableScanNodeRunSerial() { + if err = oprot.WriteFieldBegin("enable_scan_node_run_serial", thrift.BOOL, 74); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableScanNodeRunSerial); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 74 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 74 end error: ", p), err) } -func (p *TQueryOptions) ReadField43(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.ReturnObjectDataAsBinary = v +func (p *TQueryOptions) writeField75(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableInsertStrict() { + if err = oprot.WriteFieldBegin("enable_insert_strict", thrift.BOOL, 75); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableInsertStrict); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 75 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 75 end error: ", p), err) } -func (p *TQueryOptions) ReadField44(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.TrimTailingSpacesForExternalTableQuery = v +func (p *TQueryOptions) writeField76(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableInvertedIndexQuery() { + if err = oprot.WriteFieldBegin("enable_inverted_index_query", thrift.BOOL, 76); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableInvertedIndexQuery); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 76 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 76 end error: ", p), err) } -func (p *TQueryOptions) ReadField45(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableFunctionPushdown = &v +func (p *TQueryOptions) writeField77(oprot thrift.TProtocol) (err error) { + if p.IsSetTruncateCharOrVarcharColumns() { + if err = oprot.WriteFieldBegin("truncate_char_or_varchar_columns", thrift.BOOL, 77); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.TruncateCharOrVarcharColumns); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 77 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 77 end error: ", p), err) } -func (p *TQueryOptions) ReadField46(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.FragmentTransmissionCompressionCodec = &v +func (p *TQueryOptions) writeField78(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableHashJoinEarlyStartProbe() { + if err = oprot.WriteFieldBegin("enable_hash_join_early_start_probe", thrift.BOOL, 78); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableHashJoinEarlyStartProbe); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 78 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 78 end error: ", p), err) } -func (p *TQueryOptions) ReadField48(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err 
!= nil { - return err - } else { - p.EnableLocalExchange = &v +func (p *TQueryOptions) writeField79(oprot thrift.TProtocol) (err error) { + if p.IsSetEnablePipelineXEngine() { + if err = oprot.WriteFieldBegin("enable_pipeline_x_engine", thrift.BOOL, 79); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnablePipelineXEngine); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 79 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 79 end error: ", p), err) } -func (p *TQueryOptions) ReadField49(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.SkipStorageEngineMerge = v +func (p *TQueryOptions) writeField80(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableMemtableOnSinkNode() { + if err = oprot.WriteFieldBegin("enable_memtable_on_sink_node", thrift.BOOL, 80); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableMemtableOnSinkNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 80 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 80 end error: ", p), err) } -func (p *TQueryOptions) ReadField50(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.SkipDeletePredicate = v +func (p *TQueryOptions) writeField81(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableDeleteSubPredicateV2() { + if err = oprot.WriteFieldBegin("enable_delete_sub_predicate_v2", thrift.BOOL, 81); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableDeleteSubPredicateV2); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 81 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 81 end error: ", p), err) } -func (p *TQueryOptions) ReadField51(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableNewShuffleHashMethod = &v +func (p *TQueryOptions) writeField82(oprot thrift.TProtocol) (err error) { + if p.IsSetFeProcessUuid() { + if err = oprot.WriteFieldBegin("fe_process_uuid", thrift.I64, 82); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.FeProcessUuid); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 82 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 82 end error: ", p), err) } -func (p *TQueryOptions) ReadField52(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.BeExecVersion = v +func (p *TQueryOptions) writeField83(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexConjunctionOptThreshold() { + if err = oprot.WriteFieldBegin("inverted_index_conjunction_opt_threshold", thrift.I32, 83); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteI32(p.InvertedIndexConjunctionOptThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 83 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 83 end error: ", p), err) } -func (p *TQueryOptions) ReadField53(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.PartitionedHashJoinRowsThreshold = v +func (p *TQueryOptions) writeField84(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableProfile() { + if err = oprot.WriteFieldBegin("enable_profile", thrift.BOOL, 84); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableProfile); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 84 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 84 end error: ", p), err) } -func (p *TQueryOptions) ReadField54(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableShareHashTableForBroadcastJoin = &v +func (p *TQueryOptions) writeField85(oprot thrift.TProtocol) (err error) { + if p.IsSetEnablePageCache() { + if err = oprot.WriteFieldBegin("enable_page_cache", thrift.BOOL, 85); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnablePageCache); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 85 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 85 end error: ", p), err) } -func (p *TQueryOptions) ReadField55(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.CheckOverflowForDecimal = v +func (p *TQueryOptions) writeField86(oprot thrift.TProtocol) (err error) { + if p.IsSetAnalyzeTimeout() { + if err = oprot.WriteFieldBegin("analyze_timeout", thrift.I32, 86); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.AnalyzeTimeout); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 86 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 86 end error: ", p), err) } -func (p *TQueryOptions) ReadField56(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.SkipDeleteBitmap = v +func (p *TQueryOptions) writeField87(oprot thrift.TProtocol) (err error) { + if p.IsSetFasterFloatConvert() { + if err = oprot.WriteFieldBegin("faster_float_convert", thrift.BOOL, 87); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.FasterFloatConvert); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 87 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 87 end error: ", p), err) } -func (p *TQueryOptions) 
ReadField57(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnablePipelineEngine = v +func (p *TQueryOptions) writeField88(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableDecimal256() { + if err = oprot.WriteFieldBegin("enable_decimal256", thrift.BOOL, 88); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableDecimal256); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 88 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 88 end error: ", p), err) } -func (p *TQueryOptions) ReadField58(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.RepeatMaxNum = v +func (p *TQueryOptions) writeField89(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableLocalShuffle() { + if err = oprot.WriteFieldBegin("enable_local_shuffle", thrift.BOOL, 89); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableLocalShuffle); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 89 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 89 end error: ", p), err) } -func (p *TQueryOptions) ReadField59(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ExternalSortBytesThreshold = v +func (p *TQueryOptions) writeField90(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipMissingVersion() { + if err = oprot.WriteFieldBegin("skip_missing_version", thrift.BOOL, 90); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.SkipMissingVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 90 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 90 end error: ", p), err) } -func (p *TQueryOptions) ReadField60(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.PartitionedHashAggRowsThreshold = v +func (p *TQueryOptions) writeField91(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterWaitInfinitely() { + if err = oprot.WriteFieldBegin("runtime_filter_wait_infinitely", thrift.BOOL, 91); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.RuntimeFilterWaitInfinitely); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 91 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 91 end error: ", p), err) } -func (p *TQueryOptions) ReadField61(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableFileCache = v +func (p *TQueryOptions) writeField92(oprot thrift.TProtocol) (err error) { + if p.IsSetWaitFullBlockScheduleTimes() { + if err = oprot.WriteFieldBegin("wait_full_block_schedule_times", thrift.I32, 92); err != nil { + goto WriteFieldBeginError + } + if 
err := oprot.WriteI32(p.WaitFullBlockScheduleTimes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 92 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 92 end error: ", p), err) } -func (p *TQueryOptions) ReadField62(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.InsertTimeout = v +func (p *TQueryOptions) writeField93(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexMaxExpansions() { + if err = oprot.WriteFieldBegin("inverted_index_max_expansions", thrift.I32, 93); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.InvertedIndexMaxExpansions); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 93 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 93 end error: ", p), err) } -func (p *TQueryOptions) ReadField63(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ExecutionTimeout = v +func (p *TQueryOptions) writeField94(oprot thrift.TProtocol) (err error) { + if p.IsSetInvertedIndexSkipThreshold() { + if err = oprot.WriteFieldBegin("inverted_index_skip_threshold", thrift.I32, 94); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.InvertedIndexSkipThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 94 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 94 end error: ", p), err) } -func (p *TQueryOptions) ReadField64(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.DryRunQuery = v +func (p *TQueryOptions) writeField95(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableParallelScan() { + if err = oprot.WriteFieldBegin("enable_parallel_scan", thrift.BOOL, 95); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableParallelScan); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 95 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 95 end error: ", p), err) } -func (p *TQueryOptions) ReadField65(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableCommonExprPushdown = v +func (p *TQueryOptions) writeField96(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelScanMaxScannersCount() { + if err = oprot.WriteFieldBegin("parallel_scan_max_scanners_count", thrift.I32, 96); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ParallelScanMaxScannersCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 96 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 
96 end error: ", p), err) } -func (p *TQueryOptions) ReadField66(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ParallelInstance = v +func (p *TQueryOptions) writeField97(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelScanMinRowsPerScanner() { + if err = oprot.WriteFieldBegin("parallel_scan_min_rows_per_scanner", thrift.I64, 97); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ParallelScanMinRowsPerScanner); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 97 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 97 end error: ", p), err) } -func (p *TQueryOptions) ReadField67(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.MysqlRowBinaryFormat = v +func (p *TQueryOptions) writeField98(oprot thrift.TProtocol) (err error) { + if p.IsSetSkipBadTablet() { + if err = oprot.WriteFieldBegin("skip_bad_tablet", thrift.BOOL, 98); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.SkipBadTablet); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 98 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 98 end error: ", p), err) } -func (p *TQueryOptions) ReadField68(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ExternalAggBytesThreshold = v +func (p *TQueryOptions) writeField99(oprot thrift.TProtocol) (err error) { + if p.IsSetScannerScaleUpRatio() { + if err = oprot.WriteFieldBegin("scanner_scale_up_ratio", thrift.DOUBLE, 99); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(p.ScannerScaleUpRatio); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 99 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 99 end error: ", p), err) } -func (p *TQueryOptions) ReadField69(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ExternalAggPartitionBits = v +func (p *TQueryOptions) writeField100(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableDistinctStreamingAggregation() { + if err = oprot.WriteFieldBegin("enable_distinct_streaming_aggregation", thrift.BOOL, 100); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableDistinctStreamingAggregation); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 end error: ", p), err) } -func (p *TQueryOptions) ReadField70(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.FileCacheBasePath = &v +func (p *TQueryOptions) writeField101(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableJoinSpill() { + if err = 
oprot.WriteFieldBegin("enable_join_spill", thrift.BOOL, 101); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableJoinSpill); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 101 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 101 end error: ", p), err) } -func (p *TQueryOptions) ReadField71(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableParquetLazyMat = v +func (p *TQueryOptions) writeField102(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableSortSpill() { + if err = oprot.WriteFieldBegin("enable_sort_spill", thrift.BOOL, 102); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableSortSpill); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 102 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 102 end error: ", p), err) } -func (p *TQueryOptions) ReadField72(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableOrcLazyMat = v +func (p *TQueryOptions) writeField103(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableAggSpill() { + if err = oprot.WriteFieldBegin("enable_agg_spill", thrift.BOOL, 103); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableAggSpill); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 103 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 103 end error: ", p), err) } -func (p *TQueryOptions) ReadField73(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.ScanQueueMemLimit = &v +func (p *TQueryOptions) writeField104(oprot thrift.TProtocol) (err error) { + if p.IsSetMinRevocableMem() { + if err = oprot.WriteFieldBegin("min_revocable_mem", thrift.I64, 104); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MinRevocableMem); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 104 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 104 end error: ", p), err) } -func (p *TQueryOptions) ReadField74(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableScanNodeRunSerial = v +func (p *TQueryOptions) writeField105(oprot thrift.TProtocol) (err error) { + if p.IsSetSpillStreamingAggMemLimit() { + if err = oprot.WriteFieldBegin("spill_streaming_agg_mem_limit", thrift.I64, 105); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.SpillStreamingAggMemLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 105 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 105 end error: ", p), err) } -func (p *TQueryOptions) ReadField75(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableInsertStrict = v +func (p *TQueryOptions) writeField106(oprot thrift.TProtocol) (err error) { + if p.IsSetDataQueueMaxBlocks() { + if err = oprot.WriteFieldBegin("data_queue_max_blocks", thrift.I64, 106); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.DataQueueMaxBlocks); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 106 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 106 end error: ", p), err) } -func (p *TQueryOptions) ReadField76(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableInvertedIndexQuery = v +func (p *TQueryOptions) writeField107(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableCommonExprPushdownForInvertedIndex() { + if err = oprot.WriteFieldBegin("enable_common_expr_pushdown_for_inverted_index", thrift.BOOL, 107); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableCommonExprPushdownForInvertedIndex); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 107 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 107 end error: ", p), err) } -func (p *TQueryOptions) ReadField77(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.TruncateCharOrVarcharColumns = v +func (p *TQueryOptions) writeField108(oprot thrift.TProtocol) (err error) { + if p.IsSetLocalExchangeFreeBlocksLimit() { + if err = oprot.WriteFieldBegin("local_exchange_free_blocks_limit", thrift.I64, 108); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LocalExchangeFreeBlocksLimit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 108 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 108 end error: ", p), err) } -func (p *TQueryOptions) ReadField78(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableHashJoinEarlyStartProbe = v +func (p *TQueryOptions) writeField109(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableForceSpill() { + if err = oprot.WriteFieldBegin("enable_force_spill", thrift.BOOL, 109); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableForceSpill); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 109 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 109 end error: ", p), err) } -func (p *TQueryOptions) ReadField79(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnablePipelineXEngine = v +func (p *TQueryOptions) 
writeField110(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableParquetFilterByMinMax() { + if err = oprot.WriteFieldBegin("enable_parquet_filter_by_min_max", thrift.BOOL, 110); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableParquetFilterByMinMax); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 110 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 110 end error: ", p), err) } -func (p *TQueryOptions) ReadField80(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableMemtableOnSinkNode = v +func (p *TQueryOptions) writeField111(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableOrcFilterByMinMax() { + if err = oprot.WriteFieldBegin("enable_orc_filter_by_min_max", thrift.BOOL, 111); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableOrcFilterByMinMax); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 111 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 111 end error: ", p), err) } -func (p *TQueryOptions) ReadField81(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableDeleteSubPredicateV2 = v +func (p *TQueryOptions) writeField112(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxColumnReaderNum() { + if err = oprot.WriteFieldBegin("max_column_reader_num", thrift.I32, 112); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.MaxColumnReaderNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 112 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 112 end error: ", p), err) } -func (p *TQueryOptions) ReadField82(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.FeProcessUuid = v +func (p *TQueryOptions) writeField113(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableLocalMergeSort() { + if err = oprot.WriteFieldBegin("enable_local_merge_sort", thrift.BOOL, 113); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableLocalMergeSort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 113 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 113 end error: ", p), err) } -func (p *TQueryOptions) ReadField83(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.InvertedIndexConjunctionOptThreshold = v +func (p *TQueryOptions) writeField114(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableParallelResultSink() { + if err = oprot.WriteFieldBegin("enable_parallel_result_sink", thrift.BOOL, 114); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableParallelResultSink); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 114 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 114 end error: ", p), err) } -func (p *TQueryOptions) ReadField84(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnableProfile = v +func (p *TQueryOptions) writeField115(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableShortCircuitQueryAccessColumnStore() { + if err = oprot.WriteFieldBegin("enable_short_circuit_query_access_column_store", thrift.BOOL, 115); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableShortCircuitQueryAccessColumnStore); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 115 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 115 end error: ", p), err) } -func (p *TQueryOptions) ReadField85(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnablePageCache = v +func (p *TQueryOptions) writeField116(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableNoNeedReadDataOpt() { + if err = oprot.WriteFieldBegin("enable_no_need_read_data_opt", thrift.BOOL, 116); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableNoNeedReadDataOpt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 116 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 116 end error: ", p), err) } -func (p *TQueryOptions) ReadField86(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.AnalyzeTimeout = v +func (p *TQueryOptions) writeField117(oprot thrift.TProtocol) (err error) { + if p.IsSetReadCsvEmptyLineAsNull() { + if err = oprot.WriteFieldBegin("read_csv_empty_line_as_null", thrift.BOOL, 117); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.ReadCsvEmptyLineAsNull); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 117 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 117 end error: ", p), err) } -func (p *TQueryOptions) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TQueryOptions"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError +func (p *TQueryOptions) writeField118(oprot thrift.TProtocol) (err error) { + if p.IsSetSerdeDialect() { + if err = oprot.WriteFieldBegin("serde_dialect", thrift.I32, 118); err != nil { + goto WriteFieldBeginError } - if err = p.writeField5(oprot); err != nil { - 
fieldId = 5 - goto WriteFieldError + if err := oprot.WriteI32(int32(p.SerdeDialect)); err != nil { + return err } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 118 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 118 end error: ", p), err) +} + +func (p *TQueryOptions) writeField119(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableMatchWithoutInvertedIndex() { + if err = oprot.WriteFieldBegin("enable_match_without_inverted_index", thrift.BOOL, 119); err != nil { + goto WriteFieldBeginError } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableMatchWithoutInvertedIndex); err != nil { + return err } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 119 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 119 end error: ", p), err) +} + +func (p *TQueryOptions) writeField120(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + if err = oprot.WriteFieldBegin("enable_fallback_on_missing_inverted_index", thrift.BOOL, 120); err != nil { + goto WriteFieldBeginError } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableFallbackOnMissingInvertedIndex); err != nil { + return err } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 120 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 120 end error: ", p), err) +} + +func (p *TQueryOptions) writeField121(oprot thrift.TProtocol) (err error) { + if p.IsSetKeepCarriageReturn() { + if err = oprot.WriteFieldBegin("keep_carriage_return", thrift.BOOL, 121); err != nil { + goto WriteFieldBeginError } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError + if err := oprot.WriteBool(p.KeepCarriageReturn); err != nil { + return err } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 121 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 121 end error: ", p), err) +} + +func (p *TQueryOptions) writeField122(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeBloomFilterMinSize() { + if err = oprot.WriteFieldBegin("runtime_bloom_filter_min_size", thrift.I32, 122); err != nil { + goto 
WriteFieldBeginError } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError + if err := oprot.WriteI32(p.RuntimeBloomFilterMinSize); err != nil { + return err } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 122 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 122 end error: ", p), err) +} + +func (p *TQueryOptions) writeField123(oprot thrift.TProtocol) (err error) { + if p.IsSetHiveParquetUseColumnNames() { + if err = oprot.WriteFieldBegin("hive_parquet_use_column_names", thrift.BOOL, 123); err != nil { + goto WriteFieldBeginError } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError + if err := oprot.WriteBool(p.HiveParquetUseColumnNames); err != nil { + return err } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField23(oprot); err != nil { - fieldId = 23 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 123 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 123 end error: ", p), err) +} + +func (p *TQueryOptions) writeField124(oprot thrift.TProtocol) (err error) { + if p.IsSetHiveOrcUseColumnNames() { + if err = oprot.WriteFieldBegin("hive_orc_use_column_names", thrift.BOOL, 124); err != nil { + goto WriteFieldBeginError } - if err = p.writeField24(oprot); err != nil { - fieldId = 24 - goto WriteFieldError + if err := oprot.WriteBool(p.HiveOrcUseColumnNames); err != nil { + return err } - if err = p.writeField25(oprot); err != nil { - fieldId = 25 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField26(oprot); err != nil { - fieldId = 26 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 124 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 124 end error: ", p), err) +} + +func (p *TQueryOptions) writeField125(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableSegmentCache() { + if err = oprot.WriteFieldBegin("enable_segment_cache", thrift.BOOL, 125); err != nil { + goto WriteFieldBeginError } - if err = p.writeField27(oprot); err != nil { - fieldId = 27 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableSegmentCache); err != nil { + return err } - if err = p.writeField28(oprot); err != nil { - fieldId = 28 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField29(oprot); err != nil { - fieldId = 29 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 125 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 125 end error: ", p), err) +} + +func (p *TQueryOptions) writeField126(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeBloomFilterMaxSize() { + if err = oprot.WriteFieldBegin("runtime_bloom_filter_max_size", thrift.I32, 126); err != nil { + 
goto WriteFieldBeginError } - if err = p.writeField30(oprot); err != nil { - fieldId = 30 - goto WriteFieldError + if err := oprot.WriteI32(p.RuntimeBloomFilterMaxSize); err != nil { + return err } - if err = p.writeField31(oprot); err != nil { - fieldId = 31 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField32(oprot); err != nil { - fieldId = 32 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 126 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 126 end error: ", p), err) +} + +func (p *TQueryOptions) writeField127(oprot thrift.TProtocol) (err error) { + if p.IsSetInListValueCountThreshold() { + if err = oprot.WriteFieldBegin("in_list_value_count_threshold", thrift.I32, 127); err != nil { + goto WriteFieldBeginError } - if err = p.writeField33(oprot); err != nil { - fieldId = 33 - goto WriteFieldError + if err := oprot.WriteI32(p.InListValueCountThreshold); err != nil { + return err } - if err = p.writeField34(oprot); err != nil { - fieldId = 34 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField42(oprot); err != nil { - fieldId = 42 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 127 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 127 end error: ", p), err) +} + +func (p *TQueryOptions) writeField128(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableVerboseProfile() { + if err = oprot.WriteFieldBegin("enable_verbose_profile", thrift.BOOL, 128); err != nil { + goto WriteFieldBeginError } - if err = p.writeField43(oprot); err != nil { - fieldId = 43 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableVerboseProfile); err != nil { + return err } - if err = p.writeField44(oprot); err != nil { - fieldId = 44 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField45(oprot); err != nil { - fieldId = 45 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 128 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 128 end error: ", p), err) +} + +func (p *TQueryOptions) writeField129(oprot thrift.TProtocol) (err error) { + if p.IsSetRpcVerboseProfileMaxInstanceCount() { + if err = oprot.WriteFieldBegin("rpc_verbose_profile_max_instance_count", thrift.I32, 129); err != nil { + goto WriteFieldBeginError } - if err = p.writeField46(oprot); err != nil { - fieldId = 46 - goto WriteFieldError + if err := oprot.WriteI32(p.RpcVerboseProfileMaxInstanceCount); err != nil { + return err } - if err = p.writeField48(oprot); err != nil { - fieldId = 48 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField49(oprot); err != nil { - fieldId = 49 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 129 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 129 end error: ", p), err) +} + +func (p *TQueryOptions) writeField130(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableAdaptivePipelineTaskSerialReadOnLimit() { + if err = 
oprot.WriteFieldBegin("enable_adaptive_pipeline_task_serial_read_on_limit", thrift.BOOL, 130); err != nil { + goto WriteFieldBeginError } - if err = p.writeField50(oprot); err != nil { - fieldId = 50 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableAdaptivePipelineTaskSerialReadOnLimit); err != nil { + return err } - if err = p.writeField51(oprot); err != nil { - fieldId = 51 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField52(oprot); err != nil { - fieldId = 52 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 130 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 130 end error: ", p), err) +} + +func (p *TQueryOptions) writeField131(oprot thrift.TProtocol) (err error) { + if p.IsSetAdaptivePipelineTaskSerialReadOnLimit() { + if err = oprot.WriteFieldBegin("adaptive_pipeline_task_serial_read_on_limit", thrift.I32, 131); err != nil { + goto WriteFieldBeginError } - if err = p.writeField53(oprot); err != nil { - fieldId = 53 - goto WriteFieldError + if err := oprot.WriteI32(p.AdaptivePipelineTaskSerialReadOnLimit); err != nil { + return err } - if err = p.writeField54(oprot); err != nil { - fieldId = 54 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField55(oprot); err != nil { - fieldId = 55 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 131 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 131 end error: ", p), err) +} + +func (p *TQueryOptions) writeField132(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelPrepareThreshold() { + if err = oprot.WriteFieldBegin("parallel_prepare_threshold", thrift.I32, 132); err != nil { + goto WriteFieldBeginError } - if err = p.writeField56(oprot); err != nil { - fieldId = 56 - goto WriteFieldError + if err := oprot.WriteI32(p.ParallelPrepareThreshold); err != nil { + return err } - if err = p.writeField57(oprot); err != nil { - fieldId = 57 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField58(oprot); err != nil { - fieldId = 58 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 132 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 132 end error: ", p), err) +} + +func (p *TQueryOptions) writeField133(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionTopnMaxPartitions() { + if err = oprot.WriteFieldBegin("partition_topn_max_partitions", thrift.I32, 133); err != nil { + goto WriteFieldBeginError } - if err = p.writeField59(oprot); err != nil { - fieldId = 59 - goto WriteFieldError + if err := oprot.WriteI32(p.PartitionTopnMaxPartitions); err != nil { + return err } - if err = p.writeField60(oprot); err != nil { - fieldId = 60 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField61(oprot); err != nil { - fieldId = 61 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 133 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 133 end error: ", p), err) +} + +func (p 
*TQueryOptions) writeField134(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionTopnPrePartitionRows() { + if err = oprot.WriteFieldBegin("partition_topn_pre_partition_rows", thrift.I32, 134); err != nil { + goto WriteFieldBeginError } - if err = p.writeField62(oprot); err != nil { - fieldId = 62 - goto WriteFieldError + if err := oprot.WriteI32(p.PartitionTopnPrePartitionRows); err != nil { + return err } - if err = p.writeField63(oprot); err != nil { - fieldId = 63 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 134 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 134 end error: ", p), err) +} + +func (p *TQueryOptions) writeField135(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableParallelOutfile() { + if err = oprot.WriteFieldBegin("enable_parallel_outfile", thrift.BOOL, 135); err != nil { + goto WriteFieldBeginError } - if err = p.writeField64(oprot); err != nil { - fieldId = 64 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableParallelOutfile); err != nil { + return err } - if err = p.writeField65(oprot); err != nil { - fieldId = 65 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField66(oprot); err != nil { - fieldId = 66 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 135 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 135 end error: ", p), err) +} + +func (p *TQueryOptions) writeField136(oprot thrift.TProtocol) (err error) { + if p.IsSetEnablePhraseQuerySequentialOpt() { + if err = oprot.WriteFieldBegin("enable_phrase_query_sequential_opt", thrift.BOOL, 136); err != nil { + goto WriteFieldBeginError } - if err = p.writeField67(oprot); err != nil { - fieldId = 67 - goto WriteFieldError + if err := oprot.WriteBool(p.EnablePhraseQuerySequentialOpt); err != nil { + return err } - if err = p.writeField68(oprot); err != nil { - fieldId = 68 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField69(oprot); err != nil { - fieldId = 69 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 136 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 136 end error: ", p), err) +} + +func (p *TQueryOptions) writeField137(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableAutoCreateWhenOverwrite() { + if err = oprot.WriteFieldBegin("enable_auto_create_when_overwrite", thrift.BOOL, 137); err != nil { + goto WriteFieldBeginError } - if err = p.writeField70(oprot); err != nil { - fieldId = 70 - goto WriteFieldError + if err := oprot.WriteBool(p.EnableAutoCreateWhenOverwrite); err != nil { + return err } - if err = p.writeField71(oprot); err != nil { - fieldId = 71 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField72(oprot); err != nil { - fieldId = 72 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 137 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 137 end error: ", p), err) +} + +func (p *TQueryOptions) 
writeField138(oprot thrift.TProtocol) (err error) { + if p.IsSetOrcTinyStripeThresholdBytes() { + if err = oprot.WriteFieldBegin("orc_tiny_stripe_threshold_bytes", thrift.I64, 138); err != nil { + goto WriteFieldBeginError } - if err = p.writeField73(oprot); err != nil { - fieldId = 73 - goto WriteFieldError + if err := oprot.WriteI64(p.OrcTinyStripeThresholdBytes); err != nil { + return err } - if err = p.writeField74(oprot); err != nil { - fieldId = 74 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField75(oprot); err != nil { - fieldId = 75 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 138 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 138 end error: ", p), err) +} + +func (p *TQueryOptions) writeField139(oprot thrift.TProtocol) (err error) { + if p.IsSetOrcOnceMaxReadBytes() { + if err = oprot.WriteFieldBegin("orc_once_max_read_bytes", thrift.I64, 139); err != nil { + goto WriteFieldBeginError } - if err = p.writeField76(oprot); err != nil { - fieldId = 76 - goto WriteFieldError + if err := oprot.WriteI64(p.OrcOnceMaxReadBytes); err != nil { + return err } - if err = p.writeField77(oprot); err != nil { - fieldId = 77 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField78(oprot); err != nil { - fieldId = 78 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 139 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 139 end error: ", p), err) +} + +func (p *TQueryOptions) writeField140(oprot thrift.TProtocol) (err error) { + if p.IsSetOrcMaxMergeDistanceBytes() { + if err = oprot.WriteFieldBegin("orc_max_merge_distance_bytes", thrift.I64, 140); err != nil { + goto WriteFieldBeginError } - if err = p.writeField79(oprot); err != nil { - fieldId = 79 - goto WriteFieldError + if err := oprot.WriteI64(p.OrcMaxMergeDistanceBytes); err != nil { + return err } - if err = p.writeField80(oprot); err != nil { - fieldId = 80 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField81(oprot); err != nil { - fieldId = 81 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 140 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 140 end error: ", p), err) +} + +func (p *TQueryOptions) writeField141(oprot thrift.TProtocol) (err error) { + if p.IsSetIgnoreRuntimeFilterError() { + if err = oprot.WriteFieldBegin("ignore_runtime_filter_error", thrift.BOOL, 141); err != nil { + goto WriteFieldBeginError } - if err = p.writeField82(oprot); err != nil { - fieldId = 82 - goto WriteFieldError + if err := oprot.WriteBool(p.IgnoreRuntimeFilterError); err != nil { + return err } - if err = p.writeField83(oprot); err != nil { - fieldId = 83 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err = p.writeField84(oprot); err != nil { - fieldId = 84 - goto WriteFieldError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 141 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 141 end error: ", p), err) +} + +func (p 
*TQueryOptions) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetDisableFileCache() { + if err = oprot.WriteFieldBegin("disable_file_cache", thrift.BOOL, 1000); err != nil { + goto WriteFieldBeginError } - if err = p.writeField85(oprot); err != nil { - fieldId = 85 - goto WriteFieldError + if err := oprot.WriteBool(p.DisableFileCache); err != nil { + return err } - if err = p.writeField86(oprot); err != nil { - fieldId = 86 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TQueryOptions) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryOptions(%+v)", *p) + +} +func (p *TQueryOptions) DeepEqual(ano *TQueryOptions) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.AbortOnError) { + return false + } + if !p.Field2DeepEqual(ano.MaxErrors) { + return false + } + if !p.Field3DeepEqual(ano.DisableCodegen) { + return false + } + if !p.Field4DeepEqual(ano.BatchSize) { + return false + } + if !p.Field5DeepEqual(ano.NumNodes) { + return false + } + if !p.Field6DeepEqual(ano.MaxScanRangeLength) { + return false + } + if !p.Field7DeepEqual(ano.NumScannerThreads) { + return false + } + if !p.Field8DeepEqual(ano.MaxIoBuffers) { + return false + } + if !p.Field9DeepEqual(ano.AllowUnsupportedFormats) { + return false + } + if !p.Field10DeepEqual(ano.DefaultOrderByLimit) { + return false + } + if !p.Field12DeepEqual(ano.MemLimit) { + return false + } + if !p.Field13DeepEqual(ano.AbortOnDefaultLimitExceeded) { + return false + } + if !p.Field14DeepEqual(ano.QueryTimeout) { + return false + } + if !p.Field15DeepEqual(ano.IsReportSuccess) { + return false + } + if !p.Field16DeepEqual(ano.CodegenLevel) { + return false + } + if !p.Field17DeepEqual(ano.KuduLatestObservedTs) { + return false + } + if !p.Field18DeepEqual(ano.QueryType) { + return false + } + if !p.Field19DeepEqual(ano.MinReservation) { + return false + } + if !p.Field20DeepEqual(ano.MaxReservation) { + return false + } + if !p.Field21DeepEqual(ano.InitialReservationTotalClaims) { + return false + } + if !p.Field22DeepEqual(ano.BufferPoolLimit) { + return false + } + if !p.Field23DeepEqual(ano.DefaultSpillableBufferSize) { + return false + } + if !p.Field24DeepEqual(ano.MinSpillableBufferSize) { + return false + } + if !p.Field25DeepEqual(ano.MaxRowSize) { + return false + } + if !p.Field26DeepEqual(ano.DisableStreamPreaggregations) { + return false + } + if !p.Field27DeepEqual(ano.MtDop) { + return false + } + if !p.Field28DeepEqual(ano.LoadMemLimit) { + return false + } + if !p.Field29DeepEqual(ano.MaxScanKeyNum) { + return false + } + if !p.Field30DeepEqual(ano.MaxPushdownConditionsPerColumn) { + return false + } + if !p.Field31DeepEqual(ano.EnableSpilling) { + return false + } + if !p.Field32DeepEqual(ano.EnableEnableExchangeNodeParallelMerge) { + return false } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if !p.Field33DeepEqual(ano.RuntimeFilterWaitTimeMs) { + return false } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if !p.Field34DeepEqual(ano.RuntimeFilterMaxInNum) { + return false } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write 
struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *TQueryOptions) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetAbortOnError() { - if err = oprot.WriteFieldBegin("abort_on_error", thrift.BOOL, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.AbortOnError); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.Field42DeepEqual(ano.ResourceLimit) { + return false + } + if !p.Field43DeepEqual(ano.ReturnObjectDataAsBinary) { + return false + } + if !p.Field44DeepEqual(ano.TrimTailingSpacesForExternalTableQuery) { + return false + } + if !p.Field45DeepEqual(ano.EnableFunctionPushdown) { + return false + } + if !p.Field46DeepEqual(ano.FragmentTransmissionCompressionCodec) { + return false + } + if !p.Field48DeepEqual(ano.EnableLocalExchange) { + return false + } + if !p.Field49DeepEqual(ano.SkipStorageEngineMerge) { + return false + } + if !p.Field50DeepEqual(ano.SkipDeletePredicate) { + return false + } + if !p.Field51DeepEqual(ano.EnableNewShuffleHashMethod) { + return false + } + if !p.Field52DeepEqual(ano.BeExecVersion) { + return false + } + if !p.Field53DeepEqual(ano.PartitionedHashJoinRowsThreshold) { + return false + } + if !p.Field54DeepEqual(ano.EnableShareHashTableForBroadcastJoin) { + return false + } + if !p.Field55DeepEqual(ano.CheckOverflowForDecimal) { + return false + } + if !p.Field56DeepEqual(ano.SkipDeleteBitmap) { + return false + } + if !p.Field57DeepEqual(ano.EnablePipelineEngine) { + return false + } + if !p.Field58DeepEqual(ano.RepeatMaxNum) { + return false + } + if !p.Field59DeepEqual(ano.ExternalSortBytesThreshold) { + return false + } + if !p.Field60DeepEqual(ano.PartitionedHashAggRowsThreshold) { + return false + } + if !p.Field61DeepEqual(ano.EnableFileCache) { + return false + } + if !p.Field62DeepEqual(ano.InsertTimeout) { + return false + } + if !p.Field63DeepEqual(ano.ExecutionTimeout) { + return false + } + if !p.Field64DeepEqual(ano.DryRunQuery) { + return false + } + if !p.Field65DeepEqual(ano.EnableCommonExprPushdown) { + return false + } + if !p.Field66DeepEqual(ano.ParallelInstance) { + return false + } + if !p.Field67DeepEqual(ano.MysqlRowBinaryFormat) { + return false + } + if !p.Field68DeepEqual(ano.ExternalAggBytesThreshold) { + return false + } + if !p.Field69DeepEqual(ano.ExternalAggPartitionBits) { + return false + } + if !p.Field70DeepEqual(ano.FileCacheBasePath) { + return false + } + if !p.Field71DeepEqual(ano.EnableParquetLazyMat) { + return false + } + if !p.Field72DeepEqual(ano.EnableOrcLazyMat) { + return false + } + if !p.Field73DeepEqual(ano.ScanQueueMemLimit) { + return false + } + if !p.Field74DeepEqual(ano.EnableScanNodeRunSerial) { + return false + } + if !p.Field75DeepEqual(ano.EnableInsertStrict) { + return false + } + if !p.Field76DeepEqual(ano.EnableInvertedIndexQuery) { + return false + } + if !p.Field77DeepEqual(ano.TruncateCharOrVarcharColumns) { + return false + } + if !p.Field78DeepEqual(ano.EnableHashJoinEarlyStartProbe) { + return false + } + if !p.Field79DeepEqual(ano.EnablePipelineXEngine) { + return false + } + if !p.Field80DeepEqual(ano.EnableMemtableOnSinkNode) { + return false + } + if 
!p.Field81DeepEqual(ano.EnableDeleteSubPredicateV2) { + return false + } + if !p.Field82DeepEqual(ano.FeProcessUuid) { + return false + } + if !p.Field83DeepEqual(ano.InvertedIndexConjunctionOptThreshold) { + return false + } + if !p.Field84DeepEqual(ano.EnableProfile) { + return false + } + if !p.Field85DeepEqual(ano.EnablePageCache) { + return false + } + if !p.Field86DeepEqual(ano.AnalyzeTimeout) { + return false + } + if !p.Field87DeepEqual(ano.FasterFloatConvert) { + return false + } + if !p.Field88DeepEqual(ano.EnableDecimal256) { + return false + } + if !p.Field89DeepEqual(ano.EnableLocalShuffle) { + return false + } + if !p.Field90DeepEqual(ano.SkipMissingVersion) { + return false + } + if !p.Field91DeepEqual(ano.RuntimeFilterWaitInfinitely) { + return false + } + if !p.Field92DeepEqual(ano.WaitFullBlockScheduleTimes) { + return false + } + if !p.Field93DeepEqual(ano.InvertedIndexMaxExpansions) { + return false + } + if !p.Field94DeepEqual(ano.InvertedIndexSkipThreshold) { + return false + } + if !p.Field95DeepEqual(ano.EnableParallelScan) { + return false + } + if !p.Field96DeepEqual(ano.ParallelScanMaxScannersCount) { + return false + } + if !p.Field97DeepEqual(ano.ParallelScanMinRowsPerScanner) { + return false + } + if !p.Field98DeepEqual(ano.SkipBadTablet) { + return false + } + if !p.Field99DeepEqual(ano.ScannerScaleUpRatio) { + return false + } + if !p.Field100DeepEqual(ano.EnableDistinctStreamingAggregation) { + return false + } + if !p.Field101DeepEqual(ano.EnableJoinSpill) { + return false + } + if !p.Field102DeepEqual(ano.EnableSortSpill) { + return false + } + if !p.Field103DeepEqual(ano.EnableAggSpill) { + return false + } + if !p.Field104DeepEqual(ano.MinRevocableMem) { + return false + } + if !p.Field105DeepEqual(ano.SpillStreamingAggMemLimit) { + return false + } + if !p.Field106DeepEqual(ano.DataQueueMaxBlocks) { + return false + } + if !p.Field107DeepEqual(ano.EnableCommonExprPushdownForInvertedIndex) { + return false + } + if !p.Field108DeepEqual(ano.LocalExchangeFreeBlocksLimit) { + return false + } + if !p.Field109DeepEqual(ano.EnableForceSpill) { + return false + } + if !p.Field110DeepEqual(ano.EnableParquetFilterByMinMax) { + return false + } + if !p.Field111DeepEqual(ano.EnableOrcFilterByMinMax) { + return false + } + if !p.Field112DeepEqual(ano.MaxColumnReaderNum) { + return false + } + if !p.Field113DeepEqual(ano.EnableLocalMergeSort) { + return false + } + if !p.Field114DeepEqual(ano.EnableParallelResultSink) { + return false + } + if !p.Field115DeepEqual(ano.EnableShortCircuitQueryAccessColumnStore) { + return false + } + if !p.Field116DeepEqual(ano.EnableNoNeedReadDataOpt) { + return false + } + if !p.Field117DeepEqual(ano.ReadCsvEmptyLineAsNull) { + return false + } + if !p.Field118DeepEqual(ano.SerdeDialect) { + return false + } + if !p.Field119DeepEqual(ano.EnableMatchWithoutInvertedIndex) { + return false + } + if !p.Field120DeepEqual(ano.EnableFallbackOnMissingInvertedIndex) { + return false + } + if !p.Field121DeepEqual(ano.KeepCarriageReturn) { + return false + } + if !p.Field122DeepEqual(ano.RuntimeBloomFilterMinSize) { + return false + } + if !p.Field123DeepEqual(ano.HiveParquetUseColumnNames) { + return false + } + if !p.Field124DeepEqual(ano.HiveOrcUseColumnNames) { + return false + } + if !p.Field125DeepEqual(ano.EnableSegmentCache) { + return false + } + if !p.Field126DeepEqual(ano.RuntimeBloomFilterMaxSize) { + return false + } + if !p.Field127DeepEqual(ano.InListValueCountThreshold) { + return false + } + if 
!p.Field128DeepEqual(ano.EnableVerboseProfile) { + return false + } + if !p.Field129DeepEqual(ano.RpcVerboseProfileMaxInstanceCount) { + return false + } + if !p.Field130DeepEqual(ano.EnableAdaptivePipelineTaskSerialReadOnLimit) { + return false + } + if !p.Field131DeepEqual(ano.AdaptivePipelineTaskSerialReadOnLimit) { + return false + } + if !p.Field132DeepEqual(ano.ParallelPrepareThreshold) { + return false + } + if !p.Field133DeepEqual(ano.PartitionTopnMaxPartitions) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TQueryOptions) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxErrors() { - if err = oprot.WriteFieldBegin("max_errors", thrift.I32, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.MaxErrors); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.Field134DeepEqual(ano.PartitionTopnPrePartitionRows) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TQueryOptions) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDisableCodegen() { - if err = oprot.WriteFieldBegin("disable_codegen", thrift.BOOL, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.DisableCodegen); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.Field135DeepEqual(ano.EnableParallelOutfile) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TQueryOptions) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetBatchSize() { - if err = oprot.WriteFieldBegin("batch_size", thrift.I32, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.BatchSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.Field136DeepEqual(ano.EnablePhraseQuerySequentialOpt) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + if !p.Field137DeepEqual(ano.EnableAutoCreateWhenOverwrite) { + return false + } + if !p.Field138DeepEqual(ano.OrcTinyStripeThresholdBytes) { + return false + } + if !p.Field139DeepEqual(ano.OrcOnceMaxReadBytes) { + return false + } + if !p.Field140DeepEqual(ano.OrcMaxMergeDistanceBytes) { + return false + } + if !p.Field141DeepEqual(ano.IgnoreRuntimeFilterError) { + return false + } + if !p.Field1000DeepEqual(ano.DisableFileCache) { + return false + } + return true } -func (p *TQueryOptions) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetNumNodes() { - if err = oprot.WriteFieldBegin("num_nodes", thrift.I32, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.NumNodes); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } 
+func (p *TQueryOptions) Field1DeepEqual(src bool) bool { + + if p.AbortOnError != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return true } +func (p *TQueryOptions) Field2DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxScanRangeLength() { - if err = oprot.WriteFieldBegin("max_scan_range_length", thrift.I64, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MaxScanRangeLength); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxErrors != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return true } +func (p *TQueryOptions) Field3DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetNumScannerThreads() { - if err = oprot.WriteFieldBegin("num_scanner_threads", thrift.I32, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.NumScannerThreads); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.DisableCodegen != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return true } +func (p *TQueryOptions) Field4DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxIoBuffers() { - if err = oprot.WriteFieldBegin("max_io_buffers", thrift.I32, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.MaxIoBuffers); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.BatchSize != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return true } +func (p *TQueryOptions) Field5DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetAllowUnsupportedFormats() { - if err = oprot.WriteFieldBegin("allow_unsupported_formats", thrift.BOOL, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.AllowUnsupportedFormats); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.NumNodes != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return true } +func (p *TQueryOptions) Field6DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultOrderByLimit() { - if err = oprot.WriteFieldBegin("default_order_by_limit", thrift.I64, 10); err != nil { - goto WriteFieldBeginError - } - if err := 
oprot.WriteI64(p.DefaultOrderByLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxScanRangeLength != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return true } +func (p *TQueryOptions) Field7DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetMemLimit() { - if err = oprot.WriteFieldBegin("mem_limit", thrift.I64, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MemLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.NumScannerThreads != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return true } +func (p *TQueryOptions) Field8DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetAbortOnDefaultLimitExceeded() { - if err = oprot.WriteFieldBegin("abort_on_default_limit_exceeded", thrift.BOOL, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.AbortOnDefaultLimitExceeded); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxIoBuffers != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) + return true } +func (p *TQueryOptions) Field9DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryTimeout() { - if err = oprot.WriteFieldBegin("query_timeout", thrift.I32, 14); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.QueryTimeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) + if p.AllowUnsupportedFormats != src { + return false + } + return true } +func (p *TQueryOptions) Field10DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetIsReportSuccess() { - if err = oprot.WriteFieldBegin("is_report_success", thrift.BOOL, 15); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.IsReportSuccess); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.DefaultOrderByLimit != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) + return true } +func (p *TQueryOptions) Field12DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetCodegenLevel() { - if err = 
oprot.WriteFieldBegin("codegen_level", thrift.I32, 16); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.CodegenLevel); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MemLimit != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) + return true } +func (p *TQueryOptions) Field13DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetKuduLatestObservedTs() { - if err = oprot.WriteFieldBegin("kudu_latest_observed_ts", thrift.I64, 17); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.KuduLatestObservedTs); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.AbortOnDefaultLimitExceeded != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) + return true } +func (p *TQueryOptions) Field14DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryType() { - if err = oprot.WriteFieldBegin("query_type", thrift.I32, 18); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.QueryType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.QueryTimeout != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) + return true } +func (p *TQueryOptions) Field15DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetMinReservation() { - if err = oprot.WriteFieldBegin("min_reservation", thrift.I64, 19); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MinReservation); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.IsReportSuccess != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) + return true } +func (p *TQueryOptions) Field16DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxReservation() { - if err = oprot.WriteFieldBegin("max_reservation", thrift.I64, 20); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MaxReservation); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.CodegenLevel != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) + return true } +func (p *TQueryOptions) Field17DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField21(oprot 
thrift.TProtocol) (err error) { - if p.IsSetInitialReservationTotalClaims() { - if err = oprot.WriteFieldBegin("initial_reservation_total_claims", thrift.I64, 21); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.InitialReservationTotalClaims); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.KuduLatestObservedTs != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) + return true } +func (p *TQueryOptions) Field18DeepEqual(src TQueryType) bool { -func (p *TQueryOptions) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetBufferPoolLimit() { - if err = oprot.WriteFieldBegin("buffer_pool_limit", thrift.I64, 22); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.BufferPoolLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.QueryType != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) + return true } +func (p *TQueryOptions) Field19DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultSpillableBufferSize() { - if err = oprot.WriteFieldBegin("default_spillable_buffer_size", thrift.I64, 23); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.DefaultSpillableBufferSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MinReservation != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) + return true } +func (p *TQueryOptions) Field20DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField24(oprot thrift.TProtocol) (err error) { - if p.IsSetMinSpillableBufferSize() { - if err = oprot.WriteFieldBegin("min_spillable_buffer_size", thrift.I64, 24); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MinSpillableBufferSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxReservation != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) + return true } +func (p *TQueryOptions) Field21DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField25(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxRowSize() { - if err = oprot.WriteFieldBegin("max_row_size", thrift.I64, 25); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.MaxRowSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.InitialReservationTotalClaims != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) + return true } +func (p *TQueryOptions) Field22DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField26(oprot thrift.TProtocol) (err error) { - if p.IsSetDisableStreamPreaggregations() { - if err = oprot.WriteFieldBegin("disable_stream_preaggregations", thrift.BOOL, 26); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.DisableStreamPreaggregations); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.BufferPoolLimit != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) + return true } +func (p *TQueryOptions) Field23DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField27(oprot thrift.TProtocol) (err error) { - if p.IsSetMtDop() { - if err = oprot.WriteFieldBegin("mt_dop", thrift.I32, 27); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.MtDop); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.DefaultSpillableBufferSize != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) + return true } +func (p *TQueryOptions) Field24DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField28(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadMemLimit() { - if err = oprot.WriteFieldBegin("load_mem_limit", thrift.I64, 28); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.LoadMemLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MinSpillableBufferSize != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) + return true } +func (p *TQueryOptions) Field25DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField29(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxScanKeyNum() { - if err = oprot.WriteFieldBegin("max_scan_key_num", thrift.I32, 29); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.MaxScanKeyNum); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxRowSize != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) + return true } +func (p *TQueryOptions) Field26DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField30(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxPushdownConditionsPerColumn() { - if err = oprot.WriteFieldBegin("max_pushdown_conditions_per_column", thrift.I32, 30); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.MaxPushdownConditionsPerColumn); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.DisableStreamPreaggregations != src { + return 
false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) + return true } +func (p *TQueryOptions) Field27DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField31(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableSpilling() { - if err = oprot.WriteFieldBegin("enable_spilling", thrift.BOOL, 31); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableSpilling); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MtDop != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) + return true } +func (p *TQueryOptions) Field28DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField32(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableEnableExchangeNodeParallelMerge() { - if err = oprot.WriteFieldBegin("enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableEnableExchangeNodeParallelMerge); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.LoadMemLimit != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) + return true } +func (p *TQueryOptions) Field29DeepEqual(src *int32) bool { -func (p *TQueryOptions) writeField33(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterWaitTimeMs() { - if err = oprot.WriteFieldBegin("runtime_filter_wait_time_ms", thrift.I32, 33); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.RuntimeFilterWaitTimeMs); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxScanKeyNum == src { + return true + } else if p.MaxScanKeyNum == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 33 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) + if *p.MaxScanKeyNum != *src { + return false + } + return true } +func (p *TQueryOptions) Field30DeepEqual(src *int32) bool { -func (p *TQueryOptions) writeField34(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterMaxInNum() { - if err = oprot.WriteFieldBegin("runtime_filter_max_in_num", thrift.I32, 34); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.RuntimeFilterMaxInNum); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MaxPushdownConditionsPerColumn == src { + return true + } else if p.MaxPushdownConditionsPerColumn == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) + if *p.MaxPushdownConditionsPerColumn != *src { + return false + } + return true } +func (p 
*TQueryOptions) Field31DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField42(oprot thrift.TProtocol) (err error) { - if p.IsSetResourceLimit() { - if err = oprot.WriteFieldBegin("resource_limit", thrift.STRUCT, 42); err != nil { - goto WriteFieldBeginError - } - if err := p.ResourceLimit.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableSpilling != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 42 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 42 end error: ", p), err) + return true } +func (p *TQueryOptions) Field32DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField43(oprot thrift.TProtocol) (err error) { - if p.IsSetReturnObjectDataAsBinary() { - if err = oprot.WriteFieldBegin("return_object_data_as_binary", thrift.BOOL, 43); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.ReturnObjectDataAsBinary); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableEnableExchangeNodeParallelMerge != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 43 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) + return true } +func (p *TQueryOptions) Field33DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField44(oprot thrift.TProtocol) (err error) { - if p.IsSetTrimTailingSpacesForExternalTableQuery() { - if err = oprot.WriteFieldBegin("trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.TrimTailingSpacesForExternalTableQuery); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.RuntimeFilterWaitTimeMs != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) + return true } +func (p *TQueryOptions) Field34DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField45(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableFunctionPushdown() { - if err = oprot.WriteFieldBegin("enable_function_pushdown", thrift.BOOL, 45); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableFunctionPushdown); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.RuntimeFilterMaxInNum != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 45 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 45 end error: ", p), err) + return true } +func (p *TQueryOptions) Field42DeepEqual(src *TResourceLimit) bool { -func (p *TQueryOptions) writeField46(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentTransmissionCompressionCodec() { - if err = oprot.WriteFieldBegin("fragment_transmission_compression_codec", thrift.STRING, 46); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.FragmentTransmissionCompressionCodec); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto 
WriteFieldEndError - } + if !p.ResourceLimit.DeepEqual(src) { + return false + } + return true +} +func (p *TQueryOptions) Field43DeepEqual(src bool) bool { + + if p.ReturnObjectDataAsBinary != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 46 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 46 end error: ", p), err) + return true } +func (p *TQueryOptions) Field44DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField48(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableLocalExchange() { - if err = oprot.WriteFieldBegin("enable_local_exchange", thrift.BOOL, 48); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableLocalExchange); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.TrimTailingSpacesForExternalTableQuery != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 48 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 48 end error: ", p), err) + return true } +func (p *TQueryOptions) Field45DeepEqual(src *bool) bool { -func (p *TQueryOptions) writeField49(oprot thrift.TProtocol) (err error) { - if p.IsSetSkipStorageEngineMerge() { - if err = oprot.WriteFieldBegin("skip_storage_engine_merge", thrift.BOOL, 49); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.SkipStorageEngineMerge); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableFunctionPushdown == src { + return true + } else if p.EnableFunctionPushdown == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 49 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 49 end error: ", p), err) + if *p.EnableFunctionPushdown != *src { + return false + } + return true } +func (p *TQueryOptions) Field46DeepEqual(src *string) bool { -func (p *TQueryOptions) writeField50(oprot thrift.TProtocol) (err error) { - if p.IsSetSkipDeletePredicate() { - if err = oprot.WriteFieldBegin("skip_delete_predicate", thrift.BOOL, 50); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.SkipDeletePredicate); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.FragmentTransmissionCompressionCodec == src { + return true + } else if p.FragmentTransmissionCompressionCodec == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 50 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 50 end error: ", p), err) + if strings.Compare(*p.FragmentTransmissionCompressionCodec, *src) != 0 { + return false + } + return true } +func (p *TQueryOptions) Field48DeepEqual(src *bool) bool { -func (p *TQueryOptions) writeField51(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableNewShuffleHashMethod() { - if err = oprot.WriteFieldBegin("enable_new_shuffle_hash_method", thrift.BOOL, 51); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableNewShuffleHashMethod); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if 
p.EnableLocalExchange == src { + return true + } else if p.EnableLocalExchange == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 51 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 51 end error: ", p), err) + if *p.EnableLocalExchange != *src { + return false + } + return true } +func (p *TQueryOptions) Field49DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField52(oprot thrift.TProtocol) (err error) { - if p.IsSetBeExecVersion() { - if err = oprot.WriteFieldBegin("be_exec_version", thrift.I32, 52); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.BeExecVersion); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.SkipStorageEngineMerge != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 52 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 52 end error: ", p), err) + return true } +func (p *TQueryOptions) Field50DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField53(oprot thrift.TProtocol) (err error) { - if p.IsSetPartitionedHashJoinRowsThreshold() { - if err = oprot.WriteFieldBegin("partitioned_hash_join_rows_threshold", thrift.I32, 53); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.PartitionedHashJoinRowsThreshold); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.SkipDeletePredicate != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 53 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 53 end error: ", p), err) + return true } +func (p *TQueryOptions) Field51DeepEqual(src *bool) bool { -func (p *TQueryOptions) writeField54(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableShareHashTableForBroadcastJoin() { - if err = oprot.WriteFieldBegin("enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.EnableShareHashTableForBroadcastJoin); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableNewShuffleHashMethod == src { + return true + } else if p.EnableNewShuffleHashMethod == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 54 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 54 end error: ", p), err) + if *p.EnableNewShuffleHashMethod != *src { + return false + } + return true } +func (p *TQueryOptions) Field52DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField55(oprot thrift.TProtocol) (err error) { - if p.IsSetCheckOverflowForDecimal() { - if err = oprot.WriteFieldBegin("check_overflow_for_decimal", thrift.BOOL, 55); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.CheckOverflowForDecimal); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.BeExecVersion != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 55 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 55 end error: ", p), err) + return true } +func (p *TQueryOptions) Field53DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField56(oprot thrift.TProtocol) (err error) { - if p.IsSetSkipDeleteBitmap() { - if err = oprot.WriteFieldBegin("skip_delete_bitmap", thrift.BOOL, 56); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.SkipDeleteBitmap); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.PartitionedHashJoinRowsThreshold != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 56 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 56 end error: ", p), err) + return true } +func (p *TQueryOptions) Field54DeepEqual(src *bool) bool { -func (p *TQueryOptions) writeField57(oprot thrift.TProtocol) (err error) { - if p.IsSetEnablePipelineEngine() { - if err = oprot.WriteFieldBegin("enable_pipeline_engine", thrift.BOOL, 57); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnablePipelineEngine); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableShareHashTableForBroadcastJoin == src { + return true + } else if p.EnableShareHashTableForBroadcastJoin == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 57 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 57 end error: ", p), err) + if *p.EnableShareHashTableForBroadcastJoin != *src { + return false + } + return true } +func (p *TQueryOptions) Field55DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField58(oprot thrift.TProtocol) (err error) { - if p.IsSetRepeatMaxNum() { - if err = oprot.WriteFieldBegin("repeat_max_num", thrift.I32, 58); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.RepeatMaxNum); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.CheckOverflowForDecimal != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 58 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 58 end error: ", p), err) + return true +} +func (p *TQueryOptions) Field56DeepEqual(src bool) bool { + + if p.SkipDeleteBitmap != src { + return false + } + return true } +func (p *TQueryOptions) Field57DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField59(oprot thrift.TProtocol) (err error) { - if p.IsSetExternalSortBytesThreshold() { - if err = oprot.WriteFieldBegin("external_sort_bytes_threshold", thrift.I64, 59); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.ExternalSortBytesThreshold); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnablePipelineEngine != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 59 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 59 end error: ", p), err) + return true } +func (p *TQueryOptions) Field58DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField60(oprot thrift.TProtocol) (err error) { - if 
p.IsSetPartitionedHashAggRowsThreshold() { - if err = oprot.WriteFieldBegin("partitioned_hash_agg_rows_threshold", thrift.I32, 60); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.PartitionedHashAggRowsThreshold); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.RepeatMaxNum != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 60 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 60 end error: ", p), err) + return true } +func (p *TQueryOptions) Field59DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField61(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableFileCache() { - if err = oprot.WriteFieldBegin("enable_file_cache", thrift.BOOL, 61); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableFileCache); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ExternalSortBytesThreshold != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 61 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 61 end error: ", p), err) + return true } +func (p *TQueryOptions) Field60DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField62(oprot thrift.TProtocol) (err error) { - if p.IsSetInsertTimeout() { - if err = oprot.WriteFieldBegin("insert_timeout", thrift.I32, 62); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.InsertTimeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.PartitionedHashAggRowsThreshold != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 62 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 62 end error: ", p), err) + return true } +func (p *TQueryOptions) Field61DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField63(oprot thrift.TProtocol) (err error) { - if p.IsSetExecutionTimeout() { - if err = oprot.WriteFieldBegin("execution_timeout", thrift.I32, 63); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.ExecutionTimeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableFileCache != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 63 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 63 end error: ", p), err) + return true } +func (p *TQueryOptions) Field62DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField64(oprot thrift.TProtocol) (err error) { - if p.IsSetDryRunQuery() { - if err = oprot.WriteFieldBegin("dry_run_query", thrift.BOOL, 64); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.DryRunQuery); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.InsertTimeout != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 64 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 64 end error: ", p), err) + return true 
} +func (p *TQueryOptions) Field63DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField65(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableCommonExprPushdown() { - if err = oprot.WriteFieldBegin("enable_common_expr_pushdown", thrift.BOOL, 65); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableCommonExprPushdown); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ExecutionTimeout != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 65 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 65 end error: ", p), err) + return true } +func (p *TQueryOptions) Field64DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField66(oprot thrift.TProtocol) (err error) { - if p.IsSetParallelInstance() { - if err = oprot.WriteFieldBegin("parallel_instance", thrift.I32, 66); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.ParallelInstance); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.DryRunQuery != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 66 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 66 end error: ", p), err) + return true } +func (p *TQueryOptions) Field65DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField67(oprot thrift.TProtocol) (err error) { - if p.IsSetMysqlRowBinaryFormat() { - if err = oprot.WriteFieldBegin("mysql_row_binary_format", thrift.BOOL, 67); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.MysqlRowBinaryFormat); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableCommonExprPushdown != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 67 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 67 end error: ", p), err) + return true } +func (p *TQueryOptions) Field66DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField68(oprot thrift.TProtocol) (err error) { - if p.IsSetExternalAggBytesThreshold() { - if err = oprot.WriteFieldBegin("external_agg_bytes_threshold", thrift.I64, 68); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.ExternalAggBytesThreshold); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ParallelInstance != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 68 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 68 end error: ", p), err) + return true } +func (p *TQueryOptions) Field67DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField69(oprot thrift.TProtocol) (err error) { - if p.IsSetExternalAggPartitionBits() { - if err = oprot.WriteFieldBegin("external_agg_partition_bits", thrift.I32, 69); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.ExternalAggPartitionBits); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.MysqlRowBinaryFormat != src { + return false } - return nil -WriteFieldBeginError: 
- return thrift.PrependError(fmt.Sprintf("%T write field 69 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 69 end error: ", p), err) + return true } +func (p *TQueryOptions) Field68DeepEqual(src int64) bool { -func (p *TQueryOptions) writeField70(oprot thrift.TProtocol) (err error) { - if p.IsSetFileCacheBasePath() { - if err = oprot.WriteFieldBegin("file_cache_base_path", thrift.STRING, 70); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.FileCacheBasePath); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ExternalAggBytesThreshold != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 70 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 70 end error: ", p), err) + return true } - -func (p *TQueryOptions) writeField71(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableParquetLazyMat() { - if err = oprot.WriteFieldBegin("enable_parquet_lazy_mat", thrift.BOOL, 71); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableParquetLazyMat); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TQueryOptions) Field69DeepEqual(src int32) bool { + + if p.ExternalAggPartitionBits != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 71 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 71 end error: ", p), err) + return true } +func (p *TQueryOptions) Field70DeepEqual(src *string) bool { -func (p *TQueryOptions) writeField72(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableOrcLazyMat() { - if err = oprot.WriteFieldBegin("enable_orc_lazy_mat", thrift.BOOL, 72); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableOrcLazyMat); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.FileCacheBasePath == src { + return true + } else if p.FileCacheBasePath == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 72 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 72 end error: ", p), err) + if strings.Compare(*p.FileCacheBasePath, *src) != 0 { + return false + } + return true } +func (p *TQueryOptions) Field71DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField73(oprot thrift.TProtocol) (err error) { - if p.IsSetScanQueueMemLimit() { - if err = oprot.WriteFieldBegin("scan_queue_mem_limit", thrift.I64, 73); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.ScanQueueMemLimit); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableParquetLazyMat != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 73 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 73 end error: ", p), err) + return true } +func (p *TQueryOptions) Field72DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField74(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableScanNodeRunSerial() { - if err = 
oprot.WriteFieldBegin("enable_scan_node_run_serial", thrift.BOOL, 74); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableScanNodeRunSerial); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableOrcLazyMat != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 74 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 74 end error: ", p), err) + return true } +func (p *TQueryOptions) Field73DeepEqual(src *int64) bool { -func (p *TQueryOptions) writeField75(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableInsertStrict() { - if err = oprot.WriteFieldBegin("enable_insert_strict", thrift.BOOL, 75); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableInsertStrict); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ScanQueueMemLimit == src { + return true + } else if p.ScanQueueMemLimit == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 75 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 75 end error: ", p), err) + if *p.ScanQueueMemLimit != *src { + return false + } + return true } +func (p *TQueryOptions) Field74DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField76(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableInvertedIndexQuery() { - if err = oprot.WriteFieldBegin("enable_inverted_index_query", thrift.BOOL, 76); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableInvertedIndexQuery); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableScanNodeRunSerial != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 76 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 76 end error: ", p), err) + return true } +func (p *TQueryOptions) Field75DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField77(oprot thrift.TProtocol) (err error) { - if p.IsSetTruncateCharOrVarcharColumns() { - if err = oprot.WriteFieldBegin("truncate_char_or_varchar_columns", thrift.BOOL, 77); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.TruncateCharOrVarcharColumns); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableInsertStrict != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 77 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 77 end error: ", p), err) + return true } +func (p *TQueryOptions) Field76DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField78(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableHashJoinEarlyStartProbe() { - if err = oprot.WriteFieldBegin("enable_hash_join_early_start_probe", thrift.BOOL, 78); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableHashJoinEarlyStartProbe); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableInvertedIndexQuery != src { + return false } - return nil -WriteFieldBeginError: - 
return thrift.PrependError(fmt.Sprintf("%T write field 78 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 78 end error: ", p), err) + return true } +func (p *TQueryOptions) Field77DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField79(oprot thrift.TProtocol) (err error) { - if p.IsSetEnablePipelineXEngine() { - if err = oprot.WriteFieldBegin("enable_pipeline_x_engine", thrift.BOOL, 79); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnablePipelineXEngine); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.TruncateCharOrVarcharColumns != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 79 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 79 end error: ", p), err) + return true } +func (p *TQueryOptions) Field78DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField80(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableMemtableOnSinkNode() { - if err = oprot.WriteFieldBegin("enable_memtable_on_sink_node", thrift.BOOL, 80); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableMemtableOnSinkNode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableHashJoinEarlyStartProbe != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 80 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 80 end error: ", p), err) + return true } +func (p *TQueryOptions) Field79DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField81(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableDeleteSubPredicateV2() { - if err = oprot.WriteFieldBegin("enable_delete_sub_predicate_v2", thrift.BOOL, 81); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableDeleteSubPredicateV2); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnablePipelineXEngine != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 81 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 81 end error: ", p), err) + return true } +func (p *TQueryOptions) Field80DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField82(oprot thrift.TProtocol) (err error) { - if p.IsSetFeProcessUuid() { - if err = oprot.WriteFieldBegin("fe_process_uuid", thrift.I64, 82); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.FeProcessUuid); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableMemtableOnSinkNode != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 82 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 82 end error: ", p), err) + return true } +func (p *TQueryOptions) Field81DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField83(oprot thrift.TProtocol) (err error) { - if p.IsSetInvertedIndexConjunctionOptThreshold() { - if err = oprot.WriteFieldBegin("inverted_index_conjunction_opt_threshold", thrift.I32, 83); err != nil { - goto 
WriteFieldBeginError - } - if err := oprot.WriteI32(p.InvertedIndexConjunctionOptThreshold); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableDeleteSubPredicateV2 != src { + return false + } + return true +} +func (p *TQueryOptions) Field82DeepEqual(src int64) bool { + + if p.FeProcessUuid != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 83 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 83 end error: ", p), err) + return true } +func (p *TQueryOptions) Field83DeepEqual(src int32) bool { -func (p *TQueryOptions) writeField84(oprot thrift.TProtocol) (err error) { - if p.IsSetEnableProfile() { - if err = oprot.WriteFieldBegin("enable_profile", thrift.BOOL, 84); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnableProfile); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.InvertedIndexConjunctionOptThreshold != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 84 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 84 end error: ", p), err) + return true } +func (p *TQueryOptions) Field84DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField85(oprot thrift.TProtocol) (err error) { - if p.IsSetEnablePageCache() { - if err = oprot.WriteFieldBegin("enable_page_cache", thrift.BOOL, 85); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnablePageCache); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnableProfile != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 85 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 85 end error: ", p), err) + return true } +func (p *TQueryOptions) Field85DeepEqual(src bool) bool { -func (p *TQueryOptions) writeField86(oprot thrift.TProtocol) (err error) { - if p.IsSetAnalyzeTimeout() { - if err = oprot.WriteFieldBegin("analyze_timeout", thrift.I32, 86); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.AnalyzeTimeout); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.EnablePageCache != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 86 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 86 end error: ", p), err) + return true } +func (p *TQueryOptions) Field86DeepEqual(src int32) bool { -func (p *TQueryOptions) String() string { - if p == nil { - return "" + if p.AnalyzeTimeout != src { + return false } - return fmt.Sprintf("TQueryOptions(%+v)", *p) + return true } +func (p *TQueryOptions) Field87DeepEqual(src bool) bool { -func (p *TQueryOptions) DeepEqual(ano *TQueryOptions) bool { - if p == ano { - return true - } else if p == nil || ano == nil { + if p.FasterFloatConvert != src { return false } - if !p.Field1DeepEqual(ano.AbortOnError) { + return true +} +func (p *TQueryOptions) Field88DeepEqual(src bool) bool { + + if p.EnableDecimal256 != src { return false } - if !p.Field2DeepEqual(ano.MaxErrors) { + return true +} +func (p 
*TQueryOptions) Field89DeepEqual(src bool) bool { + + if p.EnableLocalShuffle != src { return false } - if !p.Field3DeepEqual(ano.DisableCodegen) { + return true +} +func (p *TQueryOptions) Field90DeepEqual(src bool) bool { + + if p.SkipMissingVersion != src { return false } - if !p.Field4DeepEqual(ano.BatchSize) { + return true +} +func (p *TQueryOptions) Field91DeepEqual(src bool) bool { + + if p.RuntimeFilterWaitInfinitely != src { return false } - if !p.Field5DeepEqual(ano.NumNodes) { + return true +} +func (p *TQueryOptions) Field92DeepEqual(src int32) bool { + + if p.WaitFullBlockScheduleTimes != src { return false } - if !p.Field6DeepEqual(ano.MaxScanRangeLength) { + return true +} +func (p *TQueryOptions) Field93DeepEqual(src int32) bool { + + if p.InvertedIndexMaxExpansions != src { return false } - if !p.Field7DeepEqual(ano.NumScannerThreads) { + return true +} +func (p *TQueryOptions) Field94DeepEqual(src int32) bool { + + if p.InvertedIndexSkipThreshold != src { return false } - if !p.Field8DeepEqual(ano.MaxIoBuffers) { + return true +} +func (p *TQueryOptions) Field95DeepEqual(src bool) bool { + + if p.EnableParallelScan != src { return false } - if !p.Field9DeepEqual(ano.AllowUnsupportedFormats) { + return true +} +func (p *TQueryOptions) Field96DeepEqual(src int32) bool { + + if p.ParallelScanMaxScannersCount != src { return false } - if !p.Field10DeepEqual(ano.DefaultOrderByLimit) { + return true +} +func (p *TQueryOptions) Field97DeepEqual(src int64) bool { + + if p.ParallelScanMinRowsPerScanner != src { return false } - if !p.Field12DeepEqual(ano.MemLimit) { + return true +} +func (p *TQueryOptions) Field98DeepEqual(src bool) bool { + + if p.SkipBadTablet != src { return false } - if !p.Field13DeepEqual(ano.AbortOnDefaultLimitExceeded) { + return true +} +func (p *TQueryOptions) Field99DeepEqual(src float64) bool { + + if p.ScannerScaleUpRatio != src { return false } - if !p.Field14DeepEqual(ano.QueryTimeout) { + return true +} +func (p *TQueryOptions) Field100DeepEqual(src bool) bool { + + if p.EnableDistinctStreamingAggregation != src { return false } - if !p.Field15DeepEqual(ano.IsReportSuccess) { + return true +} +func (p *TQueryOptions) Field101DeepEqual(src bool) bool { + + if p.EnableJoinSpill != src { return false } - if !p.Field16DeepEqual(ano.CodegenLevel) { + return true +} +func (p *TQueryOptions) Field102DeepEqual(src bool) bool { + + if p.EnableSortSpill != src { return false } - if !p.Field17DeepEqual(ano.KuduLatestObservedTs) { + return true +} +func (p *TQueryOptions) Field103DeepEqual(src bool) bool { + + if p.EnableAggSpill != src { return false } - if !p.Field18DeepEqual(ano.QueryType) { + return true +} +func (p *TQueryOptions) Field104DeepEqual(src int64) bool { + + if p.MinRevocableMem != src { return false } - if !p.Field19DeepEqual(ano.MinReservation) { + return true +} +func (p *TQueryOptions) Field105DeepEqual(src int64) bool { + + if p.SpillStreamingAggMemLimit != src { return false } - if !p.Field20DeepEqual(ano.MaxReservation) { + return true +} +func (p *TQueryOptions) Field106DeepEqual(src int64) bool { + + if p.DataQueueMaxBlocks != src { return false } - if !p.Field21DeepEqual(ano.InitialReservationTotalClaims) { + return true +} +func (p *TQueryOptions) Field107DeepEqual(src bool) bool { + + if p.EnableCommonExprPushdownForInvertedIndex != src { return false } - if !p.Field22DeepEqual(ano.BufferPoolLimit) { + return true +} +func (p *TQueryOptions) Field108DeepEqual(src *int64) bool { + + if p.LocalExchangeFreeBlocksLimit == src { + 
return true + } else if p.LocalExchangeFreeBlocksLimit == nil || src == nil { return false } - if !p.Field23DeepEqual(ano.DefaultSpillableBufferSize) { + if *p.LocalExchangeFreeBlocksLimit != *src { return false } - if !p.Field24DeepEqual(ano.MinSpillableBufferSize) { + return true +} +func (p *TQueryOptions) Field109DeepEqual(src bool) bool { + + if p.EnableForceSpill != src { return false } - if !p.Field25DeepEqual(ano.MaxRowSize) { + return true +} +func (p *TQueryOptions) Field110DeepEqual(src bool) bool { + + if p.EnableParquetFilterByMinMax != src { return false } - if !p.Field26DeepEqual(ano.DisableStreamPreaggregations) { + return true +} +func (p *TQueryOptions) Field111DeepEqual(src bool) bool { + + if p.EnableOrcFilterByMinMax != src { return false } - if !p.Field27DeepEqual(ano.MtDop) { + return true +} +func (p *TQueryOptions) Field112DeepEqual(src int32) bool { + + if p.MaxColumnReaderNum != src { return false } - if !p.Field28DeepEqual(ano.LoadMemLimit) { + return true +} +func (p *TQueryOptions) Field113DeepEqual(src bool) bool { + + if p.EnableLocalMergeSort != src { return false } - if !p.Field29DeepEqual(ano.MaxScanKeyNum) { + return true +} +func (p *TQueryOptions) Field114DeepEqual(src bool) bool { + + if p.EnableParallelResultSink != src { return false } - if !p.Field30DeepEqual(ano.MaxPushdownConditionsPerColumn) { + return true +} +func (p *TQueryOptions) Field115DeepEqual(src bool) bool { + + if p.EnableShortCircuitQueryAccessColumnStore != src { return false } - if !p.Field31DeepEqual(ano.EnableSpilling) { + return true +} +func (p *TQueryOptions) Field116DeepEqual(src bool) bool { + + if p.EnableNoNeedReadDataOpt != src { return false } - if !p.Field32DeepEqual(ano.EnableEnableExchangeNodeParallelMerge) { + return true +} +func (p *TQueryOptions) Field117DeepEqual(src bool) bool { + + if p.ReadCsvEmptyLineAsNull != src { return false } - if !p.Field33DeepEqual(ano.RuntimeFilterWaitTimeMs) { + return true +} +func (p *TQueryOptions) Field118DeepEqual(src TSerdeDialect) bool { + + if p.SerdeDialect != src { return false } - if !p.Field34DeepEqual(ano.RuntimeFilterMaxInNum) { + return true +} +func (p *TQueryOptions) Field119DeepEqual(src bool) bool { + + if p.EnableMatchWithoutInvertedIndex != src { return false } - if !p.Field42DeepEqual(ano.ResourceLimit) { + return true +} +func (p *TQueryOptions) Field120DeepEqual(src bool) bool { + + if p.EnableFallbackOnMissingInvertedIndex != src { return false } - if !p.Field43DeepEqual(ano.ReturnObjectDataAsBinary) { + return true +} +func (p *TQueryOptions) Field121DeepEqual(src bool) bool { + + if p.KeepCarriageReturn != src { return false } - if !p.Field44DeepEqual(ano.TrimTailingSpacesForExternalTableQuery) { + return true +} +func (p *TQueryOptions) Field122DeepEqual(src int32) bool { + + if p.RuntimeBloomFilterMinSize != src { return false } - if !p.Field45DeepEqual(ano.EnableFunctionPushdown) { + return true +} +func (p *TQueryOptions) Field123DeepEqual(src bool) bool { + + if p.HiveParquetUseColumnNames != src { return false } - if !p.Field46DeepEqual(ano.FragmentTransmissionCompressionCodec) { + return true +} +func (p *TQueryOptions) Field124DeepEqual(src bool) bool { + + if p.HiveOrcUseColumnNames != src { return false } - if !p.Field48DeepEqual(ano.EnableLocalExchange) { + return true +} +func (p *TQueryOptions) Field125DeepEqual(src bool) bool { + + if p.EnableSegmentCache != src { return false } - if !p.Field49DeepEqual(ano.SkipStorageEngineMerge) { + return true +} +func (p *TQueryOptions) 
Field126DeepEqual(src int32) bool { + + if p.RuntimeBloomFilterMaxSize != src { return false } - if !p.Field50DeepEqual(ano.SkipDeletePredicate) { + return true +} +func (p *TQueryOptions) Field127DeepEqual(src int32) bool { + + if p.InListValueCountThreshold != src { return false } - if !p.Field51DeepEqual(ano.EnableNewShuffleHashMethod) { + return true +} +func (p *TQueryOptions) Field128DeepEqual(src bool) bool { + + if p.EnableVerboseProfile != src { return false } - if !p.Field52DeepEqual(ano.BeExecVersion) { + return true +} +func (p *TQueryOptions) Field129DeepEqual(src int32) bool { + + if p.RpcVerboseProfileMaxInstanceCount != src { return false } - if !p.Field53DeepEqual(ano.PartitionedHashJoinRowsThreshold) { + return true +} +func (p *TQueryOptions) Field130DeepEqual(src bool) bool { + + if p.EnableAdaptivePipelineTaskSerialReadOnLimit != src { return false } - if !p.Field54DeepEqual(ano.EnableShareHashTableForBroadcastJoin) { + return true +} +func (p *TQueryOptions) Field131DeepEqual(src int32) bool { + + if p.AdaptivePipelineTaskSerialReadOnLimit != src { return false } - if !p.Field55DeepEqual(ano.CheckOverflowForDecimal) { + return true +} +func (p *TQueryOptions) Field132DeepEqual(src int32) bool { + + if p.ParallelPrepareThreshold != src { return false } - if !p.Field56DeepEqual(ano.SkipDeleteBitmap) { + return true +} +func (p *TQueryOptions) Field133DeepEqual(src int32) bool { + + if p.PartitionTopnMaxPartitions != src { return false } - if !p.Field57DeepEqual(ano.EnablePipelineEngine) { + return true +} +func (p *TQueryOptions) Field134DeepEqual(src int32) bool { + + if p.PartitionTopnPrePartitionRows != src { return false } - if !p.Field58DeepEqual(ano.RepeatMaxNum) { + return true +} +func (p *TQueryOptions) Field135DeepEqual(src bool) bool { + + if p.EnableParallelOutfile != src { return false } - if !p.Field59DeepEqual(ano.ExternalSortBytesThreshold) { + return true +} +func (p *TQueryOptions) Field136DeepEqual(src bool) bool { + + if p.EnablePhraseQuerySequentialOpt != src { return false } - if !p.Field60DeepEqual(ano.PartitionedHashAggRowsThreshold) { + return true +} +func (p *TQueryOptions) Field137DeepEqual(src bool) bool { + + if p.EnableAutoCreateWhenOverwrite != src { return false } - if !p.Field61DeepEqual(ano.EnableFileCache) { + return true +} +func (p *TQueryOptions) Field138DeepEqual(src int64) bool { + + if p.OrcTinyStripeThresholdBytes != src { return false } - if !p.Field62DeepEqual(ano.InsertTimeout) { + return true +} +func (p *TQueryOptions) Field139DeepEqual(src int64) bool { + + if p.OrcOnceMaxReadBytes != src { return false } - if !p.Field63DeepEqual(ano.ExecutionTimeout) { + return true +} +func (p *TQueryOptions) Field140DeepEqual(src int64) bool { + + if p.OrcMaxMergeDistanceBytes != src { return false } - if !p.Field64DeepEqual(ano.DryRunQuery) { + return true +} +func (p *TQueryOptions) Field141DeepEqual(src bool) bool { + + if p.IgnoreRuntimeFilterError != src { return false } - if !p.Field65DeepEqual(ano.EnableCommonExprPushdown) { + return true +} +func (p *TQueryOptions) Field1000DeepEqual(src bool) bool { + + if p.DisableFileCache != src { return false } - if !p.Field66DeepEqual(ano.ParallelInstance) { - return false + return true +} + +type TScanRangeParams struct { + ScanRange *plannodes.TScanRange `thrift:"scan_range,1,required" frugal:"1,required,plannodes.TScanRange" json:"scan_range"` + VolumeId int32 `thrift:"volume_id,2,optional" frugal:"2,optional,i32" json:"volume_id,omitempty"` +} + +func NewTScanRangeParams() 
*TScanRangeParams { + return &TScanRangeParams{ + + VolumeId: -1, } - if !p.Field67DeepEqual(ano.MysqlRowBinaryFormat) { - return false +} + +func (p *TScanRangeParams) InitDefault() { + p.VolumeId = -1 +} + +var TScanRangeParams_ScanRange_DEFAULT *plannodes.TScanRange + +func (p *TScanRangeParams) GetScanRange() (v *plannodes.TScanRange) { + if !p.IsSetScanRange() { + return TScanRangeParams_ScanRange_DEFAULT } - if !p.Field68DeepEqual(ano.ExternalAggBytesThreshold) { - return false + return p.ScanRange +} + +var TScanRangeParams_VolumeId_DEFAULT int32 = -1 + +func (p *TScanRangeParams) GetVolumeId() (v int32) { + if !p.IsSetVolumeId() { + return TScanRangeParams_VolumeId_DEFAULT } - if !p.Field69DeepEqual(ano.ExternalAggPartitionBits) { - return false + return p.VolumeId +} +func (p *TScanRangeParams) SetScanRange(val *plannodes.TScanRange) { + p.ScanRange = val +} +func (p *TScanRangeParams) SetVolumeId(val int32) { + p.VolumeId = val +} + +var fieldIDToName_TScanRangeParams = map[int16]string{ + 1: "scan_range", + 2: "volume_id", +} + +func (p *TScanRangeParams) IsSetScanRange() bool { + return p.ScanRange != nil +} + +func (p *TScanRangeParams) IsSetVolumeId() bool { + return p.VolumeId != TScanRangeParams_VolumeId_DEFAULT +} + +func (p *TScanRangeParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetScanRange bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if !p.Field70DeepEqual(ano.FileCacheBasePath) { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetScanRange = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if !p.Field71DeepEqual(ano.EnableParquetLazyMat) { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if !p.Field72DeepEqual(ano.EnableOrcLazyMat) { - return false + + if !issetScanRange { + fieldId = 1 + goto RequiredFieldNotSetError } - if !p.Field73DeepEqual(ano.ScanQueueMemLimit) { - return false + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TScanRangeParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", 
fieldIDToName_TScanRangeParams[fieldId])) +} + +func (p *TScanRangeParams) ReadField1(iprot thrift.TProtocol) error { + _field := plannodes.NewTScanRange() + if err := _field.Read(iprot); err != nil { + return err } - if !p.Field74DeepEqual(ano.EnableScanNodeRunSerial) { - return false + p.ScanRange = _field + return nil +} +func (p *TScanRangeParams) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v } - if !p.Field75DeepEqual(ano.EnableInsertStrict) { - return false + p.VolumeId = _field + return nil +} + +func (p *TScanRangeParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TScanRangeParams"); err != nil { + goto WriteStructBeginError } - if !p.Field76DeepEqual(ano.EnableInvertedIndexQuery) { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } - if !p.Field77DeepEqual(ano.TruncateCharOrVarcharColumns) { - return false + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if !p.Field78DeepEqual(ano.EnableHashJoinEarlyStartProbe) { - return false + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - if !p.Field79DeepEqual(ano.EnablePipelineXEngine) { - return false + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TScanRangeParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("scan_range", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - if !p.Field80DeepEqual(ano.EnableMemtableOnSinkNode) { - return false + if err := p.ScanRange.Write(oprot); err != nil { + return err } - if !p.Field81DeepEqual(ano.EnableDeleteSubPredicateV2) { - return false + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if !p.Field82DeepEqual(ano.FeProcessUuid) { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TScanRangeParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetVolumeId() { + if err = oprot.WriteFieldBegin("volume_id", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.VolumeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if !p.Field83DeepEqual(ano.InvertedIndexConjunctionOptThreshold) { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TScanRangeParams) String() string { + if p == nil { + return "" } - if !p.Field84DeepEqual(ano.EnableProfile) { + return fmt.Sprintf("TScanRangeParams(%+v)", *p) + +} + +func (p *TScanRangeParams) DeepEqual(ano *TScanRangeParams) bool 
{ + if p == ano { + return true + } else if p == nil || ano == nil { return false } - if !p.Field85DeepEqual(ano.EnablePageCache) { + if !p.Field1DeepEqual(ano.ScanRange) { return false } - if !p.Field86DeepEqual(ano.AnalyzeTimeout) { + if !p.Field2DeepEqual(ano.VolumeId) { return false } return true } -func (p *TQueryOptions) Field1DeepEqual(src bool) bool { +func (p *TScanRangeParams) Field1DeepEqual(src *plannodes.TScanRange) bool { - if p.AbortOnError != src { + if !p.ScanRange.DeepEqual(src) { return false } return true } -func (p *TQueryOptions) Field2DeepEqual(src int32) bool { +func (p *TScanRangeParams) Field2DeepEqual(src int32) bool { - if p.MaxErrors != src { + if p.VolumeId != src { return false } return true } -func (p *TQueryOptions) Field3DeepEqual(src bool) bool { - if p.DisableCodegen != src { - return false - } - return true +type TRuntimeFilterTargetParams struct { + TargetFragmentInstanceId *types.TUniqueId `thrift:"target_fragment_instance_id,1,required" frugal:"1,required,types.TUniqueId" json:"target_fragment_instance_id"` + TargetFragmentInstanceAddr *types.TNetworkAddress `thrift:"target_fragment_instance_addr,2,required" frugal:"2,required,types.TNetworkAddress" json:"target_fragment_instance_addr"` } -func (p *TQueryOptions) Field4DeepEqual(src int32) bool { - if p.BatchSize != src { - return false - } - return true +func NewTRuntimeFilterTargetParams() *TRuntimeFilterTargetParams { + return &TRuntimeFilterTargetParams{} } -func (p *TQueryOptions) Field5DeepEqual(src int32) bool { - if p.NumNodes != src { - return false - } - return true +func (p *TRuntimeFilterTargetParams) InitDefault() { } -func (p *TQueryOptions) Field6DeepEqual(src int64) bool { - if p.MaxScanRangeLength != src { - return false - } - return true -} -func (p *TQueryOptions) Field7DeepEqual(src int32) bool { +var TRuntimeFilterTargetParams_TargetFragmentInstanceId_DEFAULT *types.TUniqueId - if p.NumScannerThreads != src { - return false +func (p *TRuntimeFilterTargetParams) GetTargetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetTargetFragmentInstanceId() { + return TRuntimeFilterTargetParams_TargetFragmentInstanceId_DEFAULT } - return true + return p.TargetFragmentInstanceId } -func (p *TQueryOptions) Field8DeepEqual(src int32) bool { - if p.MaxIoBuffers != src { - return false - } - return true -} -func (p *TQueryOptions) Field9DeepEqual(src bool) bool { +var TRuntimeFilterTargetParams_TargetFragmentInstanceAddr_DEFAULT *types.TNetworkAddress - if p.AllowUnsupportedFormats != src { - return false +func (p *TRuntimeFilterTargetParams) GetTargetFragmentInstanceAddr() (v *types.TNetworkAddress) { + if !p.IsSetTargetFragmentInstanceAddr() { + return TRuntimeFilterTargetParams_TargetFragmentInstanceAddr_DEFAULT } - return true + return p.TargetFragmentInstanceAddr +} +func (p *TRuntimeFilterTargetParams) SetTargetFragmentInstanceId(val *types.TUniqueId) { + p.TargetFragmentInstanceId = val +} +func (p *TRuntimeFilterTargetParams) SetTargetFragmentInstanceAddr(val *types.TNetworkAddress) { + p.TargetFragmentInstanceAddr = val } -func (p *TQueryOptions) Field10DeepEqual(src int64) bool { - if p.DefaultOrderByLimit != src { - return false - } - return true +var fieldIDToName_TRuntimeFilterTargetParams = map[int16]string{ + 1: "target_fragment_instance_id", + 2: "target_fragment_instance_addr", } -func (p *TQueryOptions) Field12DeepEqual(src int64) bool { - if p.MemLimit != src { - return false - } - return true +func (p *TRuntimeFilterTargetParams) IsSetTargetFragmentInstanceId() bool { + 
return p.TargetFragmentInstanceId != nil } -func (p *TQueryOptions) Field13DeepEqual(src bool) bool { - if p.AbortOnDefaultLimitExceeded != src { - return false - } - return true +func (p *TRuntimeFilterTargetParams) IsSetTargetFragmentInstanceAddr() bool { + return p.TargetFragmentInstanceAddr != nil } -func (p *TQueryOptions) Field14DeepEqual(src int32) bool { - if p.QueryTimeout != src { - return false +func (p *TRuntimeFilterTargetParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTargetFragmentInstanceId bool = false + var issetTargetFragmentInstanceAddr bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return true -} -func (p *TQueryOptions) Field15DeepEqual(src bool) bool { - if p.IsReportSuccess != src { - return false + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTargetFragmentInstanceId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetTargetFragmentInstanceAddr = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true -} -func (p *TQueryOptions) Field16DeepEqual(src int32) bool { - if p.CodegenLevel != src { - return false + if !issetTargetFragmentInstanceId { + fieldId = 1 + goto RequiredFieldNotSetError } - return true -} -func (p *TQueryOptions) Field17DeepEqual(src int64) bool { - if p.KuduLatestObservedTs != src { - return false + if !issetTargetFragmentInstanceAddr { + fieldId = 2 + goto RequiredFieldNotSetError } - return true + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterTargetParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRuntimeFilterTargetParams[fieldId])) } -func (p *TQueryOptions) Field18DeepEqual(src TQueryType) bool { - if p.QueryType != src { - return false +func (p *TRuntimeFilterTargetParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.TargetFragmentInstanceId = _field + return nil } -func (p *TQueryOptions) Field19DeepEqual(src int64) bool { - - if p.MinReservation != src { - return false 
+func (p *TRuntimeFilterTargetParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.TargetFragmentInstanceAddr = _field + return nil } -func (p *TQueryOptions) Field20DeepEqual(src int64) bool { - if p.MaxReservation != src { - return false +func (p *TRuntimeFilterTargetParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRuntimeFilterTargetParams"); err != nil { + goto WriteStructBeginError } - return true + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TQueryOptions) Field21DeepEqual(src int64) bool { - if p.InitialReservationTotalClaims != src { - return false +func (p *TRuntimeFilterTargetParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("target_fragment_instance_id", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - return true -} -func (p *TQueryOptions) Field22DeepEqual(src int64) bool { - - if p.BufferPoolLimit != src { - return false + if err := p.TargetFragmentInstanceId.Write(oprot); err != nil { + return err } - return true -} -func (p *TQueryOptions) Field23DeepEqual(src int64) bool { - - if p.DefaultSpillableBufferSize != src { - return false + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TQueryOptions) Field24DeepEqual(src int64) bool { - if p.MinSpillableBufferSize != src { - return false +func (p *TRuntimeFilterTargetParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("target_fragment_instance_addr", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError } - return true -} -func (p *TQueryOptions) Field25DeepEqual(src int64) bool { - - if p.MaxRowSize != src { - return false + if err := p.TargetFragmentInstanceAddr.Write(oprot); err != nil { + return err } - return true -} -func (p *TQueryOptions) Field26DeepEqual(src bool) bool { - - if p.DisableStreamPreaggregations != src { - return false + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TQueryOptions) Field27DeepEqual(src int32) bool { - if p.MtDop != src { - return false +func (p *TRuntimeFilterTargetParams) String() string { + if p == nil { + return "" } - return true -} -func (p 
*TQueryOptions) Field28DeepEqual(src int64) bool { + return fmt.Sprintf("TRuntimeFilterTargetParams(%+v)", *p) - if p.LoadMemLimit != src { - return false - } - return true } -func (p *TQueryOptions) Field29DeepEqual(src *int32) bool { - if p.MaxScanKeyNum == src { +func (p *TRuntimeFilterTargetParams) DeepEqual(ano *TRuntimeFilterTargetParams) bool { + if p == ano { return true - } else if p.MaxScanKeyNum == nil || src == nil { - return false - } - if *p.MaxScanKeyNum != *src { + } else if p == nil || ano == nil { return false } - return true -} -func (p *TQueryOptions) Field30DeepEqual(src *int32) bool { - - if p.MaxPushdownConditionsPerColumn == src { - return true - } else if p.MaxPushdownConditionsPerColumn == nil || src == nil { + if !p.Field1DeepEqual(ano.TargetFragmentInstanceId) { return false } - if *p.MaxPushdownConditionsPerColumn != *src { + if !p.Field2DeepEqual(ano.TargetFragmentInstanceAddr) { return false } return true } -func (p *TQueryOptions) Field31DeepEqual(src bool) bool { - if p.EnableSpilling != src { - return false - } - return true -} -func (p *TQueryOptions) Field32DeepEqual(src bool) bool { +func (p *TRuntimeFilterTargetParams) Field1DeepEqual(src *types.TUniqueId) bool { - if p.EnableEnableExchangeNodeParallelMerge != src { + if !p.TargetFragmentInstanceId.DeepEqual(src) { return false } return true } -func (p *TQueryOptions) Field33DeepEqual(src int32) bool { +func (p *TRuntimeFilterTargetParams) Field2DeepEqual(src *types.TNetworkAddress) bool { - if p.RuntimeFilterWaitTimeMs != src { + if !p.TargetFragmentInstanceAddr.DeepEqual(src) { return false } return true } -func (p *TQueryOptions) Field34DeepEqual(src int32) bool { - if p.RuntimeFilterMaxInNum != src { - return false - } - return true +type TRuntimeFilterTargetParamsV2 struct { + TargetFragmentInstanceIds []*types.TUniqueId `thrift:"target_fragment_instance_ids,1,required" frugal:"1,required,list" json:"target_fragment_instance_ids"` + TargetFragmentInstanceAddr *types.TNetworkAddress `thrift:"target_fragment_instance_addr,2,required" frugal:"2,required,types.TNetworkAddress" json:"target_fragment_instance_addr"` + TargetFragmentIds []int32 `thrift:"target_fragment_ids,3,optional" frugal:"3,optional,list" json:"target_fragment_ids,omitempty"` } -func (p *TQueryOptions) Field42DeepEqual(src *TResourceLimit) bool { - if !p.ResourceLimit.DeepEqual(src) { - return false - } - return true +func NewTRuntimeFilterTargetParamsV2() *TRuntimeFilterTargetParamsV2 { + return &TRuntimeFilterTargetParamsV2{} } -func (p *TQueryOptions) Field43DeepEqual(src bool) bool { - if p.ReturnObjectDataAsBinary != src { - return false - } - return true +func (p *TRuntimeFilterTargetParamsV2) InitDefault() { } -func (p *TQueryOptions) Field44DeepEqual(src bool) bool { - if p.TrimTailingSpacesForExternalTableQuery != src { - return false - } - return true +func (p *TRuntimeFilterTargetParamsV2) GetTargetFragmentInstanceIds() (v []*types.TUniqueId) { + return p.TargetFragmentInstanceIds } -func (p *TQueryOptions) Field45DeepEqual(src *bool) bool { - if p.EnableFunctionPushdown == src { - return true - } else if p.EnableFunctionPushdown == nil || src == nil { - return false - } - if *p.EnableFunctionPushdown != *src { - return false - } - return true -} -func (p *TQueryOptions) Field46DeepEqual(src *string) bool { +var TRuntimeFilterTargetParamsV2_TargetFragmentInstanceAddr_DEFAULT *types.TNetworkAddress - if p.FragmentTransmissionCompressionCodec == src { - return true - } else if p.FragmentTransmissionCompressionCodec == nil || 
src == nil { - return false - } - if strings.Compare(*p.FragmentTransmissionCompressionCodec, *src) != 0 { - return false +func (p *TRuntimeFilterTargetParamsV2) GetTargetFragmentInstanceAddr() (v *types.TNetworkAddress) { + if !p.IsSetTargetFragmentInstanceAddr() { + return TRuntimeFilterTargetParamsV2_TargetFragmentInstanceAddr_DEFAULT } - return true + return p.TargetFragmentInstanceAddr } -func (p *TQueryOptions) Field48DeepEqual(src *bool) bool { - if p.EnableLocalExchange == src { - return true - } else if p.EnableLocalExchange == nil || src == nil { - return false - } - if *p.EnableLocalExchange != *src { - return false - } - return true -} -func (p *TQueryOptions) Field49DeepEqual(src bool) bool { +var TRuntimeFilterTargetParamsV2_TargetFragmentIds_DEFAULT []int32 - if p.SkipStorageEngineMerge != src { - return false +func (p *TRuntimeFilterTargetParamsV2) GetTargetFragmentIds() (v []int32) { + if !p.IsSetTargetFragmentIds() { + return TRuntimeFilterTargetParamsV2_TargetFragmentIds_DEFAULT } - return true + return p.TargetFragmentIds +} +func (p *TRuntimeFilterTargetParamsV2) SetTargetFragmentInstanceIds(val []*types.TUniqueId) { + p.TargetFragmentInstanceIds = val +} +func (p *TRuntimeFilterTargetParamsV2) SetTargetFragmentInstanceAddr(val *types.TNetworkAddress) { + p.TargetFragmentInstanceAddr = val +} +func (p *TRuntimeFilterTargetParamsV2) SetTargetFragmentIds(val []int32) { + p.TargetFragmentIds = val } -func (p *TQueryOptions) Field50DeepEqual(src bool) bool { - if p.SkipDeletePredicate != src { - return false - } - return true +var fieldIDToName_TRuntimeFilterTargetParamsV2 = map[int16]string{ + 1: "target_fragment_instance_ids", + 2: "target_fragment_instance_addr", + 3: "target_fragment_ids", } -func (p *TQueryOptions) Field51DeepEqual(src *bool) bool { - if p.EnableNewShuffleHashMethod == src { - return true - } else if p.EnableNewShuffleHashMethod == nil || src == nil { - return false - } - if *p.EnableNewShuffleHashMethod != *src { - return false - } - return true +func (p *TRuntimeFilterTargetParamsV2) IsSetTargetFragmentInstanceAddr() bool { + return p.TargetFragmentInstanceAddr != nil } -func (p *TQueryOptions) Field52DeepEqual(src int32) bool { - if p.BeExecVersion != src { - return false - } - return true +func (p *TRuntimeFilterTargetParamsV2) IsSetTargetFragmentIds() bool { + return p.TargetFragmentIds != nil } -func (p *TQueryOptions) Field53DeepEqual(src int32) bool { - if p.PartitionedHashJoinRowsThreshold != src { - return false +func (p *TRuntimeFilterTargetParamsV2) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetTargetFragmentInstanceIds bool = false + var issetTargetFragmentInstanceAddr bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return true -} -func (p *TQueryOptions) Field54DeepEqual(src *bool) bool { - if p.EnableShareHashTableForBroadcastJoin == src { - return true - } else if p.EnableShareHashTableForBroadcastJoin == nil || src == nil { - return false + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTargetFragmentInstanceIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); 
err != nil { + goto ReadFieldError + } + issetTargetFragmentInstanceAddr = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if *p.EnableShareHashTableForBroadcastJoin != *src { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true -} -func (p *TQueryOptions) Field55DeepEqual(src bool) bool { - if p.CheckOverflowForDecimal != src { - return false + if !issetTargetFragmentInstanceIds { + fieldId = 1 + goto RequiredFieldNotSetError } - return true -} -func (p *TQueryOptions) Field56DeepEqual(src bool) bool { - if p.SkipDeleteBitmap != src { - return false + if !issetTargetFragmentInstanceAddr { + fieldId = 2 + goto RequiredFieldNotSetError } - return true -} -func (p *TQueryOptions) Field57DeepEqual(src bool) bool { + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterTargetParamsV2[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - if p.EnablePipelineEngine != src { - return false - } - return true +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRuntimeFilterTargetParamsV2[fieldId])) } -func (p *TQueryOptions) Field58DeepEqual(src int32) bool { - if p.RepeatMaxNum != src { - return false +func (p *TRuntimeFilterTargetParamsV2) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } - return true -} -func (p *TQueryOptions) Field59DeepEqual(src int64) bool { + _field := make([]*types.TUniqueId, 0, size) + values := make([]types.TUniqueId, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() - if p.ExternalSortBytesThreshold != src { - return false - } - return true -} -func (p *TQueryOptions) Field60DeepEqual(src int32) bool { + if err := _elem.Read(iprot); err != nil { + return err + } - if p.PartitionedHashAggRowsThreshold != src { - return false + _field = append(_field, _elem) } - return true -} -func (p *TQueryOptions) Field61DeepEqual(src bool) bool { - - if p.EnableFileCache != src { - return false + if err := iprot.ReadListEnd(); err != nil { + return err } - return true + p.TargetFragmentInstanceIds = _field + return nil } -func (p *TQueryOptions) Field62DeepEqual(src int32) bool { - - if p.InsertTimeout != src { - return false +func (p *TRuntimeFilterTargetParamsV2) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.TargetFragmentInstanceAddr = _field + 
return nil } -func (p *TQueryOptions) Field63DeepEqual(src int32) bool { - - if p.ExecutionTimeout != src { - return false +func (p *TRuntimeFilterTargetParamsV2) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } - return true -} -func (p *TQueryOptions) Field64DeepEqual(src bool) bool { + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { - if p.DryRunQuery != src { - return false - } - return true -} -func (p *TQueryOptions) Field65DeepEqual(src bool) bool { + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } - if p.EnableCommonExprPushdown != src { - return false + _field = append(_field, _elem) } - return true -} -func (p *TQueryOptions) Field66DeepEqual(src int32) bool { - - if p.ParallelInstance != src { - return false + if err := iprot.ReadListEnd(); err != nil { + return err } - return true + p.TargetFragmentIds = _field + return nil } -func (p *TQueryOptions) Field67DeepEqual(src bool) bool { - if p.MysqlRowBinaryFormat != src { - return false +func (p *TRuntimeFilterTargetParamsV2) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TRuntimeFilterTargetParamsV2"); err != nil { + goto WriteStructBeginError } - return true -} -func (p *TQueryOptions) Field68DeepEqual(src int64) bool { - - if p.ExternalAggBytesThreshold != src { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } - return true -} -func (p *TQueryOptions) Field69DeepEqual(src int32) bool { - - if p.ExternalAggPartitionBits != src { - return false + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return true + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TQueryOptions) Field70DeepEqual(src *string) bool { - if p.FileCacheBasePath == src { - return true - } else if p.FileCacheBasePath == nil || src == nil { - return false +func (p *TRuntimeFilterTargetParamsV2) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("target_fragment_instance_ids", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError } - if strings.Compare(*p.FileCacheBasePath, *src) != 0 { - return false + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TargetFragmentInstanceIds)); err != nil { + return err } - return true -} -func (p *TQueryOptions) Field71DeepEqual(src bool) bool { - - if p.EnableParquetLazyMat != src { - return false + for _, v := range p.TargetFragmentInstanceIds { + if err := v.Write(oprot); err != nil { + return err + } } - return true -} -func (p *TQueryOptions) Field72DeepEqual(src bool) bool { - - if p.EnableOrcLazyMat != src { - return false + if err := oprot.WriteListEnd(); err != nil { + return err } - return true + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TQueryOptions) Field73DeepEqual(src *int64) bool { - if p.ScanQueueMemLimit == src { - return true - } else if p.ScanQueueMemLimit == nil || src == nil { - return false +func (p *TRuntimeFilterTargetParamsV2) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("target_fragment_instance_addr", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError } - if *p.ScanQueueMemLimit != *src { - return false + if err := p.TargetFragmentInstanceAddr.Write(oprot); err != nil { + return err } - return true -} -func (p *TQueryOptions) Field74DeepEqual(src bool) bool { - - if p.EnableScanNodeRunSerial != src { - return false + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TQueryOptions) Field75DeepEqual(src bool) bool { - if p.EnableInsertStrict != src { - return false +func (p *TRuntimeFilterTargetParamsV2) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTargetFragmentIds() { + if err = oprot.WriteFieldBegin("target_fragment_ids", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TargetFragmentIds)); err != nil { + return err + } + for _, v := range p.TargetFragmentIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TQueryOptions) Field76DeepEqual(src bool) bool { - if p.EnableInvertedIndexQuery != src { - return false +func (p *TRuntimeFilterTargetParamsV2) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TRuntimeFilterTargetParamsV2(%+v)", *p) + } -func (p *TQueryOptions) Field77DeepEqual(src bool) bool { - if p.TruncateCharOrVarcharColumns != src { +func (p *TRuntimeFilterTargetParamsV2) DeepEqual(ano *TRuntimeFilterTargetParamsV2) bool { + if p == ano { + return true + } else if p == nil || ano == nil { return false } - return true -} -func (p *TQueryOptions) Field78DeepEqual(src bool) bool { - - if p.EnableHashJoinEarlyStartProbe != src { + if !p.Field1DeepEqual(ano.TargetFragmentInstanceIds) { return false } - return true -} -func (p *TQueryOptions) Field79DeepEqual(src bool) bool { - - if p.EnablePipelineXEngine != src { + if !p.Field2DeepEqual(ano.TargetFragmentInstanceAddr) { return false } - return true -} -func (p *TQueryOptions) Field80DeepEqual(src bool) bool { - - if p.EnableMemtableOnSinkNode != src { + if !p.Field3DeepEqual(ano.TargetFragmentIds) { return false } return true } -func (p *TQueryOptions) Field81DeepEqual(src bool) bool { - if p.EnableDeleteSubPredicateV2 != src { +func (p *TRuntimeFilterTargetParamsV2) Field1DeepEqual(src []*types.TUniqueId) bool { + + if len(p.TargetFragmentInstanceIds) != len(src) { 
return false } + for i, v := range p.TargetFragmentInstanceIds { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } return true } -func (p *TQueryOptions) Field82DeepEqual(src int64) bool { +func (p *TRuntimeFilterTargetParamsV2) Field2DeepEqual(src *types.TNetworkAddress) bool { - if p.FeProcessUuid != src { + if !p.TargetFragmentInstanceAddr.DeepEqual(src) { return false } return true } -func (p *TQueryOptions) Field83DeepEqual(src int32) bool { +func (p *TRuntimeFilterTargetParamsV2) Field3DeepEqual(src []int32) bool { - if p.InvertedIndexConjunctionOptThreshold != src { + if len(p.TargetFragmentIds) != len(src) { return false } + for i, v := range p.TargetFragmentIds { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TQueryOptions) Field84DeepEqual(src bool) bool { - if p.EnableProfile != src { - return false - } - return true +type TRuntimeFilterParams struct { + RuntimeFilterMergeAddr *types.TNetworkAddress `thrift:"runtime_filter_merge_addr,1,optional" frugal:"1,optional,types.TNetworkAddress" json:"runtime_filter_merge_addr,omitempty"` + RidToTargetParam map[int32][]*TRuntimeFilterTargetParams `thrift:"rid_to_target_param,2,optional" frugal:"2,optional,map>" json:"rid_to_target_param,omitempty"` + RidToRuntimeFilter map[int32]*plannodes.TRuntimeFilterDesc `thrift:"rid_to_runtime_filter,3,optional" frugal:"3,optional,map" json:"rid_to_runtime_filter,omitempty"` + RuntimeFilterBuilderNum map[int32]int32 `thrift:"runtime_filter_builder_num,4,optional" frugal:"4,optional,map" json:"runtime_filter_builder_num,omitempty"` + RidToTargetParamv2 map[int32][]*TRuntimeFilterTargetParamsV2 `thrift:"rid_to_target_paramv2,5,optional" frugal:"5,optional,map>" json:"rid_to_target_paramv2,omitempty"` } -func (p *TQueryOptions) Field85DeepEqual(src bool) bool { - if p.EnablePageCache != src { - return false - } - return true +func NewTRuntimeFilterParams() *TRuntimeFilterParams { + return &TRuntimeFilterParams{} } -func (p *TQueryOptions) Field86DeepEqual(src int32) bool { - if p.AnalyzeTimeout != src { - return false - } - return true +func (p *TRuntimeFilterParams) InitDefault() { } -type TScanRangeParams struct { - ScanRange *plannodes.TScanRange `thrift:"scan_range,1,required" frugal:"1,required,plannodes.TScanRange" json:"scan_range"` - VolumeId int32 `thrift:"volume_id,2,optional" frugal:"2,optional,i32" json:"volume_id,omitempty"` +var TRuntimeFilterParams_RuntimeFilterMergeAddr_DEFAULT *types.TNetworkAddress + +func (p *TRuntimeFilterParams) GetRuntimeFilterMergeAddr() (v *types.TNetworkAddress) { + if !p.IsSetRuntimeFilterMergeAddr() { + return TRuntimeFilterParams_RuntimeFilterMergeAddr_DEFAULT + } + return p.RuntimeFilterMergeAddr } -func NewTScanRangeParams() *TScanRangeParams { - return &TScanRangeParams{ +var TRuntimeFilterParams_RidToTargetParam_DEFAULT map[int32][]*TRuntimeFilterTargetParams - VolumeId: -1, +func (p *TRuntimeFilterParams) GetRidToTargetParam() (v map[int32][]*TRuntimeFilterTargetParams) { + if !p.IsSetRidToTargetParam() { + return TRuntimeFilterParams_RidToTargetParam_DEFAULT } + return p.RidToTargetParam } -func (p *TScanRangeParams) InitDefault() { - *p = TScanRangeParams{ +var TRuntimeFilterParams_RidToRuntimeFilter_DEFAULT map[int32]*plannodes.TRuntimeFilterDesc - VolumeId: -1, +func (p *TRuntimeFilterParams) GetRidToRuntimeFilter() (v map[int32]*plannodes.TRuntimeFilterDesc) { + if !p.IsSetRidToRuntimeFilter() { + return TRuntimeFilterParams_RidToRuntimeFilter_DEFAULT } + return p.RidToRuntimeFilter } -var 
TScanRangeParams_ScanRange_DEFAULT *plannodes.TScanRange +var TRuntimeFilterParams_RuntimeFilterBuilderNum_DEFAULT map[int32]int32 -func (p *TScanRangeParams) GetScanRange() (v *plannodes.TScanRange) { - if !p.IsSetScanRange() { - return TScanRangeParams_ScanRange_DEFAULT +func (p *TRuntimeFilterParams) GetRuntimeFilterBuilderNum() (v map[int32]int32) { + if !p.IsSetRuntimeFilterBuilderNum() { + return TRuntimeFilterParams_RuntimeFilterBuilderNum_DEFAULT } - return p.ScanRange + return p.RuntimeFilterBuilderNum } -var TScanRangeParams_VolumeId_DEFAULT int32 = -1 +var TRuntimeFilterParams_RidToTargetParamv2_DEFAULT map[int32][]*TRuntimeFilterTargetParamsV2 -func (p *TScanRangeParams) GetVolumeId() (v int32) { - if !p.IsSetVolumeId() { - return TScanRangeParams_VolumeId_DEFAULT +func (p *TRuntimeFilterParams) GetRidToTargetParamv2() (v map[int32][]*TRuntimeFilterTargetParamsV2) { + if !p.IsSetRidToTargetParamv2() { + return TRuntimeFilterParams_RidToTargetParamv2_DEFAULT } - return p.VolumeId + return p.RidToTargetParamv2 } -func (p *TScanRangeParams) SetScanRange(val *plannodes.TScanRange) { - p.ScanRange = val +func (p *TRuntimeFilterParams) SetRuntimeFilterMergeAddr(val *types.TNetworkAddress) { + p.RuntimeFilterMergeAddr = val } -func (p *TScanRangeParams) SetVolumeId(val int32) { - p.VolumeId = val +func (p *TRuntimeFilterParams) SetRidToTargetParam(val map[int32][]*TRuntimeFilterTargetParams) { + p.RidToTargetParam = val +} +func (p *TRuntimeFilterParams) SetRidToRuntimeFilter(val map[int32]*plannodes.TRuntimeFilterDesc) { + p.RidToRuntimeFilter = val +} +func (p *TRuntimeFilterParams) SetRuntimeFilterBuilderNum(val map[int32]int32) { + p.RuntimeFilterBuilderNum = val +} +func (p *TRuntimeFilterParams) SetRidToTargetParamv2(val map[int32][]*TRuntimeFilterTargetParamsV2) { + p.RidToTargetParamv2 = val } -var fieldIDToName_TScanRangeParams = map[int16]string{ - 1: "scan_range", - 2: "volume_id", +var fieldIDToName_TRuntimeFilterParams = map[int16]string{ + 1: "runtime_filter_merge_addr", + 2: "rid_to_target_param", + 3: "rid_to_runtime_filter", + 4: "runtime_filter_builder_num", + 5: "rid_to_target_paramv2", } -func (p *TScanRangeParams) IsSetScanRange() bool { - return p.ScanRange != nil +func (p *TRuntimeFilterParams) IsSetRuntimeFilterMergeAddr() bool { + return p.RuntimeFilterMergeAddr != nil } -func (p *TScanRangeParams) IsSetVolumeId() bool { - return p.VolumeId != TScanRangeParams_VolumeId_DEFAULT +func (p *TRuntimeFilterParams) IsSetRidToTargetParam() bool { + return p.RidToTargetParam != nil } -func (p *TScanRangeParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TRuntimeFilterParams) IsSetRidToRuntimeFilter() bool { + return p.RidToRuntimeFilter != nil +} + +func (p *TRuntimeFilterParams) IsSetRuntimeFilterBuilderNum() bool { + return p.RuntimeFilterBuilderNum != nil +} + +func (p *TRuntimeFilterParams) IsSetRidToTargetParamv2() bool { + return p.RidToTargetParamv2 != nil +} + +func (p *TRuntimeFilterParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetScanRange bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -7310,78 +12329,222 @@ func (p *TScanRangeParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetScanRange = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if 
fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.MAP { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.MAP { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TRuntimeFilterParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.RuntimeFilterMergeAddr = _field + return nil +} +func (p *TRuntimeFilterParams) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32][]*TRuntimeFilterTargetParams, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TRuntimeFilterTargetParams, 0, size) + values := make([]TRuntimeFilterTargetParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err } + + _field[_key] = _val } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.RidToTargetParam = _field + return nil +} +func (p *TRuntimeFilterParams) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err } + _field := make(map[int32]*plannodes.TRuntimeFilterDesc, size) + values := make([]plannodes.TRuntimeFilterDesc, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } - if !issetScanRange { - fieldId = 1 - goto 
RequiredFieldNotSetError + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err } + p.RidToRuntimeFilter = _field return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TScanRangeParams[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TScanRangeParams[fieldId])) } +func (p *TRuntimeFilterParams) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TScanRangeParams) ReadField1(iprot thrift.TProtocol) error { - p.ScanRange = plannodes.NewTScanRange() - if err := p.ScanRange.Read(iprot); err != nil { + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err } + p.RuntimeFilterBuilderNum = _field return nil } +func (p *TRuntimeFilterParams) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32][]*TRuntimeFilterTargetParamsV2, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TRuntimeFilterTargetParamsV2, 0, size) + values := make([]TRuntimeFilterTargetParamsV2, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TScanRangeParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.VolumeId = v } + p.RidToTargetParamv2 = _field return nil } -func (p *TScanRangeParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TRuntimeFilterParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TScanRangeParams"); err != nil { + if err = oprot.WriteStructBegin("TRuntimeFilterParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -7393,7 +12556,18 @@ func (p *TScanRangeParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + 
} + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7412,15 +12586,17 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TScanRangeParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("scan_range", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.ScanRange.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TRuntimeFilterParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterMergeAddr() { + if err = oprot.WriteFieldBegin("runtime_filter_merge_addr", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.RuntimeFilterMergeAddr.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -7429,12 +12605,129 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TScanRangeParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetVolumeId() { - if err = oprot.WriteFieldBegin("volume_id", thrift.I32, 2); err != nil { +func (p *TRuntimeFilterParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRidToTargetParam() { + if err = oprot.WriteFieldBegin("rid_to_target_param", thrift.MAP, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.RidToTargetParam)); err != nil { + return err + } + for k, v := range p.RidToTargetParam { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TRuntimeFilterParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetRidToRuntimeFilter() { + if err = oprot.WriteFieldBegin("rid_to_runtime_filter", thrift.MAP, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.RidToRuntimeFilter)); err != nil { + return err + } + for k, v := range p.RidToRuntimeFilter { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TRuntimeFilterParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterBuilderNum() { + if err = oprot.WriteFieldBegin("runtime_filter_builder_num", thrift.MAP, 4); err != nil { + goto 
WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.RuntimeFilterBuilderNum)); err != nil { + return err + } + for k, v := range p.RuntimeFilterBuilderNum { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TRuntimeFilterParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRidToTargetParamv2() { + if err = oprot.WriteFieldBegin("rid_to_target_paramv2", thrift.MAP, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(p.VolumeId); err != nil { + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.RidToTargetParamv2)); err != nil { + return err + } + for k, v := range p.RidToTargetParamv2 { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -7443,104 +12736,307 @@ func (p *TScanRangeParams) writeField2(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TScanRangeParams) String() string { +func (p *TRuntimeFilterParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TScanRangeParams(%+v)", *p) + return fmt.Sprintf("TRuntimeFilterParams(%+v)", *p) + } -func (p *TScanRangeParams) DeepEqual(ano *TScanRangeParams) bool { +func (p *TRuntimeFilterParams) DeepEqual(ano *TRuntimeFilterParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ScanRange) { + if !p.Field1DeepEqual(ano.RuntimeFilterMergeAddr) { return false } - if !p.Field2DeepEqual(ano.VolumeId) { + if !p.Field2DeepEqual(ano.RidToTargetParam) { + return false + } + if !p.Field3DeepEqual(ano.RidToRuntimeFilter) { + return false + } + if !p.Field4DeepEqual(ano.RuntimeFilterBuilderNum) { + return false + } + if !p.Field5DeepEqual(ano.RidToTargetParamv2) { return false } return true } -func (p *TScanRangeParams) Field1DeepEqual(src *plannodes.TScanRange) bool { +func (p *TRuntimeFilterParams) Field1DeepEqual(src *types.TNetworkAddress) bool { - if !p.ScanRange.DeepEqual(src) { + if !p.RuntimeFilterMergeAddr.DeepEqual(src) { return false } return true } -func (p *TScanRangeParams) Field2DeepEqual(src int32) bool { +func (p *TRuntimeFilterParams) Field2DeepEqual(src map[int32][]*TRuntimeFilterTargetParams) bool { - if p.VolumeId != src { + if len(p.RidToTargetParam) != len(src) { return false } + for k, v := range p.RidToTargetParam { + _src := src[k] + if len(v) != len(_src) { + return false + } + for i, v 
:= range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } return true } +func (p *TRuntimeFilterParams) Field3DeepEqual(src map[int32]*plannodes.TRuntimeFilterDesc) bool { -type TRuntimeFilterTargetParams struct { - TargetFragmentInstanceId *types.TUniqueId `thrift:"target_fragment_instance_id,1,required" frugal:"1,required,types.TUniqueId" json:"target_fragment_instance_id"` - TargetFragmentInstanceAddr *types.TNetworkAddress `thrift:"target_fragment_instance_addr,2,required" frugal:"2,required,types.TNetworkAddress" json:"target_fragment_instance_addr"` + if len(p.RidToRuntimeFilter) != len(src) { + return false + } + for k, v := range p.RidToRuntimeFilter { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } + } + return true } +func (p *TRuntimeFilterParams) Field4DeepEqual(src map[int32]int32) bool { -func NewTRuntimeFilterTargetParams() *TRuntimeFilterTargetParams { - return &TRuntimeFilterTargetParams{} + if len(p.RuntimeFilterBuilderNum) != len(src) { + return false + } + for k, v := range p.RuntimeFilterBuilderNum { + _src := src[k] + if v != _src { + return false + } + } + return true } +func (p *TRuntimeFilterParams) Field5DeepEqual(src map[int32][]*TRuntimeFilterTargetParamsV2) bool { -func (p *TRuntimeFilterTargetParams) InitDefault() { - *p = TRuntimeFilterTargetParams{} + if len(p.RidToTargetParamv2) != len(src) { + return false + } + for k, v := range p.RidToTargetParamv2 { + _src := src[k] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true } -var TRuntimeFilterTargetParams_TargetFragmentInstanceId_DEFAULT *types.TUniqueId +type TPlanFragmentExecParams struct { + QueryId *types.TUniqueId `thrift:"query_id,1,required" frugal:"1,required,types.TUniqueId" json:"query_id"` + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,required" frugal:"2,required,types.TUniqueId" json:"fragment_instance_id"` + PerNodeScanRanges map[types.TPlanNodeId][]*TScanRangeParams `thrift:"per_node_scan_ranges,3,required" frugal:"3,required,map>" json:"per_node_scan_ranges"` + PerExchNumSenders map[types.TPlanNodeId]int32 `thrift:"per_exch_num_senders,4,required" frugal:"4,required,map" json:"per_exch_num_senders"` + Destinations []*datasinks.TPlanFragmentDestination `thrift:"destinations,5" frugal:"5,default,list" json:"destinations"` + SenderId *int32 `thrift:"sender_id,9,optional" frugal:"9,optional,i32" json:"sender_id,omitempty"` + NumSenders *int32 `thrift:"num_senders,10,optional" frugal:"10,optional,i32" json:"num_senders,omitempty"` + SendQueryStatisticsWithEveryBatch *bool `thrift:"send_query_statistics_with_every_batch,11,optional" frugal:"11,optional,bool" json:"send_query_statistics_with_every_batch,omitempty"` + RuntimeFilterParams *TRuntimeFilterParams `thrift:"runtime_filter_params,12,optional" frugal:"12,optional,TRuntimeFilterParams" json:"runtime_filter_params,omitempty"` + GroupCommit *bool `thrift:"group_commit,13,optional" frugal:"13,optional,bool" json:"group_commit,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,14,optional" frugal:"14,optional,list" json:"topn_filter_source_node_ids,omitempty"` +} -func (p *TRuntimeFilterTargetParams) GetTargetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetTargetFragmentInstanceId() { - return TRuntimeFilterTargetParams_TargetFragmentInstanceId_DEFAULT +func NewTPlanFragmentExecParams() *TPlanFragmentExecParams { + return 
&TPlanFragmentExecParams{} +} + +func (p *TPlanFragmentExecParams) InitDefault() { +} + +var TPlanFragmentExecParams_QueryId_DEFAULT *types.TUniqueId + +func (p *TPlanFragmentExecParams) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TPlanFragmentExecParams_QueryId_DEFAULT } - return p.TargetFragmentInstanceId + return p.QueryId } -var TRuntimeFilterTargetParams_TargetFragmentInstanceAddr_DEFAULT *types.TNetworkAddress +var TPlanFragmentExecParams_FragmentInstanceId_DEFAULT *types.TUniqueId -func (p *TRuntimeFilterTargetParams) GetTargetFragmentInstanceAddr() (v *types.TNetworkAddress) { - if !p.IsSetTargetFragmentInstanceAddr() { - return TRuntimeFilterTargetParams_TargetFragmentInstanceAddr_DEFAULT +func (p *TPlanFragmentExecParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TPlanFragmentExecParams_FragmentInstanceId_DEFAULT } - return p.TargetFragmentInstanceAddr + return p.FragmentInstanceId +} + +func (p *TPlanFragmentExecParams) GetPerNodeScanRanges() (v map[types.TPlanNodeId][]*TScanRangeParams) { + return p.PerNodeScanRanges +} + +func (p *TPlanFragmentExecParams) GetPerExchNumSenders() (v map[types.TPlanNodeId]int32) { + return p.PerExchNumSenders +} + +func (p *TPlanFragmentExecParams) GetDestinations() (v []*datasinks.TPlanFragmentDestination) { + return p.Destinations +} + +var TPlanFragmentExecParams_SenderId_DEFAULT int32 + +func (p *TPlanFragmentExecParams) GetSenderId() (v int32) { + if !p.IsSetSenderId() { + return TPlanFragmentExecParams_SenderId_DEFAULT + } + return *p.SenderId +} + +var TPlanFragmentExecParams_NumSenders_DEFAULT int32 + +func (p *TPlanFragmentExecParams) GetNumSenders() (v int32) { + if !p.IsSetNumSenders() { + return TPlanFragmentExecParams_NumSenders_DEFAULT + } + return *p.NumSenders +} + +var TPlanFragmentExecParams_SendQueryStatisticsWithEveryBatch_DEFAULT bool + +func (p *TPlanFragmentExecParams) GetSendQueryStatisticsWithEveryBatch() (v bool) { + if !p.IsSetSendQueryStatisticsWithEveryBatch() { + return TPlanFragmentExecParams_SendQueryStatisticsWithEveryBatch_DEFAULT + } + return *p.SendQueryStatisticsWithEveryBatch +} + +var TPlanFragmentExecParams_RuntimeFilterParams_DEFAULT *TRuntimeFilterParams + +func (p *TPlanFragmentExecParams) GetRuntimeFilterParams() (v *TRuntimeFilterParams) { + if !p.IsSetRuntimeFilterParams() { + return TPlanFragmentExecParams_RuntimeFilterParams_DEFAULT + } + return p.RuntimeFilterParams +} + +var TPlanFragmentExecParams_GroupCommit_DEFAULT bool + +func (p *TPlanFragmentExecParams) GetGroupCommit() (v bool) { + if !p.IsSetGroupCommit() { + return TPlanFragmentExecParams_GroupCommit_DEFAULT + } + return *p.GroupCommit +} + +var TPlanFragmentExecParams_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPlanFragmentExecParams) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPlanFragmentExecParams_TopnFilterSourceNodeIds_DEFAULT + } + return p.TopnFilterSourceNodeIds +} +func (p *TPlanFragmentExecParams) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TPlanFragmentExecParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val +} +func (p *TPlanFragmentExecParams) SetPerNodeScanRanges(val map[types.TPlanNodeId][]*TScanRangeParams) { + p.PerNodeScanRanges = val +} +func (p *TPlanFragmentExecParams) SetPerExchNumSenders(val map[types.TPlanNodeId]int32) { + p.PerExchNumSenders = val +} +func (p *TPlanFragmentExecParams) SetDestinations(val 
[]*datasinks.TPlanFragmentDestination) { + p.Destinations = val +} +func (p *TPlanFragmentExecParams) SetSenderId(val *int32) { + p.SenderId = val +} +func (p *TPlanFragmentExecParams) SetNumSenders(val *int32) { + p.NumSenders = val +} +func (p *TPlanFragmentExecParams) SetSendQueryStatisticsWithEveryBatch(val *bool) { + p.SendQueryStatisticsWithEveryBatch = val +} +func (p *TPlanFragmentExecParams) SetRuntimeFilterParams(val *TRuntimeFilterParams) { + p.RuntimeFilterParams = val +} +func (p *TPlanFragmentExecParams) SetGroupCommit(val *bool) { + p.GroupCommit = val +} +func (p *TPlanFragmentExecParams) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} + +var fieldIDToName_TPlanFragmentExecParams = map[int16]string{ + 1: "query_id", + 2: "fragment_instance_id", + 3: "per_node_scan_ranges", + 4: "per_exch_num_senders", + 5: "destinations", + 9: "sender_id", + 10: "num_senders", + 11: "send_query_statistics_with_every_batch", + 12: "runtime_filter_params", + 13: "group_commit", + 14: "topn_filter_source_node_ids", +} + +func (p *TPlanFragmentExecParams) IsSetQueryId() bool { + return p.QueryId != nil +} + +func (p *TPlanFragmentExecParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil +} + +func (p *TPlanFragmentExecParams) IsSetSenderId() bool { + return p.SenderId != nil } -func (p *TRuntimeFilterTargetParams) SetTargetFragmentInstanceId(val *types.TUniqueId) { - p.TargetFragmentInstanceId = val + +func (p *TPlanFragmentExecParams) IsSetNumSenders() bool { + return p.NumSenders != nil } -func (p *TRuntimeFilterTargetParams) SetTargetFragmentInstanceAddr(val *types.TNetworkAddress) { - p.TargetFragmentInstanceAddr = val + +func (p *TPlanFragmentExecParams) IsSetSendQueryStatisticsWithEveryBatch() bool { + return p.SendQueryStatisticsWithEveryBatch != nil } -var fieldIDToName_TRuntimeFilterTargetParams = map[int16]string{ - 1: "target_fragment_instance_id", - 2: "target_fragment_instance_addr", +func (p *TPlanFragmentExecParams) IsSetRuntimeFilterParams() bool { + return p.RuntimeFilterParams != nil } -func (p *TRuntimeFilterTargetParams) IsSetTargetFragmentInstanceId() bool { - return p.TargetFragmentInstanceId != nil +func (p *TPlanFragmentExecParams) IsSetGroupCommit() bool { + return p.GroupCommit != nil } -func (p *TRuntimeFilterTargetParams) IsSetTargetFragmentInstanceAddr() bool { - return p.TargetFragmentInstanceAddr != nil +func (p *TPlanFragmentExecParams) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil } -func (p *TRuntimeFilterTargetParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TPlanFragmentExecParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetTargetFragmentInstanceId bool = false - var issetTargetFragmentInstanceAddr bool = false + var issetQueryId bool = false + var issetFragmentInstanceId bool = false + var issetPerNodeScanRanges bool = false + var issetPerExchNumSenders bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -7561,29 +13057,98 @@ func (p *TRuntimeFilterTargetParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetTargetFragmentInstanceId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetQueryId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); 
err != nil { goto ReadFieldError } - issetTargetFragmentInstanceAddr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetFragmentInstanceId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetPerNodeScanRanges = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetPerExchNumSenders = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.LIST { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.LIST { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7592,22 +13157,32 @@ func (p *TRuntimeFilterTargetParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetTargetFragmentInstanceId { + if !issetQueryId { fieldId = 1 goto RequiredFieldNotSetError } - if !issetTargetFragmentInstanceAddr { + if !issetFragmentInstanceId { fieldId = 2 goto RequiredFieldNotSetError } + + if !issetPerNodeScanRanges { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetPerExchNumSenders { + fieldId = 4 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterTargetParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlanFragmentExecParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -7616,28 +13191,197 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end 
error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRuntimeFilterTargetParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPlanFragmentExecParams[fieldId])) +} + +func (p *TPlanFragmentExecParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryId = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.FragmentInstanceId = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId][]*TScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TScanRangeParams, 0, size) + values := make([]TScanRangeParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PerNodeScanRanges = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId]int32, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PerExchNumSenders = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*datasinks.TPlanFragmentDestination, 0, size) + values := make([]datasinks.TPlanFragmentDestination, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Destinations = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField9(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SenderId = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField10(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.NumSenders = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.SendQueryStatisticsWithEveryBatch = _field + return nil +} 
+func (p *TPlanFragmentExecParams) ReadField12(iprot thrift.TProtocol) error { + _field := NewTRuntimeFilterParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.RuntimeFilterParams = _field + return nil +} +func (p *TPlanFragmentExecParams) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommit = _field + return nil } +func (p *TPlanFragmentExecParams) ReadField14(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } -func (p *TRuntimeFilterTargetParams) ReadField1(iprot thrift.TProtocol) error { - p.TargetFragmentInstanceId = types.NewTUniqueId() - if err := p.TargetFragmentInstanceId.Read(iprot); err != nil { - return err + _field = append(_field, _elem) } - return nil -} - -func (p *TRuntimeFilterTargetParams) ReadField2(iprot thrift.TProtocol) error { - p.TargetFragmentInstanceAddr = types.NewTNetworkAddress() - if err := p.TargetFragmentInstanceAddr.Read(iprot); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } + p.TopnFilterSourceNodeIds = _field return nil } -func (p *TRuntimeFilterTargetParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TPlanFragmentExecParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRuntimeFilterTargetParams"); err != nil { + if err = oprot.WriteStructBegin("TPlanFragmentExecParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -7649,7 +13393,42 @@ func (p *TRuntimeFilterTargetParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7668,11 +13447,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRuntimeFilterTargetParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("target_fragment_instance_id", thrift.STRUCT, 1); err != nil { +func (p *TPlanFragmentExecParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.TargetFragmentInstanceId.Write(oprot); err != nil { + if err := p.QueryId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -7682,442 +13461,518 @@ func (p *TRuntimeFilterTargetParams) writeField1(oprot thrift.TProtocol) (err er WriteFieldBeginError: return 
thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.FragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("per_node_scan_ranges", thrift.MAP, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)); err != nil { + return err + } + for k, v := range p.PerNodeScanRanges { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("per_exch_num_senders", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.PerExchNumSenders)); err != nil { + return err + } + for k, v := range p.PerExchNumSenders { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("destinations", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Destinations)); err != nil { + return err + } + for _, v := range p.Destinations { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField9(oprot 
thrift.TProtocol) (err error) { + if p.IsSetSenderId() { + if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SenderId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetNumSenders() { + if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumSenders); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetSendQueryStatisticsWithEveryBatch() { + if err = oprot.WriteFieldBegin("send_query_statistics_with_every_batch", thrift.BOOL, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.SendQueryStatisticsWithEveryBatch); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterParams() { + if err = oprot.WriteFieldBegin("runtime_filter_params", thrift.STRUCT, 12); err != nil { + goto WriteFieldBeginError + } + if err := p.RuntimeFilterParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TPlanFragmentExecParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommit() { + if err = oprot.WriteFieldBegin("group_commit", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.GroupCommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -func (p *TRuntimeFilterTargetParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("target_fragment_instance_addr", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.TargetFragmentInstanceAddr.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TPlanFragmentExecParams) writeField14(oprot thrift.TProtocol) (err error) { + if 
p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } -func (p *TRuntimeFilterTargetParams) String() string { +func (p *TPlanFragmentExecParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TRuntimeFilterTargetParams(%+v)", *p) + return fmt.Sprintf("TPlanFragmentExecParams(%+v)", *p) + } -func (p *TRuntimeFilterTargetParams) DeepEqual(ano *TRuntimeFilterTargetParams) bool { +func (p *TPlanFragmentExecParams) DeepEqual(ano *TPlanFragmentExecParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TargetFragmentInstanceId) { + if !p.Field1DeepEqual(ano.QueryId) { return false } - if !p.Field2DeepEqual(ano.TargetFragmentInstanceAddr) { + if !p.Field2DeepEqual(ano.FragmentInstanceId) { return false } - return true -} - -func (p *TRuntimeFilterTargetParams) Field1DeepEqual(src *types.TUniqueId) bool { - - if !p.TargetFragmentInstanceId.DeepEqual(src) { + if !p.Field3DeepEqual(ano.PerNodeScanRanges) { return false } - return true -} -func (p *TRuntimeFilterTargetParams) Field2DeepEqual(src *types.TNetworkAddress) bool { - - if !p.TargetFragmentInstanceAddr.DeepEqual(src) { + if !p.Field4DeepEqual(ano.PerExchNumSenders) { return false } - return true -} - -type TRuntimeFilterTargetParamsV2 struct { - TargetFragmentInstanceIds []*types.TUniqueId `thrift:"target_fragment_instance_ids,1,required" frugal:"1,required,list" json:"target_fragment_instance_ids"` - TargetFragmentInstanceAddr *types.TNetworkAddress `thrift:"target_fragment_instance_addr,2,required" frugal:"2,required,types.TNetworkAddress" json:"target_fragment_instance_addr"` -} - -func NewTRuntimeFilterTargetParamsV2() *TRuntimeFilterTargetParamsV2 { - return &TRuntimeFilterTargetParamsV2{} -} - -func (p *TRuntimeFilterTargetParamsV2) InitDefault() { - *p = TRuntimeFilterTargetParamsV2{} -} - -func (p *TRuntimeFilterTargetParamsV2) GetTargetFragmentInstanceIds() (v []*types.TUniqueId) { - return p.TargetFragmentInstanceIds -} - -var TRuntimeFilterTargetParamsV2_TargetFragmentInstanceAddr_DEFAULT *types.TNetworkAddress - -func (p *TRuntimeFilterTargetParamsV2) GetTargetFragmentInstanceAddr() (v *types.TNetworkAddress) { - if !p.IsSetTargetFragmentInstanceAddr() { - return TRuntimeFilterTargetParamsV2_TargetFragmentInstanceAddr_DEFAULT + if !p.Field5DeepEqual(ano.Destinations) { + return false } - return p.TargetFragmentInstanceAddr -} -func (p *TRuntimeFilterTargetParamsV2) SetTargetFragmentInstanceIds(val []*types.TUniqueId) { - p.TargetFragmentInstanceIds = val -} -func (p *TRuntimeFilterTargetParamsV2) SetTargetFragmentInstanceAddr(val *types.TNetworkAddress) { - p.TargetFragmentInstanceAddr = val -} - -var 
fieldIDToName_TRuntimeFilterTargetParamsV2 = map[int16]string{ - 1: "target_fragment_instance_ids", - 2: "target_fragment_instance_addr", -} - -func (p *TRuntimeFilterTargetParamsV2) IsSetTargetFragmentInstanceAddr() bool { - return p.TargetFragmentInstanceAddr != nil -} - -func (p *TRuntimeFilterTargetParamsV2) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetTargetFragmentInstanceIds bool = false - var issetTargetFragmentInstanceAddr bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + if !p.Field9DeepEqual(ano.SenderId) { + return false } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetTargetFragmentInstanceIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetTargetFragmentInstanceAddr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } + if !p.Field10DeepEqual(ano.NumSenders) { + return false } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if !p.Field11DeepEqual(ano.SendQueryStatisticsWithEveryBatch) { + return false } - - if !issetTargetFragmentInstanceIds { - fieldId = 1 - goto RequiredFieldNotSetError + if !p.Field12DeepEqual(ano.RuntimeFilterParams) { + return false } - - if !issetTargetFragmentInstanceAddr { - fieldId = 2 - goto RequiredFieldNotSetError + if !p.Field13DeepEqual(ano.GroupCommit) { + return false } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterTargetParamsV2[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TRuntimeFilterTargetParamsV2[fieldId])) + if !p.Field14DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } + return true } -func (p *TRuntimeFilterTargetParamsV2) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.TargetFragmentInstanceIds = make([]*types.TUniqueId, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTUniqueId() - if err := _elem.Read(iprot); err != nil { - return err - } +func (p *TPlanFragmentExecParams) Field1DeepEqual(src *types.TUniqueId) bool { - p.TargetFragmentInstanceIds = append(p.TargetFragmentInstanceIds, _elem) - } - if err := iprot.ReadListEnd(); 
err != nil { - return err + if !p.QueryId.DeepEqual(src) { + return false } - return nil + return true } +func (p *TPlanFragmentExecParams) Field2DeepEqual(src *types.TUniqueId) bool { -func (p *TRuntimeFilterTargetParamsV2) ReadField2(iprot thrift.TProtocol) error { - p.TargetFragmentInstanceAddr = types.NewTNetworkAddress() - if err := p.TargetFragmentInstanceAddr.Read(iprot); err != nil { - return err + if !p.FragmentInstanceId.DeepEqual(src) { + return false } - return nil + return true } +func (p *TPlanFragmentExecParams) Field3DeepEqual(src map[types.TPlanNodeId][]*TScanRangeParams) bool { -func (p *TRuntimeFilterTargetParamsV2) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TRuntimeFilterTargetParamsV2"); err != nil { - goto WriteStructBeginError + if len(p.PerNodeScanRanges) != len(src) { + return false } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError + for k, v := range p.PerNodeScanRanges { + _src := src[k] + if len(v) != len(_src) { + return false } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } } - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return true } - -func (p *TRuntimeFilterTargetParamsV2) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("target_fragment_instance_ids", thrift.LIST, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TargetFragmentInstanceIds)); err != nil { - return err +func (p *TPlanFragmentExecParams) Field4DeepEqual(src map[types.TPlanNodeId]int32) bool { + + if len(p.PerExchNumSenders) != len(src) { + return false } - for _, v := range p.TargetFragmentInstanceIds { - if err := v.Write(oprot); err != nil { - return err + for k, v := range p.PerExchNumSenders { + _src := src[k] + if v != _src { + return false } } - if err := oprot.WriteListEnd(); err != nil { - return err + return true +} +func (p *TPlanFragmentExecParams) Field5DeepEqual(src []*datasinks.TPlanFragmentDestination) bool { + + if len(p.Destinations) != len(src) { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + for i, v := range p.Destinations { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return true } +func (p *TPlanFragmentExecParams) Field9DeepEqual(src *int32) bool { -func (p *TRuntimeFilterTargetParamsV2) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("target_fragment_instance_addr", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.TargetFragmentInstanceAddr.Write(oprot); err != nil { - 
return err + if p.SenderId == src { + return true + } else if p.SenderId == nil || src == nil { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if *p.SenderId != *src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return true } +func (p *TPlanFragmentExecParams) Field10DeepEqual(src *int32) bool { -func (p *TRuntimeFilterTargetParamsV2) String() string { - if p == nil { - return "" + if p.NumSenders == src { + return true + } else if p.NumSenders == nil || src == nil { + return false } - return fmt.Sprintf("TRuntimeFilterTargetParamsV2(%+v)", *p) + if *p.NumSenders != *src { + return false + } + return true } +func (p *TPlanFragmentExecParams) Field11DeepEqual(src *bool) bool { -func (p *TRuntimeFilterTargetParamsV2) DeepEqual(ano *TRuntimeFilterTargetParamsV2) bool { - if p == ano { + if p.SendQueryStatisticsWithEveryBatch == src { return true - } else if p == nil || ano == nil { + } else if p.SendQueryStatisticsWithEveryBatch == nil || src == nil { return false } - if !p.Field1DeepEqual(ano.TargetFragmentInstanceIds) { + if *p.SendQueryStatisticsWithEveryBatch != *src { return false } - if !p.Field2DeepEqual(ano.TargetFragmentInstanceAddr) { + return true +} +func (p *TPlanFragmentExecParams) Field12DeepEqual(src *TRuntimeFilterParams) bool { + + if !p.RuntimeFilterParams.DeepEqual(src) { return false } return true } +func (p *TPlanFragmentExecParams) Field13DeepEqual(src *bool) bool { -func (p *TRuntimeFilterTargetParamsV2) Field1DeepEqual(src []*types.TUniqueId) bool { - - if len(p.TargetFragmentInstanceIds) != len(src) { + if p.GroupCommit == src { + return true + } else if p.GroupCommit == nil || src == nil { return false } - for i, v := range p.TargetFragmentInstanceIds { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if *p.GroupCommit != *src { + return false } return true } -func (p *TRuntimeFilterTargetParamsV2) Field2DeepEqual(src *types.TNetworkAddress) bool { +func (p *TPlanFragmentExecParams) Field14DeepEqual(src []int32) bool { - if !p.TargetFragmentInstanceAddr.DeepEqual(src) { + if len(p.TopnFilterSourceNodeIds) != len(src) { return false } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } return true } -type TRuntimeFilterParams struct { - RuntimeFilterMergeAddr *types.TNetworkAddress `thrift:"runtime_filter_merge_addr,1,optional" frugal:"1,optional,types.TNetworkAddress" json:"runtime_filter_merge_addr,omitempty"` - RidToTargetParam map[int32][]*TRuntimeFilterTargetParams `thrift:"rid_to_target_param,2,optional" frugal:"2,optional,map>" json:"rid_to_target_param,omitempty"` - RidToRuntimeFilter map[int32]*plannodes.TRuntimeFilterDesc `thrift:"rid_to_runtime_filter,3,optional" frugal:"3,optional,map" json:"rid_to_runtime_filter,omitempty"` - RuntimeFilterBuilderNum map[int32]int32 `thrift:"runtime_filter_builder_num,4,optional" frugal:"4,optional,map" json:"runtime_filter_builder_num,omitempty"` - RidToTargetParamv2 map[int32][]*TRuntimeFilterTargetParamsV2 `thrift:"rid_to_target_paramv2,5,optional" frugal:"5,optional,map>" json:"rid_to_target_paramv2,omitempty"` +type TQueryGlobals struct { + NowString string `thrift:"now_string,1,required" frugal:"1,required,string" json:"now_string"` + TimestampMs *int64 `thrift:"timestamp_ms,2,optional" frugal:"2,optional,i64" 
json:"timestamp_ms,omitempty"` + TimeZone *string `thrift:"time_zone,3,optional" frugal:"3,optional,string" json:"time_zone,omitempty"` + LoadZeroTolerance bool `thrift:"load_zero_tolerance,4,optional" frugal:"4,optional,bool" json:"load_zero_tolerance,omitempty"` + NanoSeconds *int32 `thrift:"nano_seconds,5,optional" frugal:"5,optional,i32" json:"nano_seconds,omitempty"` } -func NewTRuntimeFilterParams() *TRuntimeFilterParams { - return &TRuntimeFilterParams{} -} +func NewTQueryGlobals() *TQueryGlobals { + return &TQueryGlobals{ -func (p *TRuntimeFilterParams) InitDefault() { - *p = TRuntimeFilterParams{} + LoadZeroTolerance: false, + } } -var TRuntimeFilterParams_RuntimeFilterMergeAddr_DEFAULT *types.TNetworkAddress +func (p *TQueryGlobals) InitDefault() { + p.LoadZeroTolerance = false +} -func (p *TRuntimeFilterParams) GetRuntimeFilterMergeAddr() (v *types.TNetworkAddress) { - if !p.IsSetRuntimeFilterMergeAddr() { - return TRuntimeFilterParams_RuntimeFilterMergeAddr_DEFAULT - } - return p.RuntimeFilterMergeAddr +func (p *TQueryGlobals) GetNowString() (v string) { + return p.NowString } -var TRuntimeFilterParams_RidToTargetParam_DEFAULT map[int32][]*TRuntimeFilterTargetParams +var TQueryGlobals_TimestampMs_DEFAULT int64 -func (p *TRuntimeFilterParams) GetRidToTargetParam() (v map[int32][]*TRuntimeFilterTargetParams) { - if !p.IsSetRidToTargetParam() { - return TRuntimeFilterParams_RidToTargetParam_DEFAULT +func (p *TQueryGlobals) GetTimestampMs() (v int64) { + if !p.IsSetTimestampMs() { + return TQueryGlobals_TimestampMs_DEFAULT } - return p.RidToTargetParam + return *p.TimestampMs } -var TRuntimeFilterParams_RidToRuntimeFilter_DEFAULT map[int32]*plannodes.TRuntimeFilterDesc +var TQueryGlobals_TimeZone_DEFAULT string -func (p *TRuntimeFilterParams) GetRidToRuntimeFilter() (v map[int32]*plannodes.TRuntimeFilterDesc) { - if !p.IsSetRidToRuntimeFilter() { - return TRuntimeFilterParams_RidToRuntimeFilter_DEFAULT +func (p *TQueryGlobals) GetTimeZone() (v string) { + if !p.IsSetTimeZone() { + return TQueryGlobals_TimeZone_DEFAULT } - return p.RidToRuntimeFilter + return *p.TimeZone } -var TRuntimeFilterParams_RuntimeFilterBuilderNum_DEFAULT map[int32]int32 +var TQueryGlobals_LoadZeroTolerance_DEFAULT bool = false -func (p *TRuntimeFilterParams) GetRuntimeFilterBuilderNum() (v map[int32]int32) { - if !p.IsSetRuntimeFilterBuilderNum() { - return TRuntimeFilterParams_RuntimeFilterBuilderNum_DEFAULT +func (p *TQueryGlobals) GetLoadZeroTolerance() (v bool) { + if !p.IsSetLoadZeroTolerance() { + return TQueryGlobals_LoadZeroTolerance_DEFAULT } - return p.RuntimeFilterBuilderNum + return p.LoadZeroTolerance } -var TRuntimeFilterParams_RidToTargetParamv2_DEFAULT map[int32][]*TRuntimeFilterTargetParamsV2 +var TQueryGlobals_NanoSeconds_DEFAULT int32 -func (p *TRuntimeFilterParams) GetRidToTargetParamv2() (v map[int32][]*TRuntimeFilterTargetParamsV2) { - if !p.IsSetRidToTargetParamv2() { - return TRuntimeFilterParams_RidToTargetParamv2_DEFAULT +func (p *TQueryGlobals) GetNanoSeconds() (v int32) { + if !p.IsSetNanoSeconds() { + return TQueryGlobals_NanoSeconds_DEFAULT } - return p.RidToTargetParamv2 -} -func (p *TRuntimeFilterParams) SetRuntimeFilterMergeAddr(val *types.TNetworkAddress) { - p.RuntimeFilterMergeAddr = val + return *p.NanoSeconds } -func (p *TRuntimeFilterParams) SetRidToTargetParam(val map[int32][]*TRuntimeFilterTargetParams) { - p.RidToTargetParam = val +func (p *TQueryGlobals) SetNowString(val string) { + p.NowString = val } -func (p *TRuntimeFilterParams) SetRidToRuntimeFilter(val 
map[int32]*plannodes.TRuntimeFilterDesc) { - p.RidToRuntimeFilter = val +func (p *TQueryGlobals) SetTimestampMs(val *int64) { + p.TimestampMs = val } -func (p *TRuntimeFilterParams) SetRuntimeFilterBuilderNum(val map[int32]int32) { - p.RuntimeFilterBuilderNum = val +func (p *TQueryGlobals) SetTimeZone(val *string) { + p.TimeZone = val } -func (p *TRuntimeFilterParams) SetRidToTargetParamv2(val map[int32][]*TRuntimeFilterTargetParamsV2) { - p.RidToTargetParamv2 = val +func (p *TQueryGlobals) SetLoadZeroTolerance(val bool) { + p.LoadZeroTolerance = val } - -var fieldIDToName_TRuntimeFilterParams = map[int16]string{ - 1: "runtime_filter_merge_addr", - 2: "rid_to_target_param", - 3: "rid_to_runtime_filter", - 4: "runtime_filter_builder_num", - 5: "rid_to_target_paramv2", +func (p *TQueryGlobals) SetNanoSeconds(val *int32) { + p.NanoSeconds = val } -func (p *TRuntimeFilterParams) IsSetRuntimeFilterMergeAddr() bool { - return p.RuntimeFilterMergeAddr != nil +var fieldIDToName_TQueryGlobals = map[int16]string{ + 1: "now_string", + 2: "timestamp_ms", + 3: "time_zone", + 4: "load_zero_tolerance", + 5: "nano_seconds", } -func (p *TRuntimeFilterParams) IsSetRidToTargetParam() bool { - return p.RidToTargetParam != nil +func (p *TQueryGlobals) IsSetTimestampMs() bool { + return p.TimestampMs != nil } -func (p *TRuntimeFilterParams) IsSetRidToRuntimeFilter() bool { - return p.RidToRuntimeFilter != nil +func (p *TQueryGlobals) IsSetTimeZone() bool { + return p.TimeZone != nil } -func (p *TRuntimeFilterParams) IsSetRuntimeFilterBuilderNum() bool { - return p.RuntimeFilterBuilderNum != nil +func (p *TQueryGlobals) IsSetLoadZeroTolerance() bool { + return p.LoadZeroTolerance != TQueryGlobals_LoadZeroTolerance_DEFAULT } -func (p *TRuntimeFilterParams) IsSetRidToTargetParamv2() bool { - return p.RidToTargetParamv2 != nil +func (p *TQueryGlobals) IsSetNanoSeconds() bool { + return p.NanoSeconds != nil } -func (p *TRuntimeFilterParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TQueryGlobals) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetNowString bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -8134,61 +13989,51 @@ func (p *TRuntimeFilterParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetNowString = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.BOOL { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err 
!= nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8196,14 +14041,18 @@ func (p *TRuntimeFilterParams) Read(iprot thrift.TProtocol) (err error) { if err = iprot.ReadStructEnd(); err != nil { goto ReadStructEndError } - + + if !issetNowString { + fieldId = 1 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TRuntimeFilterParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryGlobals[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -8211,152 +14060,69 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TQueryGlobals[fieldId])) } -func (p *TRuntimeFilterParams) ReadField1(iprot thrift.TProtocol) error { - p.RuntimeFilterMergeAddr = types.NewTNetworkAddress() - if err := p.RuntimeFilterMergeAddr.Read(iprot); err != nil { +func (p *TQueryGlobals) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = v } + p.NowString = _field return nil } +func (p *TQueryGlobals) ReadField2(iprot thrift.TProtocol) error { -func (p *TRuntimeFilterParams) ReadField2(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.RidToTargetParam = make(map[int32][]*TRuntimeFilterTargetParams, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _val := make([]*TRuntimeFilterTargetParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTRuntimeFilterTargetParams() - if err := _elem.Read(iprot); err != nil { - return err - } - - _val = append(_val, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - - p.RidToTargetParam[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.TimestampMs = _field return nil } +func (p *TQueryGlobals) ReadField3(iprot thrift.TProtocol) error { -func (p *TRuntimeFilterParams) ReadField3(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.RidToRuntimeFilter = make(map[int32]*plannodes.TRuntimeFilterDesc, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := 
iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := plannodes.NewTRuntimeFilterDesc() - if err := _val.Read(iprot); err != nil { - return err - } - - p.RidToRuntimeFilter[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.TimeZone = _field return nil } +func (p *TQueryGlobals) ReadField4(iprot thrift.TProtocol) error { -func (p *TRuntimeFilterParams) ReadField4(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.RuntimeFilterBuilderNum = make(map[int32]int32, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - var _val int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _val = v - } - - p.RuntimeFilterBuilderNum[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = v } + p.LoadZeroTolerance = _field return nil } +func (p *TQueryGlobals) ReadField5(iprot thrift.TProtocol) error { -func (p *TRuntimeFilterParams) ReadField5(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.RidToTargetParamv2 = make(map[int32][]*TRuntimeFilterTargetParamsV2, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _val := make([]*TRuntimeFilterTargetParamsV2, 0, size) - for i := 0; i < size; i++ { - _elem := NewTRuntimeFilterTargetParamsV2() - if err := _elem.Read(iprot); err != nil { - return err - } - - _val = append(_val, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - - p.RidToTargetParamv2[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _field = &v } + p.NanoSeconds = _field return nil } -func (p *TRuntimeFilterParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TQueryGlobals) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TRuntimeFilterParams"); err != nil { + if err = oprot.WriteStructBegin("TQueryGlobals"); err != nil { goto WriteStructBeginError } if p != nil { @@ -8380,7 +14146,6 @@ func (p *TRuntimeFilterParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8399,17 +14164,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TRuntimeFilterParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterMergeAddr() { - if err = oprot.WriteFieldBegin("runtime_filter_merge_addr", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.RuntimeFilterMergeAddr.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TQueryGlobals) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("now_string", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.NowString); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); 
err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -8418,33 +14181,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TRuntimeFilterParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetRidToTargetParam() { - if err = oprot.WriteFieldBegin("rid_to_target_param", thrift.MAP, 2); err != nil { +func (p *TQueryGlobals) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTimestampMs() { + if err = oprot.WriteFieldBegin("timestamp_ms", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.RidToTargetParam)); err != nil { - return err - } - for k, v := range p.RidToTargetParam { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteI64(*p.TimestampMs); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -8458,25 +14200,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TRuntimeFilterParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetRidToRuntimeFilter() { - if err = oprot.WriteFieldBegin("rid_to_runtime_filter", thrift.MAP, 3); err != nil { +func (p *TQueryGlobals) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTimeZone() { + if err = oprot.WriteFieldBegin("time_zone", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.RidToRuntimeFilter)); err != nil { - return err - } - for k, v := range p.RidToRuntimeFilter { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteString(*p.TimeZone); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -8490,65 +14219,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TRuntimeFilterParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterBuilderNum() { - if err = oprot.WriteFieldBegin("runtime_filter_builder_num", thrift.MAP, 4); err != nil { +func (p *TQueryGlobals) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadZeroTolerance() { + if err = oprot.WriteFieldBegin("load_zero_tolerance", thrift.BOOL, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.RuntimeFilterBuilderNum)); err != nil { - return err - } - for k, v := range p.RuntimeFilterBuilderNum { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteBool(p.LoadZeroTolerance); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TRuntimeFilterParams) 
writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetRidToTargetParamv2() { - if err = oprot.WriteFieldBegin("rid_to_target_paramv2", thrift.MAP, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.RidToTargetParamv2)); err != nil { - return err - } - for k, v := range p.RidToTargetParamv2 { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } + goto WriteFieldEndError } - if err := oprot.WriteMapEnd(); err != nil { + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryGlobals) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetNanoSeconds() { + if err = oprot.WriteFieldBegin("nano_seconds", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NanoSeconds); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -8562,857 +14257,1187 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TRuntimeFilterParams) String() string { +func (p *TQueryGlobals) String() string { if p == nil { return "" } - return fmt.Sprintf("TRuntimeFilterParams(%+v)", *p) + return fmt.Sprintf("TQueryGlobals(%+v)", *p) + } -func (p *TRuntimeFilterParams) DeepEqual(ano *TRuntimeFilterParams) bool { +func (p *TQueryGlobals) DeepEqual(ano *TQueryGlobals) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.RuntimeFilterMergeAddr) { + if !p.Field1DeepEqual(ano.NowString) { return false } - if !p.Field2DeepEqual(ano.RidToTargetParam) { + if !p.Field2DeepEqual(ano.TimestampMs) { return false } - if !p.Field3DeepEqual(ano.RidToRuntimeFilter) { + if !p.Field3DeepEqual(ano.TimeZone) { return false } - if !p.Field4DeepEqual(ano.RuntimeFilterBuilderNum) { + if !p.Field4DeepEqual(ano.LoadZeroTolerance) { return false } - if !p.Field5DeepEqual(ano.RidToTargetParamv2) { + if !p.Field5DeepEqual(ano.NanoSeconds) { return false } return true } -func (p *TRuntimeFilterParams) Field1DeepEqual(src *types.TNetworkAddress) bool { +func (p *TQueryGlobals) Field1DeepEqual(src string) bool { - if !p.RuntimeFilterMergeAddr.DeepEqual(src) { + if strings.Compare(p.NowString, src) != 0 { return false } return true } -func (p *TRuntimeFilterParams) Field2DeepEqual(src map[int32][]*TRuntimeFilterTargetParams) bool { +func (p *TQueryGlobals) Field2DeepEqual(src *int64) bool { - if len(p.RidToTargetParam) != len(src) { + if p.TimestampMs == src { + return true + } else if p.TimestampMs == nil || src == nil { return false } - for k, v := range p.RidToTargetParam { - _src := src[k] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } + if *p.TimestampMs != *src { + return false } return true } -func (p *TRuntimeFilterParams) Field3DeepEqual(src map[int32]*plannodes.TRuntimeFilterDesc) bool { +func (p *TQueryGlobals) Field3DeepEqual(src *string) bool { - if len(p.RidToRuntimeFilter) != len(src) { + if p.TimeZone == src { + return true + } else if p.TimeZone == nil || src == nil { 
return false } - for k, v := range p.RidToRuntimeFilter { - _src := src[k] - if !v.DeepEqual(_src) { - return false - } + if strings.Compare(*p.TimeZone, *src) != 0 { + return false + } + return true +} +func (p *TQueryGlobals) Field4DeepEqual(src bool) bool { + + if p.LoadZeroTolerance != src { + return false + } + return true +} +func (p *TQueryGlobals) Field5DeepEqual(src *int32) bool { + + if p.NanoSeconds == src { + return true + } else if p.NanoSeconds == nil || src == nil { + return false + } + if *p.NanoSeconds != *src { + return false } return true } -func (p *TRuntimeFilterParams) Field4DeepEqual(src map[int32]int32) bool { - if len(p.RuntimeFilterBuilderNum) != len(src) { - return false - } - for k, v := range p.RuntimeFilterBuilderNum { - _src := src[k] - if v != _src { - return false - } - } - return true +type TTxnParams struct { + NeedTxn *bool `thrift:"need_txn,1,optional" frugal:"1,optional,bool" json:"need_txn,omitempty"` + Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` + ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,3,optional" frugal:"3,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + Tbl *string `thrift:"tbl,5,optional" frugal:"5,optional,string" json:"tbl,omitempty"` + UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` + TxnId *int64 `thrift:"txn_id,7,optional" frugal:"7,optional,i64" json:"txn_id,omitempty"` + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` + DbId *int64 `thrift:"db_id,9,optional" frugal:"9,optional,i64" json:"db_id,omitempty"` + MaxFilterRatio *float64 `thrift:"max_filter_ratio,10,optional" frugal:"10,optional,double" json:"max_filter_ratio,omitempty"` + EnablePipelineTxnLoad bool `thrift:"enable_pipeline_txn_load,11,optional" frugal:"11,optional,bool" json:"enable_pipeline_txn_load,omitempty"` +} + +func NewTTxnParams() *TTxnParams { + return &TTxnParams{ + + EnablePipelineTxnLoad: true, + } +} + +func (p *TTxnParams) InitDefault() { + p.EnablePipelineTxnLoad = true +} + +var TTxnParams_NeedTxn_DEFAULT bool + +func (p *TTxnParams) GetNeedTxn() (v bool) { + if !p.IsSetNeedTxn() { + return TTxnParams_NeedTxn_DEFAULT + } + return *p.NeedTxn +} + +var TTxnParams_Token_DEFAULT string + +func (p *TTxnParams) GetToken() (v string) { + if !p.IsSetToken() { + return TTxnParams_Token_DEFAULT + } + return *p.Token +} + +var TTxnParams_ThriftRpcTimeoutMs_DEFAULT int64 + +func (p *TTxnParams) GetThriftRpcTimeoutMs() (v int64) { + if !p.IsSetThriftRpcTimeoutMs() { + return TTxnParams_ThriftRpcTimeoutMs_DEFAULT + } + return *p.ThriftRpcTimeoutMs +} + +var TTxnParams_Db_DEFAULT string + +func (p *TTxnParams) GetDb() (v string) { + if !p.IsSetDb() { + return TTxnParams_Db_DEFAULT + } + return *p.Db +} + +var TTxnParams_Tbl_DEFAULT string + +func (p *TTxnParams) GetTbl() (v string) { + if !p.IsSetTbl() { + return TTxnParams_Tbl_DEFAULT + } + return *p.Tbl +} + +var TTxnParams_UserIp_DEFAULT string + +func (p *TTxnParams) GetUserIp() (v string) { + if !p.IsSetUserIp() { + return TTxnParams_UserIp_DEFAULT + } + return *p.UserIp +} + +var TTxnParams_TxnId_DEFAULT int64 + +func (p *TTxnParams) GetTxnId() (v int64) { + if !p.IsSetTxnId() { + return TTxnParams_TxnId_DEFAULT + } + return *p.TxnId +} + +var TTxnParams_FragmentInstanceId_DEFAULT *types.TUniqueId + +func (p 
*TTxnParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TTxnParams_FragmentInstanceId_DEFAULT + } + return p.FragmentInstanceId +} + +var TTxnParams_DbId_DEFAULT int64 + +func (p *TTxnParams) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TTxnParams_DbId_DEFAULT + } + return *p.DbId +} + +var TTxnParams_MaxFilterRatio_DEFAULT float64 + +func (p *TTxnParams) GetMaxFilterRatio() (v float64) { + if !p.IsSetMaxFilterRatio() { + return TTxnParams_MaxFilterRatio_DEFAULT + } + return *p.MaxFilterRatio +} + +var TTxnParams_EnablePipelineTxnLoad_DEFAULT bool = true + +func (p *TTxnParams) GetEnablePipelineTxnLoad() (v bool) { + if !p.IsSetEnablePipelineTxnLoad() { + return TTxnParams_EnablePipelineTxnLoad_DEFAULT + } + return p.EnablePipelineTxnLoad +} +func (p *TTxnParams) SetNeedTxn(val *bool) { + p.NeedTxn = val +} +func (p *TTxnParams) SetToken(val *string) { + p.Token = val +} +func (p *TTxnParams) SetThriftRpcTimeoutMs(val *int64) { + p.ThriftRpcTimeoutMs = val +} +func (p *TTxnParams) SetDb(val *string) { + p.Db = val +} +func (p *TTxnParams) SetTbl(val *string) { + p.Tbl = val +} +func (p *TTxnParams) SetUserIp(val *string) { + p.UserIp = val +} +func (p *TTxnParams) SetTxnId(val *int64) { + p.TxnId = val +} +func (p *TTxnParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val +} +func (p *TTxnParams) SetDbId(val *int64) { + p.DbId = val +} +func (p *TTxnParams) SetMaxFilterRatio(val *float64) { + p.MaxFilterRatio = val +} +func (p *TTxnParams) SetEnablePipelineTxnLoad(val bool) { + p.EnablePipelineTxnLoad = val +} + +var fieldIDToName_TTxnParams = map[int16]string{ + 1: "need_txn", + 2: "token", + 3: "thrift_rpc_timeout_ms", + 4: "db", + 5: "tbl", + 6: "user_ip", + 7: "txn_id", + 8: "fragment_instance_id", + 9: "db_id", + 10: "max_filter_ratio", + 11: "enable_pipeline_txn_load", +} + +func (p *TTxnParams) IsSetNeedTxn() bool { + return p.NeedTxn != nil } -func (p *TRuntimeFilterParams) Field5DeepEqual(src map[int32][]*TRuntimeFilterTargetParamsV2) bool { - if len(p.RidToTargetParamv2) != len(src) { - return false - } - for k, v := range p.RidToTargetParamv2 { - _src := src[k] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } - } - return true +func (p *TTxnParams) IsSetToken() bool { + return p.Token != nil } -type TPlanFragmentExecParams struct { - QueryId *types.TUniqueId `thrift:"query_id,1,required" frugal:"1,required,types.TUniqueId" json:"query_id"` - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,required" frugal:"2,required,types.TUniqueId" json:"fragment_instance_id"` - PerNodeScanRanges map[types.TPlanNodeId][]*TScanRangeParams `thrift:"per_node_scan_ranges,3,required" frugal:"3,required,map>" json:"per_node_scan_ranges"` - PerExchNumSenders map[types.TPlanNodeId]int32 `thrift:"per_exch_num_senders,4,required" frugal:"4,required,map" json:"per_exch_num_senders"` - Destinations []*datasinks.TPlanFragmentDestination `thrift:"destinations,5" frugal:"5,default,list" json:"destinations"` - SenderId *int32 `thrift:"sender_id,9,optional" frugal:"9,optional,i32" json:"sender_id,omitempty"` - NumSenders *int32 `thrift:"num_senders,10,optional" frugal:"10,optional,i32" json:"num_senders,omitempty"` - SendQueryStatisticsWithEveryBatch *bool `thrift:"send_query_statistics_with_every_batch,11,optional" frugal:"11,optional,bool" json:"send_query_statistics_with_every_batch,omitempty"` - 
RuntimeFilterParams *TRuntimeFilterParams `thrift:"runtime_filter_params,12,optional" frugal:"12,optional,TRuntimeFilterParams" json:"runtime_filter_params,omitempty"` - GroupCommit *bool `thrift:"group_commit,13,optional" frugal:"13,optional,bool" json:"group_commit,omitempty"` +func (p *TTxnParams) IsSetThriftRpcTimeoutMs() bool { + return p.ThriftRpcTimeoutMs != nil } -func NewTPlanFragmentExecParams() *TPlanFragmentExecParams { - return &TPlanFragmentExecParams{} +func (p *TTxnParams) IsSetDb() bool { + return p.Db != nil } -func (p *TPlanFragmentExecParams) InitDefault() { - *p = TPlanFragmentExecParams{} +func (p *TTxnParams) IsSetTbl() bool { + return p.Tbl != nil } -var TPlanFragmentExecParams_QueryId_DEFAULT *types.TUniqueId - -func (p *TPlanFragmentExecParams) GetQueryId() (v *types.TUniqueId) { - if !p.IsSetQueryId() { - return TPlanFragmentExecParams_QueryId_DEFAULT - } - return p.QueryId +func (p *TTxnParams) IsSetUserIp() bool { + return p.UserIp != nil } -var TPlanFragmentExecParams_FragmentInstanceId_DEFAULT *types.TUniqueId +func (p *TTxnParams) IsSetTxnId() bool { + return p.TxnId != nil +} -func (p *TPlanFragmentExecParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TPlanFragmentExecParams_FragmentInstanceId_DEFAULT - } - return p.FragmentInstanceId +func (p *TTxnParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil } -func (p *TPlanFragmentExecParams) GetPerNodeScanRanges() (v map[types.TPlanNodeId][]*TScanRangeParams) { - return p.PerNodeScanRanges +func (p *TTxnParams) IsSetDbId() bool { + return p.DbId != nil } -func (p *TPlanFragmentExecParams) GetPerExchNumSenders() (v map[types.TPlanNodeId]int32) { - return p.PerExchNumSenders +func (p *TTxnParams) IsSetMaxFilterRatio() bool { + return p.MaxFilterRatio != nil } -func (p *TPlanFragmentExecParams) GetDestinations() (v []*datasinks.TPlanFragmentDestination) { - return p.Destinations +func (p *TTxnParams) IsSetEnablePipelineTxnLoad() bool { + return p.EnablePipelineTxnLoad != TTxnParams_EnablePipelineTxnLoad_DEFAULT } -var TPlanFragmentExecParams_SenderId_DEFAULT int32 +func (p *TTxnParams) Read(iprot thrift.TProtocol) (err error) { -func (p *TPlanFragmentExecParams) GetSenderId() (v int32) { - if !p.IsSetSenderId() { - return TPlanFragmentExecParams_SenderId_DEFAULT + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return *p.SenderId -} -var TPlanFragmentExecParams_NumSenders_DEFAULT int32 + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } -func (p *TPlanFragmentExecParams) GetNumSenders() (v int32) { - if !p.IsSetNumSenders() { - return TPlanFragmentExecParams_NumSenders_DEFAULT + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = 
p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return *p.NumSenders -} -var TPlanFragmentExecParams_SendQueryStatisticsWithEveryBatch_DEFAULT bool + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -func (p *TPlanFragmentExecParams) GetSendQueryStatisticsWithEveryBatch() (v bool) { - if !p.IsSetSendQueryStatisticsWithEveryBatch() { - return TPlanFragmentExecParams_SendQueryStatisticsWithEveryBatch_DEFAULT - } - return *p.SendQueryStatisticsWithEveryBatch +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -var TPlanFragmentExecParams_RuntimeFilterParams_DEFAULT *TRuntimeFilterParams +func (p *TTxnParams) ReadField1(iprot thrift.TProtocol) error { -func (p *TPlanFragmentExecParams) GetRuntimeFilterParams() (v *TRuntimeFilterParams) { - if !p.IsSetRuntimeFilterParams() { - return TPlanFragmentExecParams_RuntimeFilterParams_DEFAULT + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } - return p.RuntimeFilterParams + p.NeedTxn = _field + return nil } +func (p *TTxnParams) ReadField2(iprot thrift.TProtocol) error { -var TPlanFragmentExecParams_GroupCommit_DEFAULT bool - -func (p *TPlanFragmentExecParams) GetGroupCommit() (v bool) { - if !p.IsSetGroupCommit() { 
- return TPlanFragmentExecParams_GroupCommit_DEFAULT + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return *p.GroupCommit -} -func (p *TPlanFragmentExecParams) SetQueryId(val *types.TUniqueId) { - p.QueryId = val -} -func (p *TPlanFragmentExecParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val -} -func (p *TPlanFragmentExecParams) SetPerNodeScanRanges(val map[types.TPlanNodeId][]*TScanRangeParams) { - p.PerNodeScanRanges = val -} -func (p *TPlanFragmentExecParams) SetPerExchNumSenders(val map[types.TPlanNodeId]int32) { - p.PerExchNumSenders = val + p.Token = _field + return nil } -func (p *TPlanFragmentExecParams) SetDestinations(val []*datasinks.TPlanFragmentDestination) { - p.Destinations = val +func (p *TTxnParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ThriftRpcTimeoutMs = _field + return nil } -func (p *TPlanFragmentExecParams) SetSenderId(val *int32) { - p.SenderId = val +func (p *TTxnParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Db = _field + return nil } -func (p *TPlanFragmentExecParams) SetNumSenders(val *int32) { - p.NumSenders = val +func (p *TTxnParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Tbl = _field + return nil } -func (p *TPlanFragmentExecParams) SetSendQueryStatisticsWithEveryBatch(val *bool) { - p.SendQueryStatisticsWithEveryBatch = val +func (p *TTxnParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.UserIp = _field + return nil } -func (p *TPlanFragmentExecParams) SetRuntimeFilterParams(val *TRuntimeFilterParams) { - p.RuntimeFilterParams = val +func (p *TTxnParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TxnId = _field + return nil } -func (p *TPlanFragmentExecParams) SetGroupCommit(val *bool) { - p.GroupCommit = val +func (p *TTxnParams) ReadField8(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.FragmentInstanceId = _field + return nil } +func (p *TTxnParams) ReadField9(iprot thrift.TProtocol) error { -var fieldIDToName_TPlanFragmentExecParams = map[int16]string{ - 1: "query_id", - 2: "fragment_instance_id", - 3: "per_node_scan_ranges", - 4: "per_exch_num_senders", - 5: "destinations", - 9: "sender_id", - 10: "num_senders", - 11: "send_query_statistics_with_every_batch", - 12: "runtime_filter_params", - 13: "group_commit", + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DbId = _field + return nil } +func (p *TTxnParams) ReadField10(iprot thrift.TProtocol) error { -func (p *TPlanFragmentExecParams) IsSetQueryId() bool { - return p.QueryId != nil + var _field *float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = &v + } + p.MaxFilterRatio = _field + return nil } +func (p *TTxnParams) ReadField11(iprot thrift.TProtocol) error { -func (p *TPlanFragmentExecParams) IsSetFragmentInstanceId() bool { - return 
p.FragmentInstanceId != nil + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnablePipelineTxnLoad = _field + return nil } -func (p *TPlanFragmentExecParams) IsSetSenderId() bool { - return p.SenderId != nil +func (p *TTxnParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTxnParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPlanFragmentExecParams) IsSetNumSenders() bool { - return p.NumSenders != nil +func (p *TTxnParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetNeedTxn() { + if err = oprot.WriteFieldBegin("need_txn", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.NeedTxn); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPlanFragmentExecParams) IsSetSendQueryStatisticsWithEveryBatch() bool { - return p.SendQueryStatisticsWithEveryBatch != nil +func (p *TTxnParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetToken() { + if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Token); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPlanFragmentExecParams) IsSetRuntimeFilterParams() bool { - return p.RuntimeFilterParams != nil +func (p *TTxnParams) writeField3(oprot 
thrift.TProtocol) (err error) { + if p.IsSetThriftRpcTimeoutMs() { + if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TPlanFragmentExecParams) IsSetGroupCommit() bool { - return p.GroupCommit != nil +func (p *TTxnParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Db); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TPlanFragmentExecParams) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetQueryId bool = false - var issetFragmentInstanceId bool = false - var issetPerNodeScanRanges bool = false - var issetPerExchNumSenders bool = false +func (p *TTxnParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTbl() { + if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Tbl); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TTxnParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetUserIp() { + if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.UserIp); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError +func (p *TTxnParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnId() { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 7); err != nil { + goto WriteFieldBeginError } - if fieldTypeId == thrift.STOP { - break + if err := oprot.WriteI64(*p.TxnId); err != nil { + return err } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetQueryId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - 
issetFragmentInstanceId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.MAP { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetPerNodeScanRanges = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.MAP { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - issetPerExchNumSenders = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.LIST { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I32 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I32 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError +func (p *TTxnParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceId() { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.FragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} - if !issetQueryId { - fieldId = 1 - goto RequiredFieldNotSetError +func (p *TTxnParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = oprot.WriteFieldBegin("db_id", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} - if !issetFragmentInstanceId { - fieldId = 2 - goto RequiredFieldNotSetError +func (p *TTxnParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxFilterRatio() { + if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} - if !issetPerNodeScanRanges { - fieldId = 3 - goto RequiredFieldNotSetError +func (p *TTxnParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetEnablePipelineTxnLoad() { + if err = oprot.WriteFieldBegin("enable_pipeline_txn_load", thrift.BOOL, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnablePipelineTxnLoad); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} - if !issetPerExchNumSenders { - fieldId = 4 - goto RequiredFieldNotSetError +func (p *TTxnParams) String() string { + if p == nil { + return "" } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPlanFragmentExecParams[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + return fmt.Sprintf("TTxnParams(%+v)", *p) -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPlanFragmentExecParams[fieldId])) } -func (p *TPlanFragmentExecParams) ReadField1(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { - return err +func (p *TTxnParams) DeepEqual(ano *TTxnParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.NeedTxn) { + return false + } + if !p.Field2DeepEqual(ano.Token) { + return false + } + if !p.Field3DeepEqual(ano.ThriftRpcTimeoutMs) { + return false + } + if !p.Field4DeepEqual(ano.Db) { + return false + } + if !p.Field5DeepEqual(ano.Tbl) { + return false + } + if !p.Field6DeepEqual(ano.UserIp) { + return false + } + if !p.Field7DeepEqual(ano.TxnId) { + return false + } + if !p.Field8DeepEqual(ano.FragmentInstanceId) { + return false } - return nil -} - -func (p *TPlanFragmentExecParams) ReadField2(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { - return 
err + if !p.Field9DeepEqual(ano.DbId) { + return false } - return nil -} - -func (p *TPlanFragmentExecParams) ReadField3(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err + if !p.Field10DeepEqual(ano.MaxFilterRatio) { + return false } - p.PerNodeScanRanges = make(map[types.TPlanNodeId][]*TScanRangeParams, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _val := make([]*TScanRangeParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTScanRangeParams() - if err := _elem.Read(iprot); err != nil { - return err - } + if !p.Field11DeepEqual(ano.EnablePipelineTxnLoad) { + return false + } + return true +} - _val = append(_val, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } +func (p *TTxnParams) Field1DeepEqual(src *bool) bool { - p.PerNodeScanRanges[_key] = _val + if p.NeedTxn == src { + return true + } else if p.NeedTxn == nil || src == nil { + return false } - if err := iprot.ReadMapEnd(); err != nil { - return err + if *p.NeedTxn != *src { + return false } - return nil + return true } +func (p *TTxnParams) Field2DeepEqual(src *string) bool { -func (p *TPlanFragmentExecParams) ReadField4(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err + if p.Token == src { + return true + } else if p.Token == nil || src == nil { + return false } - p.PerExchNumSenders = make(map[types.TPlanNodeId]int32, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - var _val int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _val = v - } + if strings.Compare(*p.Token, *src) != 0 { + return false + } + return true +} +func (p *TTxnParams) Field3DeepEqual(src *int64) bool { - p.PerExchNumSenders[_key] = _val + if p.ThriftRpcTimeoutMs == src { + return true + } else if p.ThriftRpcTimeoutMs == nil || src == nil { + return false } - if err := iprot.ReadMapEnd(); err != nil { - return err + if *p.ThriftRpcTimeoutMs != *src { + return false } - return nil + return true } +func (p *TTxnParams) Field4DeepEqual(src *string) bool { -func (p *TPlanFragmentExecParams) ReadField5(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if p.Db == src { + return true + } else if p.Db == nil || src == nil { + return false } - p.Destinations = make([]*datasinks.TPlanFragmentDestination, 0, size) - for i := 0; i < size; i++ { - _elem := datasinks.NewTPlanFragmentDestination() - if err := _elem.Read(iprot); err != nil { - return err - } + if strings.Compare(*p.Db, *src) != 0 { + return false + } + return true +} +func (p *TTxnParams) Field5DeepEqual(src *string) bool { - p.Destinations = append(p.Destinations, _elem) + if p.Tbl == src { + return true + } else if p.Tbl == nil || src == nil { + return false } - if err := iprot.ReadListEnd(); err != nil { - return err + if strings.Compare(*p.Tbl, *src) != 0 { + return false } - return nil + return true } +func (p *TTxnParams) Field6DeepEqual(src *string) bool { -func (p *TPlanFragmentExecParams) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.SenderId = &v + if p.UserIp == src { + return true + } else if p.UserIp == nil || src == nil { + 
return false } - return nil + if strings.Compare(*p.UserIp, *src) != 0 { + return false + } + return true } +func (p *TTxnParams) Field7DeepEqual(src *int64) bool { -func (p *TPlanFragmentExecParams) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.NumSenders = &v + if p.TxnId == src { + return true + } else if p.TxnId == nil || src == nil { + return false } - return nil + if *p.TxnId != *src { + return false + } + return true } +func (p *TTxnParams) Field8DeepEqual(src *types.TUniqueId) bool { -func (p *TPlanFragmentExecParams) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.SendQueryStatisticsWithEveryBatch = &v + if !p.FragmentInstanceId.DeepEqual(src) { + return false } - return nil + return true } +func (p *TTxnParams) Field9DeepEqual(src *int64) bool { -func (p *TPlanFragmentExecParams) ReadField12(iprot thrift.TProtocol) error { - p.RuntimeFilterParams = NewTRuntimeFilterParams() - if err := p.RuntimeFilterParams.Read(iprot); err != nil { - return err + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { + return false } - return nil + if *p.DbId != *src { + return false + } + return true } +func (p *TTxnParams) Field10DeepEqual(src *float64) bool { -func (p *TPlanFragmentExecParams) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.GroupCommit = &v + if p.MaxFilterRatio == src { + return true + } else if p.MaxFilterRatio == nil || src == nil { + return false } - return nil + if *p.MaxFilterRatio != *src { + return false + } + return true } +func (p *TTxnParams) Field11DeepEqual(src bool) bool { -func (p *TPlanFragmentExecParams) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPlanFragmentExecParams"); err != nil { - goto WriteStructBeginError + if p.EnablePipelineTxnLoad != src { + return false } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } + return true +} - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +type TColumnDict struct { + Type 
*types.TPrimitiveType `thrift:"type,1,optional" frugal:"1,optional,TPrimitiveType" json:"type,omitempty"` + StrDict []string `thrift:"str_dict,2" frugal:"2,default,list" json:"str_dict"` +} + +func NewTColumnDict() *TColumnDict { + return &TColumnDict{} } -func (p *TPlanFragmentExecParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +func (p *TColumnDict) InitDefault() { } -func (p *TPlanFragmentExecParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.FragmentInstanceId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +var TColumnDict_Type_DEFAULT types.TPrimitiveType + +func (p *TColumnDict) GetType() (v types.TPrimitiveType) { + if !p.IsSetType() { + return TColumnDict_Type_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return *p.Type } -func (p *TPlanFragmentExecParams) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("per_node_scan_ranges", thrift.MAP, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)); err != nil { - return err +func (p *TColumnDict) GetStrDict() (v []string) { + return p.StrDict +} +func (p *TColumnDict) SetType(val *types.TPrimitiveType) { + p.Type = val +} +func (p *TColumnDict) SetStrDict(val []string) { + p.StrDict = val +} + +var fieldIDToName_TColumnDict = map[int16]string{ + 1: "type", + 2: "str_dict", +} + +func (p *TColumnDict) IsSetType() bool { + return p.Type != nil +} + +func (p *TColumnDict) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - for k, v := range p.PerNodeScanRanges { - if err := oprot.WriteI32(k); err != nil { - return err + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteListEnd(); err != nil { - return err + if err = iprot.ReadFieldEnd(); err != nil { + goto 
ReadFieldEndError } } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDict[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -func (p *TPlanFragmentExecParams) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("per_exch_num_senders", thrift.MAP, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.PerExchNumSenders)); err != nil { - return err - } - for k, v := range p.PerExchNumSenders { +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} - if err := oprot.WriteI32(k); err != nil { - return err - } +func (p *TColumnDict) ReadField1(iprot thrift.TProtocol) error { - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + var _field *types.TPrimitiveType + if v, err := iprot.ReadI32(); err != nil { return err + } else { + tmp := types.TPrimitiveType(v) + _field = &tmp } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.Type = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } - -func (p *TPlanFragmentExecParams) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("destinations", thrift.LIST, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Destinations)); err != nil { +func (p *TColumnDict) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } - for _, v := range p.Destinations { - if err := v.Write(oprot); err != nil { + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _elem = v } + + _field = append(_field, _elem) } - if err := oprot.WriteListEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.StrDict = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TPlanFragmentExecParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetSenderId() { - if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 9); err != nil { - goto 
WriteFieldBeginError - } - if err := oprot.WriteI32(*p.SenderId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TColumnDict) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TColumnDict"); err != nil { + goto WriteStructBeginError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TPlanFragmentExecParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetNumSenders() { - if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.NumSenders); err != nil { - return err + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) -} - -func (p *TPlanFragmentExecParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetSendQueryStatisticsWithEveryBatch() { - if err = oprot.WriteFieldBegin("send_query_statistics_with_every_batch", thrift.BOOL, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.SendQueryStatisticsWithEveryBatch); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPlanFragmentExecParams) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterParams() { - if err = oprot.WriteFieldBegin("runtime_filter_params", thrift.STRUCT, 12); err != nil { +func (p *TColumnDict) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err = oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { goto WriteFieldBeginError } - if err := p.RuntimeFilterParams.Write(oprot); err != nil { + if err := oprot.WriteI32(int32(*p.Type)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9421,297 +15446,138 @@ func (p *TPlanFragmentExecParams) writeField12(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPlanFragmentExecParams) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetGroupCommit() { - if err = oprot.WriteFieldBegin("group_commit", thrift.BOOL, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.GroupCommit); err != nil { +func (p *TColumnDict) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("str_dict", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.StrDict)); err != nil { + return err + } + for _, v := range p.StrDict { + if err := oprot.WriteString(v); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPlanFragmentExecParams) String() string { +func (p *TColumnDict) String() string { if p == nil { return "" } - return fmt.Sprintf("TPlanFragmentExecParams(%+v)", *p) + return fmt.Sprintf("TColumnDict(%+v)", *p) + } -func (p *TPlanFragmentExecParams) DeepEqual(ano *TPlanFragmentExecParams) bool { +func (p *TColumnDict) DeepEqual(ano *TColumnDict) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.QueryId) { - return false - } - if !p.Field2DeepEqual(ano.FragmentInstanceId) { - return false - } - if !p.Field3DeepEqual(ano.PerNodeScanRanges) { - return false - } - if !p.Field4DeepEqual(ano.PerExchNumSenders) { - return false - } - if !p.Field5DeepEqual(ano.Destinations) { - return false - } - if !p.Field9DeepEqual(ano.SenderId) { - return false - } - if !p.Field10DeepEqual(ano.NumSenders) { - return false - } - if !p.Field11DeepEqual(ano.SendQueryStatisticsWithEveryBatch) { - return false - } - if !p.Field12DeepEqual(ano.RuntimeFilterParams) { - return false - } - if !p.Field13DeepEqual(ano.GroupCommit) { + if !p.Field1DeepEqual(ano.Type) { return false } - return true -} - -func (p *TPlanFragmentExecParams) Field1DeepEqual(src *types.TUniqueId) bool { - - if !p.QueryId.DeepEqual(src) { + if !p.Field2DeepEqual(ano.StrDict) { return false } return true } -func (p *TPlanFragmentExecParams) Field2DeepEqual(src *types.TUniqueId) bool { - if !p.FragmentInstanceId.DeepEqual(src) { - return false - } - return true -} -func (p *TPlanFragmentExecParams) Field3DeepEqual(src map[types.TPlanNodeId][]*TScanRangeParams) bool { +func (p *TColumnDict) Field1DeepEqual(src *types.TPrimitiveType) bool { - if len(p.PerNodeScanRanges) != len(src) { + if p.Type == src { + return true + } else if p.Type == nil || src == nil { return false } - for k, v := range p.PerNodeScanRanges { - _src := src[k] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } - } - return true -} -func (p *TPlanFragmentExecParams) Field4DeepEqual(src map[types.TPlanNodeId]int32) bool { - - if 
len(p.PerExchNumSenders) != len(src) { + if *p.Type != *src { return false } - for k, v := range p.PerExchNumSenders { - _src := src[k] - if v != _src { - return false - } - } return true } -func (p *TPlanFragmentExecParams) Field5DeepEqual(src []*datasinks.TPlanFragmentDestination) bool { +func (p *TColumnDict) Field2DeepEqual(src []string) bool { - if len(p.Destinations) != len(src) { + if len(p.StrDict) != len(src) { return false } - for i, v := range p.Destinations { + for i, v := range p.StrDict { _src := src[i] - if !v.DeepEqual(_src) { + if strings.Compare(v, _src) != 0 { return false } } - return true -} -func (p *TPlanFragmentExecParams) Field9DeepEqual(src *int32) bool { - - if p.SenderId == src { - return true - } else if p.SenderId == nil || src == nil { - return false - } - if *p.SenderId != *src { - return false - } - return true -} -func (p *TPlanFragmentExecParams) Field10DeepEqual(src *int32) bool { - - if p.NumSenders == src { - return true - } else if p.NumSenders == nil || src == nil { - return false - } - if *p.NumSenders != *src { - return false - } - return true -} -func (p *TPlanFragmentExecParams) Field11DeepEqual(src *bool) bool { - - if p.SendQueryStatisticsWithEveryBatch == src { - return true - } else if p.SendQueryStatisticsWithEveryBatch == nil || src == nil { - return false - } - if *p.SendQueryStatisticsWithEveryBatch != *src { - return false - } - return true -} -func (p *TPlanFragmentExecParams) Field12DeepEqual(src *TRuntimeFilterParams) bool { - - if !p.RuntimeFilterParams.DeepEqual(src) { - return false - } - return true -} -func (p *TPlanFragmentExecParams) Field13DeepEqual(src *bool) bool { - - if p.GroupCommit == src { - return true - } else if p.GroupCommit == nil || src == nil { - return false - } - if *p.GroupCommit != *src { - return false - } - return true -} - -type TQueryGlobals struct { - NowString string `thrift:"now_string,1,required" frugal:"1,required,string" json:"now_string"` - TimestampMs *int64 `thrift:"timestamp_ms,2,optional" frugal:"2,optional,i64" json:"timestamp_ms,omitempty"` - TimeZone *string `thrift:"time_zone,3,optional" frugal:"3,optional,string" json:"time_zone,omitempty"` - LoadZeroTolerance bool `thrift:"load_zero_tolerance,4,optional" frugal:"4,optional,bool" json:"load_zero_tolerance,omitempty"` - NanoSeconds *int32 `thrift:"nano_seconds,5,optional" frugal:"5,optional,i32" json:"nano_seconds,omitempty"` -} - -func NewTQueryGlobals() *TQueryGlobals { - return &TQueryGlobals{ - - LoadZeroTolerance: false, - } -} - -func (p *TQueryGlobals) InitDefault() { - *p = TQueryGlobals{ - - LoadZeroTolerance: false, - } -} - -func (p *TQueryGlobals) GetNowString() (v string) { - return p.NowString -} - -var TQueryGlobals_TimestampMs_DEFAULT int64 - -func (p *TQueryGlobals) GetTimestampMs() (v int64) { - if !p.IsSetTimestampMs() { - return TQueryGlobals_TimestampMs_DEFAULT - } - return *p.TimestampMs -} - -var TQueryGlobals_TimeZone_DEFAULT string - -func (p *TQueryGlobals) GetTimeZone() (v string) { - if !p.IsSetTimeZone() { - return TQueryGlobals_TimeZone_DEFAULT - } - return *p.TimeZone -} - -var TQueryGlobals_LoadZeroTolerance_DEFAULT bool = false - -func (p *TQueryGlobals) GetLoadZeroTolerance() (v bool) { - if !p.IsSetLoadZeroTolerance() { - return TQueryGlobals_LoadZeroTolerance_DEFAULT - } - return p.LoadZeroTolerance -} - -var TQueryGlobals_NanoSeconds_DEFAULT int32 - -func (p *TQueryGlobals) GetNanoSeconds() (v int32) { - if !p.IsSetNanoSeconds() { - return TQueryGlobals_NanoSeconds_DEFAULT - } - return *p.NanoSeconds -} 
-func (p *TQueryGlobals) SetNowString(val string) { - p.NowString = val -} -func (p *TQueryGlobals) SetTimestampMs(val *int64) { - p.TimestampMs = val + return true } -func (p *TQueryGlobals) SetTimeZone(val *string) { - p.TimeZone = val + +type TGlobalDict struct { + Dicts map[int32]*TColumnDict `thrift:"dicts,1,optional" frugal:"1,optional,map" json:"dicts,omitempty"` + SlotDicts map[int32]int32 `thrift:"slot_dicts,2,optional" frugal:"2,optional,map" json:"slot_dicts,omitempty"` } -func (p *TQueryGlobals) SetLoadZeroTolerance(val bool) { - p.LoadZeroTolerance = val + +func NewTGlobalDict() *TGlobalDict { + return &TGlobalDict{} } -func (p *TQueryGlobals) SetNanoSeconds(val *int32) { - p.NanoSeconds = val + +func (p *TGlobalDict) InitDefault() { } -var fieldIDToName_TQueryGlobals = map[int16]string{ - 1: "now_string", - 2: "timestamp_ms", - 3: "time_zone", - 4: "load_zero_tolerance", - 5: "nano_seconds", +var TGlobalDict_Dicts_DEFAULT map[int32]*TColumnDict + +func (p *TGlobalDict) GetDicts() (v map[int32]*TColumnDict) { + if !p.IsSetDicts() { + return TGlobalDict_Dicts_DEFAULT + } + return p.Dicts } -func (p *TQueryGlobals) IsSetTimestampMs() bool { - return p.TimestampMs != nil +var TGlobalDict_SlotDicts_DEFAULT map[int32]int32 + +func (p *TGlobalDict) GetSlotDicts() (v map[int32]int32) { + if !p.IsSetSlotDicts() { + return TGlobalDict_SlotDicts_DEFAULT + } + return p.SlotDicts +} +func (p *TGlobalDict) SetDicts(val map[int32]*TColumnDict) { + p.Dicts = val +} +func (p *TGlobalDict) SetSlotDicts(val map[int32]int32) { + p.SlotDicts = val } -func (p *TQueryGlobals) IsSetTimeZone() bool { - return p.TimeZone != nil +var fieldIDToName_TGlobalDict = map[int16]string{ + 1: "dicts", + 2: "slot_dicts", } -func (p *TQueryGlobals) IsSetLoadZeroTolerance() bool { - return p.LoadZeroTolerance != TQueryGlobals_LoadZeroTolerance_DEFAULT +func (p *TGlobalDict) IsSetDicts() bool { + return p.Dicts != nil } -func (p *TQueryGlobals) IsSetNanoSeconds() bool { - return p.NanoSeconds != nil +func (p *TGlobalDict) IsSetSlotDicts() bool { + return p.SlotDicts != nil } -func (p *TQueryGlobals) Read(iprot thrift.TProtocol) (err error) { +func (p *TGlobalDict) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetNowString bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -9728,62 +15594,26 @@ func (p *TQueryGlobals) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.MAP { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetNowString = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err = p.ReadField5(iprot); err != nil { - goto 
ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9792,17 +15622,13 @@ func (p *TQueryGlobals) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetNowString { - fieldId = 1 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryGlobals[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGlobalDict[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9810,58 +15636,70 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TQueryGlobals[fieldId])) } -func (p *TQueryGlobals) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TGlobalDict) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.NowString = v } - return nil -} + _field := make(map[int32]*TColumnDict, size) + values := make([]TColumnDict, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TQueryGlobals) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TimestampMs = &v - } - return nil -} + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } -func (p *TQueryGlobals) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.TimeZone = &v } + p.Dicts = _field return nil } - -func (p *TQueryGlobals) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *TGlobalDict) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.LoadZeroTolerance = v } - return nil -} + _field := make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TQueryGlobals) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err - } else { - p.NanoSeconds = &v } + p.SlotDicts = _field return nil } -func (p *TQueryGlobals) Write(oprot thrift.TProtocol) (err error) { +func (p *TGlobalDict) Write(oprot 
thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TQueryGlobals"); err != nil { + if err = oprot.WriteStructBegin("TGlobalDict"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9873,19 +15711,6 @@ func (p *TQueryGlobals) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9904,48 +15729,23 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TQueryGlobals) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("now_string", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.NowString); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TQueryGlobals) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestampMs() { - if err = oprot.WriteFieldBegin("timestamp_ms", thrift.I64, 2); err != nil { +func (p *TGlobalDict) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDicts() { + if err = oprot.WriteFieldBegin("dicts", thrift.MAP, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TimestampMs); err != nil { + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.Dicts)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TQueryGlobals) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeZone() { - if err = oprot.WriteFieldBegin("time_zone", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError + for k, v := range p.Dicts { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } } - if err := oprot.WriteString(*p.TimeZone); err != nil { + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9954,36 +15754,28 @@ func (p *TQueryGlobals) writeField3(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TQueryGlobals) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadZeroTolerance() { - if err = oprot.WriteFieldBegin("load_zero_tolerance", thrift.BOOL, 4); err != nil { +func (p *TGlobalDict) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSlotDicts() { + if err = 
oprot.WriteFieldBegin("slot_dicts", thrift.MAP, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.LoadZeroTolerance); err != nil { + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.SlotDicts)); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TQueryGlobals) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetNanoSeconds() { - if err = oprot.WriteFieldBegin("nano_seconds", thrift.I32, 5); err != nil { - goto WriteFieldBeginError + for k, v := range p.SlotDicts { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } } - if err := oprot.WriteI32(*p.NanoSeconds); err != nil { + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9992,312 +15784,147 @@ func (p *TQueryGlobals) writeField5(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TQueryGlobals) String() string { +func (p *TGlobalDict) String() string { if p == nil { return "" } - return fmt.Sprintf("TQueryGlobals(%+v)", *p) + return fmt.Sprintf("TGlobalDict(%+v)", *p) + } -func (p *TQueryGlobals) DeepEqual(ano *TQueryGlobals) bool { +func (p *TGlobalDict) DeepEqual(ano *TGlobalDict) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.NowString) { - return false - } - if !p.Field2DeepEqual(ano.TimestampMs) { - return false - } - if !p.Field3DeepEqual(ano.TimeZone) { - return false - } - if !p.Field4DeepEqual(ano.LoadZeroTolerance) { - return false - } - if !p.Field5DeepEqual(ano.NanoSeconds) { + if !p.Field1DeepEqual(ano.Dicts) { return false } - return true -} - -func (p *TQueryGlobals) Field1DeepEqual(src string) bool { - - if strings.Compare(p.NowString, src) != 0 { + if !p.Field2DeepEqual(ano.SlotDicts) { return false } return true } -func (p *TQueryGlobals) Field2DeepEqual(src *int64) bool { - if p.TimestampMs == src { - return true - } else if p.TimestampMs == nil || src == nil { - return false - } - if *p.TimestampMs != *src { - return false - } - return true -} -func (p *TQueryGlobals) Field3DeepEqual(src *string) bool { +func (p *TGlobalDict) Field1DeepEqual(src map[int32]*TColumnDict) bool { - if p.TimeZone == src { - return true - } else if p.TimeZone == nil || src == nil { - return false - } - if strings.Compare(*p.TimeZone, *src) != 0 { + if len(p.Dicts) != len(src) { return false } - return true -} -func (p *TQueryGlobals) Field4DeepEqual(src bool) bool { - - if p.LoadZeroTolerance != src { - return false + for k, v := range p.Dicts { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TQueryGlobals) Field5DeepEqual(src *int32) bool { +func (p *TGlobalDict) Field2DeepEqual(src map[int32]int32) bool { - if p.NanoSeconds == src { - return true - } else if p.NanoSeconds == nil || src == nil { + if 
len(p.SlotDicts) != len(src) { return false } - if *p.NanoSeconds != *src { - return false + for k, v := range p.SlotDicts { + _src := src[k] + if v != _src { + return false + } } return true } -type TTxnParams struct { - NeedTxn *bool `thrift:"need_txn,1,optional" frugal:"1,optional,bool" json:"need_txn,omitempty"` - Token *string `thrift:"token,2,optional" frugal:"2,optional,string" json:"token,omitempty"` - ThriftRpcTimeoutMs *int64 `thrift:"thrift_rpc_timeout_ms,3,optional" frugal:"3,optional,i64" json:"thrift_rpc_timeout_ms,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - Tbl *string `thrift:"tbl,5,optional" frugal:"5,optional,string" json:"tbl,omitempty"` - UserIp *string `thrift:"user_ip,6,optional" frugal:"6,optional,string" json:"user_ip,omitempty"` - TxnId *int64 `thrift:"txn_id,7,optional" frugal:"7,optional,i64" json:"txn_id,omitempty"` - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` - DbId *int64 `thrift:"db_id,9,optional" frugal:"9,optional,i64" json:"db_id,omitempty"` - MaxFilterRatio *float64 `thrift:"max_filter_ratio,10,optional" frugal:"10,optional,double" json:"max_filter_ratio,omitempty"` - EnablePipelineTxnLoad bool `thrift:"enable_pipeline_txn_load,11,optional" frugal:"11,optional,bool" json:"enable_pipeline_txn_load,omitempty"` -} - -func NewTTxnParams() *TTxnParams { - return &TTxnParams{ - - EnablePipelineTxnLoad: false, - } -} - -func (p *TTxnParams) InitDefault() { - *p = TTxnParams{ - - EnablePipelineTxnLoad: false, - } -} - -var TTxnParams_NeedTxn_DEFAULT bool - -func (p *TTxnParams) GetNeedTxn() (v bool) { - if !p.IsSetNeedTxn() { - return TTxnParams_NeedTxn_DEFAULT - } - return *p.NeedTxn -} - -var TTxnParams_Token_DEFAULT string - -func (p *TTxnParams) GetToken() (v string) { - if !p.IsSetToken() { - return TTxnParams_Token_DEFAULT - } - return *p.Token -} - -var TTxnParams_ThriftRpcTimeoutMs_DEFAULT int64 - -func (p *TTxnParams) GetThriftRpcTimeoutMs() (v int64) { - if !p.IsSetThriftRpcTimeoutMs() { - return TTxnParams_ThriftRpcTimeoutMs_DEFAULT - } - return *p.ThriftRpcTimeoutMs -} - -var TTxnParams_Db_DEFAULT string - -func (p *TTxnParams) GetDb() (v string) { - if !p.IsSetDb() { - return TTxnParams_Db_DEFAULT - } - return *p.Db -} - -var TTxnParams_Tbl_DEFAULT string - -func (p *TTxnParams) GetTbl() (v string) { - if !p.IsSetTbl() { - return TTxnParams_Tbl_DEFAULT - } - return *p.Tbl -} - -var TTxnParams_UserIp_DEFAULT string - -func (p *TTxnParams) GetUserIp() (v string) { - if !p.IsSetUserIp() { - return TTxnParams_UserIp_DEFAULT - } - return *p.UserIp -} - -var TTxnParams_TxnId_DEFAULT int64 - -func (p *TTxnParams) GetTxnId() (v int64) { - if !p.IsSetTxnId() { - return TTxnParams_TxnId_DEFAULT - } - return *p.TxnId -} - -var TTxnParams_FragmentInstanceId_DEFAULT *types.TUniqueId - -func (p *TTxnParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TTxnParams_FragmentInstanceId_DEFAULT - } - return p.FragmentInstanceId +type TPipelineWorkloadGroup struct { + Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` + Properties map[string]string `thrift:"properties,3,optional" frugal:"3,optional,map" json:"properties,omitempty"` + Version *int64 `thrift:"version,4,optional" frugal:"4,optional,i64" json:"version,omitempty"` } -var 
TTxnParams_DbId_DEFAULT int64 - -func (p *TTxnParams) GetDbId() (v int64) { - if !p.IsSetDbId() { - return TTxnParams_DbId_DEFAULT - } - return *p.DbId +func NewTPipelineWorkloadGroup() *TPipelineWorkloadGroup { + return &TPipelineWorkloadGroup{} } -var TTxnParams_MaxFilterRatio_DEFAULT float64 - -func (p *TTxnParams) GetMaxFilterRatio() (v float64) { - if !p.IsSetMaxFilterRatio() { - return TTxnParams_MaxFilterRatio_DEFAULT - } - return *p.MaxFilterRatio +func (p *TPipelineWorkloadGroup) InitDefault() { } -var TTxnParams_EnablePipelineTxnLoad_DEFAULT bool = false +var TPipelineWorkloadGroup_Id_DEFAULT int64 -func (p *TTxnParams) GetEnablePipelineTxnLoad() (v bool) { - if !p.IsSetEnablePipelineTxnLoad() { - return TTxnParams_EnablePipelineTxnLoad_DEFAULT +func (p *TPipelineWorkloadGroup) GetId() (v int64) { + if !p.IsSetId() { + return TPipelineWorkloadGroup_Id_DEFAULT } - return p.EnablePipelineTxnLoad -} -func (p *TTxnParams) SetNeedTxn(val *bool) { - p.NeedTxn = val -} -func (p *TTxnParams) SetToken(val *string) { - p.Token = val -} -func (p *TTxnParams) SetThriftRpcTimeoutMs(val *int64) { - p.ThriftRpcTimeoutMs = val -} -func (p *TTxnParams) SetDb(val *string) { - p.Db = val -} -func (p *TTxnParams) SetTbl(val *string) { - p.Tbl = val -} -func (p *TTxnParams) SetUserIp(val *string) { - p.UserIp = val -} -func (p *TTxnParams) SetTxnId(val *int64) { - p.TxnId = val -} -func (p *TTxnParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val -} -func (p *TTxnParams) SetDbId(val *int64) { - p.DbId = val -} -func (p *TTxnParams) SetMaxFilterRatio(val *float64) { - p.MaxFilterRatio = val -} -func (p *TTxnParams) SetEnablePipelineTxnLoad(val bool) { - p.EnablePipelineTxnLoad = val -} - -var fieldIDToName_TTxnParams = map[int16]string{ - 1: "need_txn", - 2: "token", - 3: "thrift_rpc_timeout_ms", - 4: "db", - 5: "tbl", - 6: "user_ip", - 7: "txn_id", - 8: "fragment_instance_id", - 9: "db_id", - 10: "max_filter_ratio", - 11: "enable_pipeline_txn_load", + return *p.Id } -func (p *TTxnParams) IsSetNeedTxn() bool { - return p.NeedTxn != nil -} +var TPipelineWorkloadGroup_Name_DEFAULT string -func (p *TTxnParams) IsSetToken() bool { - return p.Token != nil +func (p *TPipelineWorkloadGroup) GetName() (v string) { + if !p.IsSetName() { + return TPipelineWorkloadGroup_Name_DEFAULT + } + return *p.Name } -func (p *TTxnParams) IsSetThriftRpcTimeoutMs() bool { - return p.ThriftRpcTimeoutMs != nil -} +var TPipelineWorkloadGroup_Properties_DEFAULT map[string]string -func (p *TTxnParams) IsSetDb() bool { - return p.Db != nil +func (p *TPipelineWorkloadGroup) GetProperties() (v map[string]string) { + if !p.IsSetProperties() { + return TPipelineWorkloadGroup_Properties_DEFAULT + } + return p.Properties } -func (p *TTxnParams) IsSetTbl() bool { - return p.Tbl != nil -} +var TPipelineWorkloadGroup_Version_DEFAULT int64 -func (p *TTxnParams) IsSetUserIp() bool { - return p.UserIp != nil +func (p *TPipelineWorkloadGroup) GetVersion() (v int64) { + if !p.IsSetVersion() { + return TPipelineWorkloadGroup_Version_DEFAULT + } + return *p.Version +} +func (p *TPipelineWorkloadGroup) SetId(val *int64) { + p.Id = val +} +func (p *TPipelineWorkloadGroup) SetName(val *string) { + p.Name = val +} +func (p *TPipelineWorkloadGroup) SetProperties(val map[string]string) { + p.Properties = val +} +func (p *TPipelineWorkloadGroup) SetVersion(val *int64) { + p.Version = val } -func (p *TTxnParams) IsSetTxnId() bool { - return p.TxnId != nil +var fieldIDToName_TPipelineWorkloadGroup = map[int16]string{ + 1: 
"id", + 2: "name", + 3: "properties", + 4: "version", } -func (p *TTxnParams) IsSetFragmentInstanceId() bool { - return p.FragmentInstanceId != nil +func (p *TPipelineWorkloadGroup) IsSetId() bool { + return p.Id != nil } -func (p *TTxnParams) IsSetDbId() bool { - return p.DbId != nil +func (p *TPipelineWorkloadGroup) IsSetName() bool { + return p.Name != nil } -func (p *TTxnParams) IsSetMaxFilterRatio() bool { - return p.MaxFilterRatio != nil +func (p *TPipelineWorkloadGroup) IsSetProperties() bool { + return p.Properties != nil } -func (p *TTxnParams) IsSetEnablePipelineTxnLoad() bool { - return p.EnablePipelineTxnLoad != TTxnParams_EnablePipelineTxnLoad_DEFAULT +func (p *TPipelineWorkloadGroup) IsSetVersion() bool { + return p.Version != nil } -func (p *TTxnParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TPipelineWorkloadGroup) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10317,121 +15944,42 @@ func (p *TTxnParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I64 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.DOUBLE { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField11(iprot); err != nil { + if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if 
err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10446,7 +15994,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTxnParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineWorkloadGroup[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10456,107 +16004,72 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTxnParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.NeedTxn = &v - } - return nil -} - -func (p *TTxnParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Token = &v - } - return nil -} +func (p *TPipelineWorkloadGroup) ReadField1(iprot thrift.TProtocol) error { -func (p *TTxnParams) ReadField3(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThriftRpcTimeoutMs = &v + _field = &v } + p.Id = _field return nil } +func (p *TPipelineWorkloadGroup) ReadField2(iprot thrift.TProtocol) error { -func (p *TTxnParams) ReadField4(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Name = _field return nil } - -func (p *TTxnParams) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TPipelineWorkloadGroup) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { return err - } else { - p.Tbl = &v } - return nil -} + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } -func (p *TTxnParams) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.UserIp = &v - } - return nil -} + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } -func (p *TTxnParams) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.TxnId = &v + _field[_key] = _val } - return nil -} - -func (p *TTxnParams) ReadField8(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { + if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } +func (p *TPipelineWorkloadGroup) ReadField4(iprot thrift.TProtocol) error { -func (p *TTxnParams) ReadField9(iprot thrift.TProtocol) error { + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.DbId = &v - } - return nil -} - -func (p *TTxnParams) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return err - } else { - 
p.MaxFilterRatio = &v - } - return nil -} - -func (p *TTxnParams) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.EnablePipelineTxnLoad = v + _field = &v } + p.Version = _field return nil } -func (p *TTxnParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TPipelineWorkloadGroup) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTxnParams"); err != nil { + if err = oprot.WriteStructBegin("TPipelineWorkloadGroup"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10569,42 +16082,13 @@ func (p *TTxnParams) Write(oprot thrift.TProtocol) (err error) { goto WriteFieldError } if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 + fieldId = 3 goto WriteFieldError } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 + if err = p.writeField4(oprot); err != nil { + fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10623,12 +16107,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTxnParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetNeedTxn() { - if err = oprot.WriteFieldBegin("need_txn", thrift.BOOL, 1); err != nil { +func (p *TPipelineWorkloadGroup) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.NeedTxn); err != nil { + if err := oprot.WriteI64(*p.Id); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10642,12 +16126,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTxnParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetToken() { - if err = oprot.WriteFieldBegin("token", thrift.STRING, 2); err != nil { +func (p *TPipelineWorkloadGroup) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Token); err != nil { + if err := oprot.WriteString(*p.Name); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10661,12 +16145,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTxnParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetThriftRpcTimeoutMs() { - if err = oprot.WriteFieldBegin("thrift_rpc_timeout_ms", thrift.I64, 3); err != nil { +func (p *TPipelineWorkloadGroup) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err = oprot.WriteFieldBegin("properties", thrift.MAP, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.ThriftRpcTimeoutMs); 
err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return err + } + for k, v := range p.Properties { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10680,12 +16175,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTxnParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDb() { - if err = oprot.WriteFieldBegin("db", thrift.STRING, 4); err != nil { +func (p *TPipelineWorkloadGroup) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetVersion() { + if err = oprot.WriteFieldBegin("version", thrift.I64, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Db); err != nil { + if err := oprot.WriteI64(*p.Version); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10699,632 +16194,698 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TTxnParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTbl() { - if err = oprot.WriteFieldBegin("tbl", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Tbl); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineWorkloadGroup) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return fmt.Sprintf("TPipelineWorkloadGroup(%+v)", *p) + } -func (p *TTxnParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetUserIp() { - if err = oprot.WriteFieldBegin("user_ip", thrift.STRING, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.UserIp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineWorkloadGroup) DeepEqual(ano *TPipelineWorkloadGroup) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + if !p.Field1DeepEqual(ano.Id) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Properties) { + return false + } + if !p.Field4DeepEqual(ano.Version) { + return false + } + return true } -func (p *TTxnParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnId() { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.TxnId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineWorkloadGroup) Field1DeepEqual(src *int64) bool { + + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + if *p.Id != *src { + return false + } + return true } +func (p *TPipelineWorkloadGroup) Field2DeepEqual(src *string) bool { -func (p *TTxnParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentInstanceId() { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 8); err != nil { - goto WriteFieldBeginError - } - if err := p.FragmentInstanceId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true } +func (p *TPipelineWorkloadGroup) Field3DeepEqual(src map[string]string) bool { -func (p *TTxnParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetDbId() { - if err = oprot.WriteFieldBegin("db_id", thrift.I64, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DbId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if len(p.Properties) != len(src) { + return false + } + for k, v := range p.Properties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return true } +func (p *TPipelineWorkloadGroup) Field4DeepEqual(src *int64) bool { -func (p *TTxnParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxFilterRatio() { - if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.Version == src { + return true + } else if p.Version == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + if *p.Version != *src { + return false + } + return true +} + +type TExecPlanFragmentParams struct { + ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` + Fragment *planner.TPlanFragment `thrift:"fragment,2,optional" frugal:"2,optional,planner.TPlanFragment" json:"fragment,omitempty"` + DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,3,optional" frugal:"3,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` + Params *TPlanFragmentExecParams `thrift:"params,4,optional" frugal:"4,optional,TPlanFragmentExecParams" json:"params,omitempty"` + Coord *types.TNetworkAddress `thrift:"coord,5,optional" frugal:"5,optional,types.TNetworkAddress" json:"coord,omitempty"` + BackendNum *int32 `thrift:"backend_num,6,optional" frugal:"6,optional,i32" json:"backend_num,omitempty"` + QueryGlobals *TQueryGlobals `thrift:"query_globals,7,optional" 
frugal:"7,optional,TQueryGlobals" json:"query_globals,omitempty"` + QueryOptions *TQueryOptions `thrift:"query_options,8,optional" frugal:"8,optional,TQueryOptions" json:"query_options,omitempty"` + IsReportSuccess *bool `thrift:"is_report_success,9,optional" frugal:"9,optional,bool" json:"is_report_success,omitempty"` + ResourceInfo *types.TResourceInfo `thrift:"resource_info,10,optional" frugal:"10,optional,types.TResourceInfo" json:"resource_info,omitempty"` + ImportLabel *string `thrift:"import_label,11,optional" frugal:"11,optional,string" json:"import_label,omitempty"` + DbName *string `thrift:"db_name,12,optional" frugal:"12,optional,string" json:"db_name,omitempty"` + LoadJobId *int64 `thrift:"load_job_id,13,optional" frugal:"13,optional,i64" json:"load_job_id,omitempty"` + LoadErrorHubInfo *TLoadErrorHubInfo `thrift:"load_error_hub_info,14,optional" frugal:"14,optional,TLoadErrorHubInfo" json:"load_error_hub_info,omitempty"` + FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,15,optional" frugal:"15,optional,i32" json:"fragment_num_on_host,omitempty"` + IsSimplifiedParam bool `thrift:"is_simplified_param,16,optional" frugal:"16,optional,bool" json:"is_simplified_param,omitempty"` + TxnConf *TTxnParams `thrift:"txn_conf,17,optional" frugal:"17,optional,TTxnParams" json:"txn_conf,omitempty"` + BackendId *int64 `thrift:"backend_id,18,optional" frugal:"18,optional,i64" json:"backend_id,omitempty"` + GlobalDict *TGlobalDict `thrift:"global_dict,19,optional" frugal:"19,optional,TGlobalDict" json:"global_dict,omitempty"` + NeedWaitExecutionTrigger bool `thrift:"need_wait_execution_trigger,20,optional" frugal:"20,optional,bool" json:"need_wait_execution_trigger,omitempty"` + BuildHashTableForBroadcastJoin bool `thrift:"build_hash_table_for_broadcast_join,21,optional" frugal:"21,optional,bool" json:"build_hash_table_for_broadcast_join,omitempty"` + InstancesSharingHashTable []*types.TUniqueId `thrift:"instances_sharing_hash_table,22,optional" frugal:"22,optional,list" json:"instances_sharing_hash_table,omitempty"` + TableName *string `thrift:"table_name,23,optional" frugal:"23,optional,string" json:"table_name,omitempty"` + FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,24,optional" frugal:"24,optional,map" json:"file_scan_params,omitempty"` + WalId *int64 `thrift:"wal_id,25,optional" frugal:"25,optional,i64" json:"wal_id,omitempty"` + LoadStreamPerNode *int32 `thrift:"load_stream_per_node,26,optional" frugal:"26,optional,i32" json:"load_stream_per_node,omitempty"` + TotalLoadStreams *int32 `thrift:"total_load_streams,27,optional" frugal:"27,optional,i32" json:"total_load_streams,omitempty"` + NumLocalSink *int32 `thrift:"num_local_sink,28,optional" frugal:"28,optional,i32" json:"num_local_sink,omitempty"` + ContentLength *int64 `thrift:"content_length,29,optional" frugal:"29,optional,i64" json:"content_length,omitempty"` + WorkloadGroups []*TPipelineWorkloadGroup `thrift:"workload_groups,30,optional" frugal:"30,optional,list" json:"workload_groups,omitempty"` + IsNereids bool `thrift:"is_nereids,31,optional" frugal:"31,optional,bool" json:"is_nereids,omitempty"` + CurrentConnectFe *types.TNetworkAddress `thrift:"current_connect_fe,32,optional" frugal:"32,optional,types.TNetworkAddress" json:"current_connect_fe,omitempty"` + IsMowTable *bool `thrift:"is_mow_table,1000,optional" frugal:"1000,optional,bool" json:"is_mow_table,omitempty"` +} + +func NewTExecPlanFragmentParams() *TExecPlanFragmentParams { + return &TExecPlanFragmentParams{ + 
+ IsSimplifiedParam: false, + NeedWaitExecutionTrigger: false, + BuildHashTableForBroadcastJoin: false, + IsNereids: true, + } +} + +func (p *TExecPlanFragmentParams) InitDefault() { + p.IsSimplifiedParam = false + p.NeedWaitExecutionTrigger = false + p.BuildHashTableForBroadcastJoin = false + p.IsNereids = true +} + +func (p *TExecPlanFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { + return p.ProtocolVersion +} + +var TExecPlanFragmentParams_Fragment_DEFAULT *planner.TPlanFragment + +func (p *TExecPlanFragmentParams) GetFragment() (v *planner.TPlanFragment) { + if !p.IsSetFragment() { + return TExecPlanFragmentParams_Fragment_DEFAULT + } + return p.Fragment } -func (p *TTxnParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetEnablePipelineTxnLoad() { - if err = oprot.WriteFieldBegin("enable_pipeline_txn_load", thrift.BOOL, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.EnablePipelineTxnLoad); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TExecPlanFragmentParams_DescTbl_DEFAULT *descriptors.TDescriptorTable + +func (p *TExecPlanFragmentParams) GetDescTbl() (v *descriptors.TDescriptorTable) { + if !p.IsSetDescTbl() { + return TExecPlanFragmentParams_DescTbl_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) + return p.DescTbl } -func (p *TTxnParams) String() string { - if p == nil { - return "" +var TExecPlanFragmentParams_Params_DEFAULT *TPlanFragmentExecParams + +func (p *TExecPlanFragmentParams) GetParams() (v *TPlanFragmentExecParams) { + if !p.IsSetParams() { + return TExecPlanFragmentParams_Params_DEFAULT } - return fmt.Sprintf("TTxnParams(%+v)", *p) + return p.Params } -func (p *TTxnParams) DeepEqual(ano *TTxnParams) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.NeedTxn) { - return false - } - if !p.Field2DeepEqual(ano.Token) { - return false - } - if !p.Field3DeepEqual(ano.ThriftRpcTimeoutMs) { - return false - } - if !p.Field4DeepEqual(ano.Db) { - return false +var TExecPlanFragmentParams_Coord_DEFAULT *types.TNetworkAddress + +func (p *TExecPlanFragmentParams) GetCoord() (v *types.TNetworkAddress) { + if !p.IsSetCoord() { + return TExecPlanFragmentParams_Coord_DEFAULT } - if !p.Field5DeepEqual(ano.Tbl) { - return false + return p.Coord +} + +var TExecPlanFragmentParams_BackendNum_DEFAULT int32 + +func (p *TExecPlanFragmentParams) GetBackendNum() (v int32) { + if !p.IsSetBackendNum() { + return TExecPlanFragmentParams_BackendNum_DEFAULT } - if !p.Field6DeepEqual(ano.UserIp) { - return false + return *p.BackendNum +} + +var TExecPlanFragmentParams_QueryGlobals_DEFAULT *TQueryGlobals + +func (p *TExecPlanFragmentParams) GetQueryGlobals() (v *TQueryGlobals) { + if !p.IsSetQueryGlobals() { + return TExecPlanFragmentParams_QueryGlobals_DEFAULT } - if !p.Field7DeepEqual(ano.TxnId) { - return false + return p.QueryGlobals +} + +var TExecPlanFragmentParams_QueryOptions_DEFAULT *TQueryOptions + +func (p *TExecPlanFragmentParams) GetQueryOptions() (v *TQueryOptions) { + if !p.IsSetQueryOptions() { + return TExecPlanFragmentParams_QueryOptions_DEFAULT } - if !p.Field8DeepEqual(ano.FragmentInstanceId) { - return false + return p.QueryOptions +} + +var TExecPlanFragmentParams_IsReportSuccess_DEFAULT bool 
+ +func (p *TExecPlanFragmentParams) GetIsReportSuccess() (v bool) { + if !p.IsSetIsReportSuccess() { + return TExecPlanFragmentParams_IsReportSuccess_DEFAULT } - if !p.Field9DeepEqual(ano.DbId) { - return false + return *p.IsReportSuccess +} + +var TExecPlanFragmentParams_ResourceInfo_DEFAULT *types.TResourceInfo + +func (p *TExecPlanFragmentParams) GetResourceInfo() (v *types.TResourceInfo) { + if !p.IsSetResourceInfo() { + return TExecPlanFragmentParams_ResourceInfo_DEFAULT } - if !p.Field10DeepEqual(ano.MaxFilterRatio) { - return false + return p.ResourceInfo +} + +var TExecPlanFragmentParams_ImportLabel_DEFAULT string + +func (p *TExecPlanFragmentParams) GetImportLabel() (v string) { + if !p.IsSetImportLabel() { + return TExecPlanFragmentParams_ImportLabel_DEFAULT } - if !p.Field11DeepEqual(ano.EnablePipelineTxnLoad) { - return false + return *p.ImportLabel +} + +var TExecPlanFragmentParams_DbName_DEFAULT string + +func (p *TExecPlanFragmentParams) GetDbName() (v string) { + if !p.IsSetDbName() { + return TExecPlanFragmentParams_DbName_DEFAULT } - return true + return *p.DbName } -func (p *TTxnParams) Field1DeepEqual(src *bool) bool { +var TExecPlanFragmentParams_LoadJobId_DEFAULT int64 - if p.NeedTxn == src { - return true - } else if p.NeedTxn == nil || src == nil { - return false +func (p *TExecPlanFragmentParams) GetLoadJobId() (v int64) { + if !p.IsSetLoadJobId() { + return TExecPlanFragmentParams_LoadJobId_DEFAULT } - if *p.NeedTxn != *src { - return false + return *p.LoadJobId +} + +var TExecPlanFragmentParams_LoadErrorHubInfo_DEFAULT *TLoadErrorHubInfo + +func (p *TExecPlanFragmentParams) GetLoadErrorHubInfo() (v *TLoadErrorHubInfo) { + if !p.IsSetLoadErrorHubInfo() { + return TExecPlanFragmentParams_LoadErrorHubInfo_DEFAULT } - return true + return p.LoadErrorHubInfo } -func (p *TTxnParams) Field2DeepEqual(src *string) bool { - if p.Token == src { - return true - } else if p.Token == nil || src == nil { - return false +var TExecPlanFragmentParams_FragmentNumOnHost_DEFAULT int32 + +func (p *TExecPlanFragmentParams) GetFragmentNumOnHost() (v int32) { + if !p.IsSetFragmentNumOnHost() { + return TExecPlanFragmentParams_FragmentNumOnHost_DEFAULT } - if strings.Compare(*p.Token, *src) != 0 { - return false + return *p.FragmentNumOnHost +} + +var TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT bool = false + +func (p *TExecPlanFragmentParams) GetIsSimplifiedParam() (v bool) { + if !p.IsSetIsSimplifiedParam() { + return TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT } - return true + return p.IsSimplifiedParam } -func (p *TTxnParams) Field3DeepEqual(src *int64) bool { - if p.ThriftRpcTimeoutMs == src { - return true - } else if p.ThriftRpcTimeoutMs == nil || src == nil { - return false +var TExecPlanFragmentParams_TxnConf_DEFAULT *TTxnParams + +func (p *TExecPlanFragmentParams) GetTxnConf() (v *TTxnParams) { + if !p.IsSetTxnConf() { + return TExecPlanFragmentParams_TxnConf_DEFAULT } - if *p.ThriftRpcTimeoutMs != *src { - return false + return p.TxnConf +} + +var TExecPlanFragmentParams_BackendId_DEFAULT int64 + +func (p *TExecPlanFragmentParams) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TExecPlanFragmentParams_BackendId_DEFAULT } - return true + return *p.BackendId } -func (p *TTxnParams) Field4DeepEqual(src *string) bool { - if p.Db == src { - return true - } else if p.Db == nil || src == nil { - return false +var TExecPlanFragmentParams_GlobalDict_DEFAULT *TGlobalDict + +func (p *TExecPlanFragmentParams) GetGlobalDict() (v *TGlobalDict) { + if 
!p.IsSetGlobalDict() { + return TExecPlanFragmentParams_GlobalDict_DEFAULT } - if strings.Compare(*p.Db, *src) != 0 { - return false + return p.GlobalDict +} + +var TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT bool = false + +func (p *TExecPlanFragmentParams) GetNeedWaitExecutionTrigger() (v bool) { + if !p.IsSetNeedWaitExecutionTrigger() { + return TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT } - return true + return p.NeedWaitExecutionTrigger } -func (p *TTxnParams) Field5DeepEqual(src *string) bool { - if p.Tbl == src { - return true - } else if p.Tbl == nil || src == nil { - return false +var TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT bool = false + +func (p *TExecPlanFragmentParams) GetBuildHashTableForBroadcastJoin() (v bool) { + if !p.IsSetBuildHashTableForBroadcastJoin() { + return TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT } - if strings.Compare(*p.Tbl, *src) != 0 { - return false + return p.BuildHashTableForBroadcastJoin +} + +var TExecPlanFragmentParams_InstancesSharingHashTable_DEFAULT []*types.TUniqueId + +func (p *TExecPlanFragmentParams) GetInstancesSharingHashTable() (v []*types.TUniqueId) { + if !p.IsSetInstancesSharingHashTable() { + return TExecPlanFragmentParams_InstancesSharingHashTable_DEFAULT } - return true + return p.InstancesSharingHashTable } -func (p *TTxnParams) Field6DeepEqual(src *string) bool { - if p.UserIp == src { - return true - } else if p.UserIp == nil || src == nil { - return false +var TExecPlanFragmentParams_TableName_DEFAULT string + +func (p *TExecPlanFragmentParams) GetTableName() (v string) { + if !p.IsSetTableName() { + return TExecPlanFragmentParams_TableName_DEFAULT } - if strings.Compare(*p.UserIp, *src) != 0 { - return false + return *p.TableName +} + +var TExecPlanFragmentParams_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams + +func (p *TExecPlanFragmentParams) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + if !p.IsSetFileScanParams() { + return TExecPlanFragmentParams_FileScanParams_DEFAULT } - return true + return p.FileScanParams } -func (p *TTxnParams) Field7DeepEqual(src *int64) bool { - if p.TxnId == src { - return true - } else if p.TxnId == nil || src == nil { - return false - } - if *p.TxnId != *src { - return false +var TExecPlanFragmentParams_WalId_DEFAULT int64 + +func (p *TExecPlanFragmentParams) GetWalId() (v int64) { + if !p.IsSetWalId() { + return TExecPlanFragmentParams_WalId_DEFAULT } - return true + return *p.WalId } -func (p *TTxnParams) Field8DeepEqual(src *types.TUniqueId) bool { - if !p.FragmentInstanceId.DeepEqual(src) { - return false +var TExecPlanFragmentParams_LoadStreamPerNode_DEFAULT int32 + +func (p *TExecPlanFragmentParams) GetLoadStreamPerNode() (v int32) { + if !p.IsSetLoadStreamPerNode() { + return TExecPlanFragmentParams_LoadStreamPerNode_DEFAULT } - return true + return *p.LoadStreamPerNode } -func (p *TTxnParams) Field9DeepEqual(src *int64) bool { - if p.DbId == src { - return true - } else if p.DbId == nil || src == nil { - return false - } - if *p.DbId != *src { - return false +var TExecPlanFragmentParams_TotalLoadStreams_DEFAULT int32 + +func (p *TExecPlanFragmentParams) GetTotalLoadStreams() (v int32) { + if !p.IsSetTotalLoadStreams() { + return TExecPlanFragmentParams_TotalLoadStreams_DEFAULT } - return true + return *p.TotalLoadStreams } -func (p *TTxnParams) Field10DeepEqual(src *float64) bool { - if p.MaxFilterRatio == src { - return true - } else if p.MaxFilterRatio 
== nil || src == nil { - return false - } - if *p.MaxFilterRatio != *src { - return false +var TExecPlanFragmentParams_NumLocalSink_DEFAULT int32 + +func (p *TExecPlanFragmentParams) GetNumLocalSink() (v int32) { + if !p.IsSetNumLocalSink() { + return TExecPlanFragmentParams_NumLocalSink_DEFAULT } - return true + return *p.NumLocalSink } -func (p *TTxnParams) Field11DeepEqual(src bool) bool { - if p.EnablePipelineTxnLoad != src { - return false +var TExecPlanFragmentParams_ContentLength_DEFAULT int64 + +func (p *TExecPlanFragmentParams) GetContentLength() (v int64) { + if !p.IsSetContentLength() { + return TExecPlanFragmentParams_ContentLength_DEFAULT } - return true + return *p.ContentLength } -type TColumnDict struct { - Type *types.TPrimitiveType `thrift:"type,1,optional" frugal:"1,optional,TPrimitiveType" json:"type,omitempty"` - StrDict []string `thrift:"str_dict,2" frugal:"2,default,list" json:"str_dict"` -} +var TExecPlanFragmentParams_WorkloadGroups_DEFAULT []*TPipelineWorkloadGroup -func NewTColumnDict() *TColumnDict { - return &TColumnDict{} +func (p *TExecPlanFragmentParams) GetWorkloadGroups() (v []*TPipelineWorkloadGroup) { + if !p.IsSetWorkloadGroups() { + return TExecPlanFragmentParams_WorkloadGroups_DEFAULT + } + return p.WorkloadGroups } -func (p *TColumnDict) InitDefault() { - *p = TColumnDict{} +var TExecPlanFragmentParams_IsNereids_DEFAULT bool = true + +func (p *TExecPlanFragmentParams) GetIsNereids() (v bool) { + if !p.IsSetIsNereids() { + return TExecPlanFragmentParams_IsNereids_DEFAULT + } + return p.IsNereids } -var TColumnDict_Type_DEFAULT types.TPrimitiveType +var TExecPlanFragmentParams_CurrentConnectFe_DEFAULT *types.TNetworkAddress -func (p *TColumnDict) GetType() (v types.TPrimitiveType) { - if !p.IsSetType() { - return TColumnDict_Type_DEFAULT +func (p *TExecPlanFragmentParams) GetCurrentConnectFe() (v *types.TNetworkAddress) { + if !p.IsSetCurrentConnectFe() { + return TExecPlanFragmentParams_CurrentConnectFe_DEFAULT } - return *p.Type + return p.CurrentConnectFe } -func (p *TColumnDict) GetStrDict() (v []string) { - return p.StrDict +var TExecPlanFragmentParams_IsMowTable_DEFAULT bool + +func (p *TExecPlanFragmentParams) GetIsMowTable() (v bool) { + if !p.IsSetIsMowTable() { + return TExecPlanFragmentParams_IsMowTable_DEFAULT + } + return *p.IsMowTable } -func (p *TColumnDict) SetType(val *types.TPrimitiveType) { - p.Type = val +func (p *TExecPlanFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { + p.ProtocolVersion = val } -func (p *TColumnDict) SetStrDict(val []string) { - p.StrDict = val +func (p *TExecPlanFragmentParams) SetFragment(val *planner.TPlanFragment) { + p.Fragment = val } - -var fieldIDToName_TColumnDict = map[int16]string{ - 1: "type", - 2: "str_dict", +func (p *TExecPlanFragmentParams) SetDescTbl(val *descriptors.TDescriptorTable) { + p.DescTbl = val } - -func (p *TColumnDict) IsSetType() bool { - return p.Type != nil +func (p *TExecPlanFragmentParams) SetParams(val *TPlanFragmentExecParams) { + p.Params = val +} +func (p *TExecPlanFragmentParams) SetCoord(val *types.TNetworkAddress) { + p.Coord = val +} +func (p *TExecPlanFragmentParams) SetBackendNum(val *int32) { + p.BackendNum = val +} +func (p *TExecPlanFragmentParams) SetQueryGlobals(val *TQueryGlobals) { + p.QueryGlobals = val +} +func (p *TExecPlanFragmentParams) SetQueryOptions(val *TQueryOptions) { + p.QueryOptions = val +} +func (p *TExecPlanFragmentParams) SetIsReportSuccess(val *bool) { + p.IsReportSuccess = val +} +func (p *TExecPlanFragmentParams) 
SetResourceInfo(val *types.TResourceInfo) { + p.ResourceInfo = val +} +func (p *TExecPlanFragmentParams) SetImportLabel(val *string) { + p.ImportLabel = val +} +func (p *TExecPlanFragmentParams) SetDbName(val *string) { + p.DbName = val +} +func (p *TExecPlanFragmentParams) SetLoadJobId(val *int64) { + p.LoadJobId = val +} +func (p *TExecPlanFragmentParams) SetLoadErrorHubInfo(val *TLoadErrorHubInfo) { + p.LoadErrorHubInfo = val +} +func (p *TExecPlanFragmentParams) SetFragmentNumOnHost(val *int32) { + p.FragmentNumOnHost = val +} +func (p *TExecPlanFragmentParams) SetIsSimplifiedParam(val bool) { + p.IsSimplifiedParam = val +} +func (p *TExecPlanFragmentParams) SetTxnConf(val *TTxnParams) { + p.TxnConf = val +} +func (p *TExecPlanFragmentParams) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TExecPlanFragmentParams) SetGlobalDict(val *TGlobalDict) { + p.GlobalDict = val +} +func (p *TExecPlanFragmentParams) SetNeedWaitExecutionTrigger(val bool) { + p.NeedWaitExecutionTrigger = val +} +func (p *TExecPlanFragmentParams) SetBuildHashTableForBroadcastJoin(val bool) { + p.BuildHashTableForBroadcastJoin = val +} +func (p *TExecPlanFragmentParams) SetInstancesSharingHashTable(val []*types.TUniqueId) { + p.InstancesSharingHashTable = val +} +func (p *TExecPlanFragmentParams) SetTableName(val *string) { + p.TableName = val +} +func (p *TExecPlanFragmentParams) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + p.FileScanParams = val +} +func (p *TExecPlanFragmentParams) SetWalId(val *int64) { + p.WalId = val +} +func (p *TExecPlanFragmentParams) SetLoadStreamPerNode(val *int32) { + p.LoadStreamPerNode = val +} +func (p *TExecPlanFragmentParams) SetTotalLoadStreams(val *int32) { + p.TotalLoadStreams = val +} +func (p *TExecPlanFragmentParams) SetNumLocalSink(val *int32) { + p.NumLocalSink = val +} +func (p *TExecPlanFragmentParams) SetContentLength(val *int64) { + p.ContentLength = val +} +func (p *TExecPlanFragmentParams) SetWorkloadGroups(val []*TPipelineWorkloadGroup) { + p.WorkloadGroups = val +} +func (p *TExecPlanFragmentParams) SetIsNereids(val bool) { + p.IsNereids = val +} +func (p *TExecPlanFragmentParams) SetCurrentConnectFe(val *types.TNetworkAddress) { + p.CurrentConnectFe = val +} +func (p *TExecPlanFragmentParams) SetIsMowTable(val *bool) { + p.IsMowTable = val } -func (p *TColumnDict) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 +var fieldIDToName_TExecPlanFragmentParams = map[int16]string{ + 1: "protocol_version", + 2: "fragment", + 3: "desc_tbl", + 4: "params", + 5: "coord", + 6: "backend_num", + 7: "query_globals", + 8: "query_options", + 9: "is_report_success", + 10: "resource_info", + 11: "import_label", + 12: "db_name", + 13: "load_job_id", + 14: "load_error_hub_info", + 15: "fragment_num_on_host", + 16: "is_simplified_param", + 17: "txn_conf", + 18: "backend_id", + 19: "global_dict", + 20: "need_wait_execution_trigger", + 21: "build_hash_table_for_broadcast_join", + 22: "instances_sharing_hash_table", + 23: "table_name", + 24: "file_scan_params", + 25: "wal_id", + 26: "load_stream_per_node", + 27: "total_load_streams", + 28: "num_local_sink", + 29: "content_length", + 30: "workload_groups", + 31: "is_nereids", + 32: "current_connect_fe", + 1000: "is_mow_table", +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } +func (p *TExecPlanFragmentParams) IsSetFragment() bool { + return p.Fragment != nil +} - for { - _, fieldTypeId, fieldId, err = 
iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +func (p *TExecPlanFragmentParams) IsSetDescTbl() bool { + return p.DescTbl != nil +} - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +func (p *TExecPlanFragmentParams) IsSetParams() bool { + return p.Params != nil +} - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } +func (p *TExecPlanFragmentParams) IsSetCoord() bool { + return p.Coord != nil +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TColumnDict[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +func (p *TExecPlanFragmentParams) IsSetBackendNum() bool { + return p.BackendNum != nil +} -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func (p *TExecPlanFragmentParams) IsSetQueryGlobals() bool { + return p.QueryGlobals != nil } -func (p *TColumnDict) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := types.TPrimitiveType(v) - p.Type = &tmp - } - return nil +func (p *TExecPlanFragmentParams) IsSetQueryOptions() bool { + return p.QueryOptions != nil } -func (p *TColumnDict) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.StrDict = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } +func (p *TExecPlanFragmentParams) IsSetIsReportSuccess() bool { + return p.IsReportSuccess != nil +} - p.StrDict = append(p.StrDict, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil +func (p *TExecPlanFragmentParams) IsSetResourceInfo() bool { + return p.ResourceInfo != nil } -func (p *TColumnDict) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TColumnDict"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } +func (p *TExecPlanFragmentParams) IsSetImportLabel() bool { + return p.ImportLabel != nil +} - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +func (p *TExecPlanFragmentParams) IsSetDbName() bool { + return p.DbName != nil } -func (p *TColumnDict) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetType() { - if err = oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.Type)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +func (p *TExecPlanFragmentParams) IsSetLoadJobId() bool { + return p.LoadJobId != nil } -func (p *TColumnDict) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("str_dict", thrift.LIST, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.StrDict)); err != nil { - return err - } - for _, v := range p.StrDict { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +func (p *TExecPlanFragmentParams) IsSetLoadErrorHubInfo() bool { + return p.LoadErrorHubInfo != nil } -func (p *TColumnDict) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TColumnDict(%+v)", *p) +func (p *TExecPlanFragmentParams) IsSetFragmentNumOnHost() bool { + return p.FragmentNumOnHost != nil } -func (p *TColumnDict) DeepEqual(ano *TColumnDict) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Type) { - return false - } - if !p.Field2DeepEqual(ano.StrDict) { - return false - } - return true +func (p *TExecPlanFragmentParams) IsSetIsSimplifiedParam() bool { + return p.IsSimplifiedParam != TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT } -func (p *TColumnDict) Field1DeepEqual(src *types.TPrimitiveType) bool { +func (p *TExecPlanFragmentParams) IsSetTxnConf() bool { + return p.TxnConf != nil +} - if p.Type == src { - return true - } else if p.Type == nil || src == nil { - return false - } - if *p.Type != *src { - return false - } - return true +func (p *TExecPlanFragmentParams) IsSetBackendId() bool { + return p.BackendId != nil } -func (p *TColumnDict) Field2DeepEqual(src []string) bool { - if len(p.StrDict) != len(src) { - return false - } - for i, v := range p.StrDict { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true +func (p *TExecPlanFragmentParams) IsSetGlobalDict() bool { + return p.GlobalDict != nil } -type TGlobalDict struct { - Dicts map[int32]*TColumnDict `thrift:"dicts,1,optional" frugal:"1,optional,map" json:"dicts,omitempty"` - SlotDicts map[int32]int32 `thrift:"slot_dicts,2,optional" 
frugal:"2,optional,map" json:"slot_dicts,omitempty"` +func (p *TExecPlanFragmentParams) IsSetNeedWaitExecutionTrigger() bool { + return p.NeedWaitExecutionTrigger != TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT } -func NewTGlobalDict() *TGlobalDict { - return &TGlobalDict{} +func (p *TExecPlanFragmentParams) IsSetBuildHashTableForBroadcastJoin() bool { + return p.BuildHashTableForBroadcastJoin != TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT } -func (p *TGlobalDict) InitDefault() { - *p = TGlobalDict{} +func (p *TExecPlanFragmentParams) IsSetInstancesSharingHashTable() bool { + return p.InstancesSharingHashTable != nil } -var TGlobalDict_Dicts_DEFAULT map[int32]*TColumnDict +func (p *TExecPlanFragmentParams) IsSetTableName() bool { + return p.TableName != nil +} -func (p *TGlobalDict) GetDicts() (v map[int32]*TColumnDict) { - if !p.IsSetDicts() { - return TGlobalDict_Dicts_DEFAULT - } - return p.Dicts +func (p *TExecPlanFragmentParams) IsSetFileScanParams() bool { + return p.FileScanParams != nil } -var TGlobalDict_SlotDicts_DEFAULT map[int32]int32 +func (p *TExecPlanFragmentParams) IsSetWalId() bool { + return p.WalId != nil +} -func (p *TGlobalDict) GetSlotDicts() (v map[int32]int32) { - if !p.IsSetSlotDicts() { - return TGlobalDict_SlotDicts_DEFAULT - } - return p.SlotDicts +func (p *TExecPlanFragmentParams) IsSetLoadStreamPerNode() bool { + return p.LoadStreamPerNode != nil } -func (p *TGlobalDict) SetDicts(val map[int32]*TColumnDict) { - p.Dicts = val + +func (p *TExecPlanFragmentParams) IsSetTotalLoadStreams() bool { + return p.TotalLoadStreams != nil } -func (p *TGlobalDict) SetSlotDicts(val map[int32]int32) { - p.SlotDicts = val + +func (p *TExecPlanFragmentParams) IsSetNumLocalSink() bool { + return p.NumLocalSink != nil } -var fieldIDToName_TGlobalDict = map[int16]string{ - 1: "dicts", - 2: "slot_dicts", +func (p *TExecPlanFragmentParams) IsSetContentLength() bool { + return p.ContentLength != nil } -func (p *TGlobalDict) IsSetDicts() bool { - return p.Dicts != nil +func (p *TExecPlanFragmentParams) IsSetWorkloadGroups() bool { + return p.WorkloadGroups != nil } -func (p *TGlobalDict) IsSetSlotDicts() bool { - return p.SlotDicts != nil +func (p *TExecPlanFragmentParams) IsSetIsNereids() bool { + return p.IsNereids != TExecPlanFragmentParams_IsNereids_DEFAULT } -func (p *TGlobalDict) Read(iprot thrift.TProtocol) (err error) { +func (p *TExecPlanFragmentParams) IsSetCurrentConnectFe() bool { + return p.CurrentConnectFe != nil +} + +func (p *TExecPlanFragmentParams) IsSetIsMowTable() bool { + return p.IsMowTable != nil +} + +func (p *TExecPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetProtocolVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -11341,1877 +16902,2075 @@ func (p *TGlobalDict) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = 
p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 2: + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I64 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I32 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.I64 { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if 
fieldTypeId == thrift.BOOL { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.LIST { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.STRING { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: if fieldTypeId == thrift.MAP { - if err = p.ReadField2(iprot); err != nil { + if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 25: + if fieldTypeId == thrift.I64 { + if err = p.ReadField25(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - default: - if err = iprot.Skip(fieldTypeId); err != nil { + case 26: + if fieldTypeId == thrift.I32 { + if err = p.ReadField26(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGlobalDict[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TGlobalDict) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.Dicts = make(map[int32]*TColumnDict, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := NewTColumnDict() - if err := _val.Read(iprot); err != nil { - return err - } - - p.Dicts[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TGlobalDict) ReadField2(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.SlotDicts = make(map[int32]int32, size) - for i := 0; i < size; i++ { - var _key int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - var _val int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _val = v - } - - p.SlotDicts[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TGlobalDict) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TGlobalDict"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError 
- } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) -} - -func (p *TGlobalDict) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDicts() { - if err = oprot.WriteFieldBegin("dicts", thrift.MAP, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.Dicts)); err != nil { - return err - } - for k, v := range p.Dicts { - - if err := oprot.WriteI32(k); err != nil { - return err + case 27: + if fieldTypeId == thrift.I32 { + if err = p.ReadField27(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 28: + if fieldTypeId == thrift.I32 { + if err = p.ReadField28(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 29: + if fieldTypeId == thrift.I64 { + if err = p.ReadField29(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 30: + if fieldTypeId == thrift.LIST { + if err = p.ReadField30(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - - if err := v.Write(oprot); err != nil { - return err + case 31: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField31(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TGlobalDict) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetSlotDicts() { - if err = oprot.WriteFieldBegin("slot_dicts", thrift.MAP, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.SlotDicts)); err != nil { - return err - } - for k, v := range p.SlotDicts { - - if err := oprot.WriteI32(k); err != nil { - return err + case 32: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField32(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - - if err := oprot.WriteI32(v); err != nil { - return err + case 1000: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); 
err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TGlobalDict) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TGlobalDict(%+v)", *p) -} - -func (p *TGlobalDict) DeepEqual(ano *TGlobalDict) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Dicts) { - return false - } - if !p.Field2DeepEqual(ano.SlotDicts) { - return false - } - return true -} - -func (p *TGlobalDict) Field1DeepEqual(src map[int32]*TColumnDict) bool { - - if len(p.Dicts) != len(src) { - return false - } - for k, v := range p.Dicts { - _src := src[k] - if !v.DeepEqual(_src) { - return false - } - } - return true -} -func (p *TGlobalDict) Field2DeepEqual(src map[int32]int32) bool { - - if len(p.SlotDicts) != len(src) { - return false - } - for k, v := range p.SlotDicts { - _src := src[k] - if v != _src { - return false + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } - return true -} - -type TExecPlanFragmentParams struct { - ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` - Fragment *planner.TPlanFragment `thrift:"fragment,2,optional" frugal:"2,optional,planner.TPlanFragment" json:"fragment,omitempty"` - DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,3,optional" frugal:"3,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` - Params *TPlanFragmentExecParams `thrift:"params,4,optional" frugal:"4,optional,TPlanFragmentExecParams" json:"params,omitempty"` - Coord *types.TNetworkAddress `thrift:"coord,5,optional" frugal:"5,optional,types.TNetworkAddress" json:"coord,omitempty"` - BackendNum *int32 `thrift:"backend_num,6,optional" frugal:"6,optional,i32" json:"backend_num,omitempty"` - QueryGlobals *TQueryGlobals `thrift:"query_globals,7,optional" frugal:"7,optional,TQueryGlobals" json:"query_globals,omitempty"` - QueryOptions *TQueryOptions `thrift:"query_options,8,optional" frugal:"8,optional,TQueryOptions" json:"query_options,omitempty"` - IsReportSuccess *bool `thrift:"is_report_success,9,optional" frugal:"9,optional,bool" json:"is_report_success,omitempty"` - ResourceInfo *types.TResourceInfo `thrift:"resource_info,10,optional" frugal:"10,optional,types.TResourceInfo" json:"resource_info,omitempty"` - ImportLabel *string `thrift:"import_label,11,optional" frugal:"11,optional,string" json:"import_label,omitempty"` - DbName *string `thrift:"db_name,12,optional" frugal:"12,optional,string" json:"db_name,omitempty"` - LoadJobId *int64 `thrift:"load_job_id,13,optional" frugal:"13,optional,i64" json:"load_job_id,omitempty"` - LoadErrorHubInfo *TLoadErrorHubInfo `thrift:"load_error_hub_info,14,optional" frugal:"14,optional,TLoadErrorHubInfo" json:"load_error_hub_info,omitempty"` - FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,15,optional" frugal:"15,optional,i32" json:"fragment_num_on_host,omitempty"` - IsSimplifiedParam bool `thrift:"is_simplified_param,16,optional" frugal:"16,optional,bool" json:"is_simplified_param,omitempty"` - TxnConf *TTxnParams `thrift:"txn_conf,17,optional" frugal:"17,optional,TTxnParams" json:"txn_conf,omitempty"` - BackendId *int64 `thrift:"backend_id,18,optional" frugal:"18,optional,i64" json:"backend_id,omitempty"` - 
GlobalDict *TGlobalDict `thrift:"global_dict,19,optional" frugal:"19,optional,TGlobalDict" json:"global_dict,omitempty"` - NeedWaitExecutionTrigger bool `thrift:"need_wait_execution_trigger,20,optional" frugal:"20,optional,bool" json:"need_wait_execution_trigger,omitempty"` - BuildHashTableForBroadcastJoin bool `thrift:"build_hash_table_for_broadcast_join,21,optional" frugal:"21,optional,bool" json:"build_hash_table_for_broadcast_join,omitempty"` - InstancesSharingHashTable []*types.TUniqueId `thrift:"instances_sharing_hash_table,22,optional" frugal:"22,optional,list" json:"instances_sharing_hash_table,omitempty"` - TableName *string `thrift:"table_name,23,optional" frugal:"23,optional,string" json:"table_name,omitempty"` - FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,24,optional" frugal:"24,optional,map" json:"file_scan_params,omitempty"` - WalId *int64 `thrift:"wal_id,25,optional" frugal:"25,optional,i64" json:"wal_id,omitempty"` -} - -func NewTExecPlanFragmentParams() *TExecPlanFragmentParams { - return &TExecPlanFragmentParams{ - - IsSimplifiedParam: false, - NeedWaitExecutionTrigger: false, - BuildHashTableForBroadcastJoin: false, - } -} - -func (p *TExecPlanFragmentParams) InitDefault() { - *p = TExecPlanFragmentParams{ - - IsSimplifiedParam: false, - NeedWaitExecutionTrigger: false, - BuildHashTableForBroadcastJoin: false, - } -} - -func (p *TExecPlanFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { - return p.ProtocolVersion -} - -var TExecPlanFragmentParams_Fragment_DEFAULT *planner.TPlanFragment - -func (p *TExecPlanFragmentParams) GetFragment() (v *planner.TPlanFragment) { - if !p.IsSetFragment() { - return TExecPlanFragmentParams_Fragment_DEFAULT - } - return p.Fragment -} - -var TExecPlanFragmentParams_DescTbl_DEFAULT *descriptors.TDescriptorTable - -func (p *TExecPlanFragmentParams) GetDescTbl() (v *descriptors.TDescriptorTable) { - if !p.IsSetDescTbl() { - return TExecPlanFragmentParams_DescTbl_DEFAULT - } - return p.DescTbl -} - -var TExecPlanFragmentParams_Params_DEFAULT *TPlanFragmentExecParams - -func (p *TExecPlanFragmentParams) GetParams() (v *TPlanFragmentExecParams) { - if !p.IsSetParams() { - return TExecPlanFragmentParams_Params_DEFAULT - } - return p.Params -} - -var TExecPlanFragmentParams_Coord_DEFAULT *types.TNetworkAddress - -func (p *TExecPlanFragmentParams) GetCoord() (v *types.TNetworkAddress) { - if !p.IsSetCoord() { - return TExecPlanFragmentParams_Coord_DEFAULT + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return p.Coord -} - -var TExecPlanFragmentParams_BackendNum_DEFAULT int32 -func (p *TExecPlanFragmentParams) GetBackendNum() (v int32) { - if !p.IsSetBackendNum() { - return TExecPlanFragmentParams_BackendNum_DEFAULT + if !issetProtocolVersion { + fieldId = 1 + goto RequiredFieldNotSetError } - return *p.BackendNum -} - -var TExecPlanFragmentParams_QueryGlobals_DEFAULT *TQueryGlobals + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -func (p *TExecPlanFragmentParams) GetQueryGlobals() (v 
*TQueryGlobals) { - if !p.IsSetQueryGlobals() { - return TExecPlanFragmentParams_QueryGlobals_DEFAULT - } - return p.QueryGlobals +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExecPlanFragmentParams[fieldId])) } -var TExecPlanFragmentParams_QueryOptions_DEFAULT *TQueryOptions +func (p *TExecPlanFragmentParams) ReadField1(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) GetQueryOptions() (v *TQueryOptions) { - if !p.IsSetQueryOptions() { - return TExecPlanFragmentParams_QueryOptions_DEFAULT + var _field PaloInternalServiceVersion + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = PaloInternalServiceVersion(v) } - return p.QueryOptions + p.ProtocolVersion = _field + return nil } - -var TExecPlanFragmentParams_IsReportSuccess_DEFAULT bool - -func (p *TExecPlanFragmentParams) GetIsReportSuccess() (v bool) { - if !p.IsSetIsReportSuccess() { - return TExecPlanFragmentParams_IsReportSuccess_DEFAULT +func (p *TExecPlanFragmentParams) ReadField2(iprot thrift.TProtocol) error { + _field := planner.NewTPlanFragment() + if err := _field.Read(iprot); err != nil { + return err } - return *p.IsReportSuccess + p.Fragment = _field + return nil } - -var TExecPlanFragmentParams_ResourceInfo_DEFAULT *types.TResourceInfo - -func (p *TExecPlanFragmentParams) GetResourceInfo() (v *types.TResourceInfo) { - if !p.IsSetResourceInfo() { - return TExecPlanFragmentParams_ResourceInfo_DEFAULT +func (p *TExecPlanFragmentParams) ReadField3(iprot thrift.TProtocol) error { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { + return err } - return p.ResourceInfo + p.DescTbl = _field + return nil } - -var TExecPlanFragmentParams_ImportLabel_DEFAULT string - -func (p *TExecPlanFragmentParams) GetImportLabel() (v string) { - if !p.IsSetImportLabel() { - return TExecPlanFragmentParams_ImportLabel_DEFAULT +func (p *TExecPlanFragmentParams) ReadField4(iprot thrift.TProtocol) error { + _field := NewTPlanFragmentExecParams() + if err := _field.Read(iprot); err != nil { + return err } - return *p.ImportLabel + p.Params = _field + return nil } - -var TExecPlanFragmentParams_DbName_DEFAULT string - -func (p *TExecPlanFragmentParams) GetDbName() (v string) { - if !p.IsSetDbName() { - return TExecPlanFragmentParams_DbName_DEFAULT +func (p *TExecPlanFragmentParams) ReadField5(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } - return *p.DbName + p.Coord = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField6(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_LoadJobId_DEFAULT int64 - -func (p *TExecPlanFragmentParams) GetLoadJobId() (v int64) { - if !p.IsSetLoadJobId() { - return TExecPlanFragmentParams_LoadJobId_DEFAULT + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return *p.LoadJobId + p.BackendNum = _field + return nil } - -var TExecPlanFragmentParams_LoadErrorHubInfo_DEFAULT *TLoadErrorHubInfo - -func (p *TExecPlanFragmentParams) GetLoadErrorHubInfo() (v *TLoadErrorHubInfo) { - if !p.IsSetLoadErrorHubInfo() { - return TExecPlanFragmentParams_LoadErrorHubInfo_DEFAULT +func (p 
*TExecPlanFragmentParams) ReadField7(iprot thrift.TProtocol) error { + _field := NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { + return err } - return p.LoadErrorHubInfo + p.QueryGlobals = _field + return nil } - -var TExecPlanFragmentParams_FragmentNumOnHost_DEFAULT int32 - -func (p *TExecPlanFragmentParams) GetFragmentNumOnHost() (v int32) { - if !p.IsSetFragmentNumOnHost() { - return TExecPlanFragmentParams_FragmentNumOnHost_DEFAULT +func (p *TExecPlanFragmentParams) ReadField8(iprot thrift.TProtocol) error { + _field := NewTQueryOptions() + if err := _field.Read(iprot); err != nil { + return err } - return *p.FragmentNumOnHost + p.QueryOptions = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField9(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT bool = false - -func (p *TExecPlanFragmentParams) GetIsSimplifiedParam() (v bool) { - if !p.IsSetIsSimplifiedParam() { - return TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } - return p.IsSimplifiedParam + p.IsReportSuccess = _field + return nil } - -var TExecPlanFragmentParams_TxnConf_DEFAULT *TTxnParams - -func (p *TExecPlanFragmentParams) GetTxnConf() (v *TTxnParams) { - if !p.IsSetTxnConf() { - return TExecPlanFragmentParams_TxnConf_DEFAULT +func (p *TExecPlanFragmentParams) ReadField10(iprot thrift.TProtocol) error { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { + return err } - return p.TxnConf + p.ResourceInfo = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField11(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_BackendId_DEFAULT int64 - -func (p *TExecPlanFragmentParams) GetBackendId() (v int64) { - if !p.IsSetBackendId() { - return TExecPlanFragmentParams_BackendId_DEFAULT + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return *p.BackendId + p.ImportLabel = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField12(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_GlobalDict_DEFAULT *TGlobalDict - -func (p *TExecPlanFragmentParams) GetGlobalDict() (v *TGlobalDict) { - if !p.IsSetGlobalDict() { - return TExecPlanFragmentParams_GlobalDict_DEFAULT + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return p.GlobalDict + p.DbName = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField13(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT bool = false - -func (p *TExecPlanFragmentParams) GetNeedWaitExecutionTrigger() (v bool) { - if !p.IsSetNeedWaitExecutionTrigger() { - return TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return p.NeedWaitExecutionTrigger + p.LoadJobId = _field + return nil } - -var TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT bool = false - -func (p *TExecPlanFragmentParams) GetBuildHashTableForBroadcastJoin() (v bool) { - if !p.IsSetBuildHashTableForBroadcastJoin() { - return TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT +func (p *TExecPlanFragmentParams) ReadField14(iprot thrift.TProtocol) error { + _field := NewTLoadErrorHubInfo() + if err := _field.Read(iprot); err != nil { + return err } - return p.BuildHashTableForBroadcastJoin + 
p.LoadErrorHubInfo = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField15(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_InstancesSharingHashTable_DEFAULT []*types.TUniqueId - -func (p *TExecPlanFragmentParams) GetInstancesSharingHashTable() (v []*types.TUniqueId) { - if !p.IsSetInstancesSharingHashTable() { - return TExecPlanFragmentParams_InstancesSharingHashTable_DEFAULT + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return p.InstancesSharingHashTable + p.FragmentNumOnHost = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField16(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_TableName_DEFAULT string - -func (p *TExecPlanFragmentParams) GetTableName() (v string) { - if !p.IsSetTableName() { - return TExecPlanFragmentParams_TableName_DEFAULT + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } - return *p.TableName + p.IsSimplifiedParam = _field + return nil } - -var TExecPlanFragmentParams_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams - -func (p *TExecPlanFragmentParams) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { - if !p.IsSetFileScanParams() { - return TExecPlanFragmentParams_FileScanParams_DEFAULT +func (p *TExecPlanFragmentParams) ReadField17(iprot thrift.TProtocol) error { + _field := NewTTxnParams() + if err := _field.Read(iprot); err != nil { + return err } - return p.FileScanParams + p.TxnConf = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField18(iprot thrift.TProtocol) error { -var TExecPlanFragmentParams_WalId_DEFAULT int64 - -func (p *TExecPlanFragmentParams) GetWalId() (v int64) { - if !p.IsSetWalId() { - return TExecPlanFragmentParams_WalId_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return *p.WalId -} -func (p *TExecPlanFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { - p.ProtocolVersion = val -} -func (p *TExecPlanFragmentParams) SetFragment(val *planner.TPlanFragment) { - p.Fragment = val -} -func (p *TExecPlanFragmentParams) SetDescTbl(val *descriptors.TDescriptorTable) { - p.DescTbl = val -} -func (p *TExecPlanFragmentParams) SetParams(val *TPlanFragmentExecParams) { - p.Params = val -} -func (p *TExecPlanFragmentParams) SetCoord(val *types.TNetworkAddress) { - p.Coord = val -} -func (p *TExecPlanFragmentParams) SetBackendNum(val *int32) { - p.BackendNum = val -} -func (p *TExecPlanFragmentParams) SetQueryGlobals(val *TQueryGlobals) { - p.QueryGlobals = val -} -func (p *TExecPlanFragmentParams) SetQueryOptions(val *TQueryOptions) { - p.QueryOptions = val -} -func (p *TExecPlanFragmentParams) SetIsReportSuccess(val *bool) { - p.IsReportSuccess = val -} -func (p *TExecPlanFragmentParams) SetResourceInfo(val *types.TResourceInfo) { - p.ResourceInfo = val -} -func (p *TExecPlanFragmentParams) SetImportLabel(val *string) { - p.ImportLabel = val -} -func (p *TExecPlanFragmentParams) SetDbName(val *string) { - p.DbName = val -} -func (p *TExecPlanFragmentParams) SetLoadJobId(val *int64) { - p.LoadJobId = val -} -func (p *TExecPlanFragmentParams) SetLoadErrorHubInfo(val *TLoadErrorHubInfo) { - p.LoadErrorHubInfo = val -} -func (p *TExecPlanFragmentParams) SetFragmentNumOnHost(val *int32) { - p.FragmentNumOnHost = val -} -func (p *TExecPlanFragmentParams) SetIsSimplifiedParam(val bool) { - p.IsSimplifiedParam = val -} -func (p 
*TExecPlanFragmentParams) SetTxnConf(val *TTxnParams) { - p.TxnConf = val -} -func (p *TExecPlanFragmentParams) SetBackendId(val *int64) { - p.BackendId = val -} -func (p *TExecPlanFragmentParams) SetGlobalDict(val *TGlobalDict) { - p.GlobalDict = val -} -func (p *TExecPlanFragmentParams) SetNeedWaitExecutionTrigger(val bool) { - p.NeedWaitExecutionTrigger = val -} -func (p *TExecPlanFragmentParams) SetBuildHashTableForBroadcastJoin(val bool) { - p.BuildHashTableForBroadcastJoin = val -} -func (p *TExecPlanFragmentParams) SetInstancesSharingHashTable(val []*types.TUniqueId) { - p.InstancesSharingHashTable = val -} -func (p *TExecPlanFragmentParams) SetTableName(val *string) { - p.TableName = val -} -func (p *TExecPlanFragmentParams) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { - p.FileScanParams = val + p.BackendId = _field + return nil } -func (p *TExecPlanFragmentParams) SetWalId(val *int64) { - p.WalId = val +func (p *TExecPlanFragmentParams) ReadField19(iprot thrift.TProtocol) error { + _field := NewTGlobalDict() + if err := _field.Read(iprot); err != nil { + return err + } + p.GlobalDict = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField20(iprot thrift.TProtocol) error { -var fieldIDToName_TExecPlanFragmentParams = map[int16]string{ - 1: "protocol_version", - 2: "fragment", - 3: "desc_tbl", - 4: "params", - 5: "coord", - 6: "backend_num", - 7: "query_globals", - 8: "query_options", - 9: "is_report_success", - 10: "resource_info", - 11: "import_label", - 12: "db_name", - 13: "load_job_id", - 14: "load_error_hub_info", - 15: "fragment_num_on_host", - 16: "is_simplified_param", - 17: "txn_conf", - 18: "backend_id", - 19: "global_dict", - 20: "need_wait_execution_trigger", - 21: "build_hash_table_for_broadcast_join", - 22: "instances_sharing_hash_table", - 23: "table_name", - 24: "file_scan_params", - 25: "wal_id", + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.NeedWaitExecutionTrigger = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField21(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetFragment() bool { - return p.Fragment != nil + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.BuildHashTableForBroadcastJoin = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField22(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TUniqueId, 0, size) + values := make([]types.TUniqueId, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TExecPlanFragmentParams) IsSetDescTbl() bool { - return p.DescTbl != nil + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.InstancesSharingHashTable = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField23(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetParams() bool { - return p.Params != nil + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableName = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField24(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + values 
:= make([]plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TExecPlanFragmentParams) IsSetCoord() bool { - return p.Coord != nil + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.FileScanParams = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField25(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetBackendNum() bool { - return p.BackendNum != nil + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.WalId = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField26(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetQueryGlobals() bool { - return p.QueryGlobals != nil + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.LoadStreamPerNode = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField27(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetQueryOptions() bool { - return p.QueryOptions != nil + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.TotalLoadStreams = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField28(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetIsReportSuccess() bool { - return p.IsReportSuccess != nil + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.NumLocalSink = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField29(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetResourceInfo() bool { - return p.ResourceInfo != nil + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ContentLength = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField30(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineWorkloadGroup, 0, size) + values := make([]TPipelineWorkloadGroup, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TExecPlanFragmentParams) IsSetImportLabel() bool { - return p.ImportLabel != nil + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.WorkloadGroups = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField31(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetDbName() bool { - return p.DbName != nil + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsNereids = _field + return nil +} +func (p *TExecPlanFragmentParams) ReadField32(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.CurrentConnectFe = _field + return nil } +func (p *TExecPlanFragmentParams) ReadField1000(iprot thrift.TProtocol) error { -func (p *TExecPlanFragmentParams) IsSetLoadJobId() bool { - return p.LoadJobId != nil + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + 
p.IsMowTable = _field + return nil } -func (p *TExecPlanFragmentParams) IsSetLoadErrorHubInfo() bool { - return p.LoadErrorHubInfo != nil +func (p *TExecPlanFragmentParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TExecPlanFragmentParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } + if err = p.writeField25(oprot); err != nil { + fieldId = 25 + goto WriteFieldError + } + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } + if err = p.writeField31(oprot); err != nil { + fieldId = 31 + goto WriteFieldError + } + if err = p.writeField32(oprot); err != nil { + fieldId = 32 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: 
", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetFragmentNumOnHost() bool { - return p.FragmentNumOnHost != nil +func (p *TExecPlanFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetIsSimplifiedParam() bool { - return p.IsSimplifiedParam != TExecPlanFragmentParams_IsSimplifiedParam_DEFAULT +func (p *TExecPlanFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFragment() { + if err = oprot.WriteFieldBegin("fragment", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Fragment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetTxnConf() bool { - return p.TxnConf != nil +func (p *TExecPlanFragmentParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDescTbl() { + if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.DescTbl.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetBackendId() bool { - return p.BackendId != nil +func (p *TExecPlanFragmentParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetParams() { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetGlobalDict() bool { - return p.GlobalDict != nil +func (p *TExecPlanFragmentParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCoord() { + if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.Coord.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + 
return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetNeedWaitExecutionTrigger() bool { - return p.NeedWaitExecutionTrigger != TExecPlanFragmentParams_NeedWaitExecutionTrigger_DEFAULT +func (p *TExecPlanFragmentParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendNum() { + if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BackendNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetBuildHashTableForBroadcastJoin() bool { - return p.BuildHashTableForBroadcastJoin != TExecPlanFragmentParams_BuildHashTableForBroadcastJoin_DEFAULT +func (p *TExecPlanFragmentParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryGlobals() { + if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryGlobals.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetInstancesSharingHashTable() bool { - return p.InstancesSharingHashTable != nil +func (p *TExecPlanFragmentParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryOptions.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetTableName() bool { - return p.TableName != nil +func (p *TExecPlanFragmentParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsReportSuccess() { + if err = oprot.WriteFieldBegin("is_report_success", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsReportSuccess); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetFileScanParams() bool { - return p.FileScanParams != nil +func (p *TExecPlanFragmentParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceInfo() { + if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := 
p.ResourceInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TExecPlanFragmentParams) IsSetWalId() bool { - return p.WalId != nil +func (p *TExecPlanFragmentParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetImportLabel() { + if err = oprot.WriteFieldBegin("import_label", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ImportLabel); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TExecPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetProtocolVersion bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TExecPlanFragmentParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError +func (p *TExecPlanFragmentParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadJobId() { + if err = oprot.WriteFieldBegin("load_job_id", thrift.I64, 13); err != nil { + goto WriteFieldBeginError } - if fieldTypeId == thrift.STOP { - break + if err := oprot.WriteI64(*p.LoadJobId); err != nil { + return err } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if 
fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I32 { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRING { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.I64 { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.I32 { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField16(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField17(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.I64 { - if err = p.ReadField18(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField19(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField20(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField21(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.LIST { - 
if err = p.ReadField22(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 23: - if fieldTypeId == thrift.STRING { - if err = p.ReadField23(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 24: - if fieldTypeId == thrift.MAP { - if err = p.ReadField24(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 25: - if fieldTypeId == thrift.I64 { - if err = p.ReadField25(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } +func (p *TExecPlanFragmentParams) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadErrorHubInfo() { + if err = oprot.WriteFieldBegin("load_error_hub_info", thrift.STRUCT, 14); err != nil { + goto WriteFieldBeginError } + if err := p.LoadErrorHubInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError +func (p *TExecPlanFragmentParams) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentNumOnHost() { + if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError +func (p *TExecPlanFragmentParams) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetIsSimplifiedParam() { + if err = oprot.WriteFieldBegin("is_simplified_param", thrift.BOOL, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsSimplifiedParam); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentParams[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), 
err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExecPlanFragmentParams[fieldId])) +func (p *TExecPlanFragmentParams) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnConf() { + if err = oprot.WriteFieldBegin("txn_conf", thrift.STRUCT, 17); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnConf.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ProtocolVersion = PaloInternalServiceVersion(v) +func (p *TExecPlanFragmentParams) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField2(iprot thrift.TProtocol) error { - p.Fragment = planner.NewTPlanFragment() - if err := p.Fragment.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetGlobalDict() { + if err = oprot.WriteFieldBegin("global_dict", thrift.STRUCT, 19); err != nil { + goto WriteFieldBeginError + } + if err := p.GlobalDict.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField3(iprot thrift.TProtocol) error { - p.DescTbl = descriptors.NewTDescriptorTable() - if err := p.DescTbl.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetNeedWaitExecutionTrigger() { + if err = oprot.WriteFieldBegin("need_wait_execution_trigger", thrift.BOOL, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.NeedWaitExecutionTrigger); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField4(iprot thrift.TProtocol) error { - p.Params = NewTPlanFragmentExecParams() - if err := p.Params.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField21(oprot thrift.TProtocol) (err 
error) { + if p.IsSetBuildHashTableForBroadcastJoin() { + if err = oprot.WriteFieldBegin("build_hash_table_for_broadcast_join", thrift.BOOL, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.BuildHashTableForBroadcastJoin); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField5(iprot thrift.TProtocol) error { - p.Coord = types.NewTNetworkAddress() - if err := p.Coord.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetInstancesSharingHashTable() { + if err = oprot.WriteFieldBegin("instances_sharing_hash_table", thrift.LIST, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.InstancesSharingHashTable)); err != nil { + return err + } + for _, v := range p.InstancesSharingHashTable { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.BackendNum = &v +func (p *TExecPlanFragmentParams) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField7(iprot thrift.TProtocol) error { - p.QueryGlobals = NewTQueryGlobals() - if err := p.QueryGlobals.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetFileScanParams() { + if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 24); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.FileScanParams)); err != nil { + return err + } + for k, v := range p.FileScanParams { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField8(iprot thrift.TProtocol) error { - p.QueryOptions = NewTQueryOptions() 
- if err := p.QueryOptions.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField25(oprot thrift.TProtocol) (err error) { + if p.IsSetWalId() { + if err = oprot.WriteFieldBegin("wal_id", thrift.I64, 25); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.WalId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.IsReportSuccess = &v +func (p *TExecPlanFragmentParams) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadStreamPerNode() { + if err = oprot.WriteFieldBegin("load_stream_per_node", thrift.I32, 26); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.LoadStreamPerNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField10(iprot thrift.TProtocol) error { - p.ResourceInfo = types.NewTResourceInfo() - if err := p.ResourceInfo.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalLoadStreams() { + if err = oprot.WriteFieldBegin("total_load_streams", thrift.I32, 27); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TotalLoadStreams); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.ImportLabel = &v +func (p *TExecPlanFragmentParams) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetNumLocalSink() { + if err = oprot.WriteFieldBegin("num_local_sink", thrift.I32, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumLocalSink); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.DbName = &v +func (p *TExecPlanFragmentParams) writeField29(oprot thrift.TProtocol) (err error) { + if p.IsSetContentLength() { + if err = oprot.WriteFieldBegin("content_length", thrift.I64, 29); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ContentLength); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + 
goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.LoadJobId = &v +func (p *TExecPlanFragmentParams) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroups() { + if err = oprot.WriteFieldBegin("workload_groups", thrift.LIST, 30); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.WorkloadGroups)); err != nil { + return err + } + for _, v := range p.WorkloadGroups { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField14(iprot thrift.TProtocol) error { - p.LoadErrorHubInfo = NewTLoadErrorHubInfo() - if err := p.LoadErrorHubInfo.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) writeField31(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNereids() { + if err = oprot.WriteFieldBegin("is_nereids", thrift.BOOL, 31); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNereids); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField15(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.FragmentNumOnHost = &v +func (p *TExecPlanFragmentParams) writeField32(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentConnectFe() { + if err = oprot.WriteFieldBegin("current_connect_fe", thrift.STRUCT, 32); err != nil { + goto WriteFieldBeginError + } + if err := p.CurrentConnectFe.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField16(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.IsSimplifiedParam = v +func (p *TExecPlanFragmentParams) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetIsMowTable() { + if err = oprot.WriteFieldBegin("is_mow_table", thrift.BOOL, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsMowTable); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T 
write field 1000 end error: ", p), err) } -func (p *TExecPlanFragmentParams) ReadField17(iprot thrift.TProtocol) error { - p.TxnConf = NewTTxnParams() - if err := p.TxnConf.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) String() string { + if p == nil { + return "" } - return nil + return fmt.Sprintf("TExecPlanFragmentParams(%+v)", *p) + } -func (p *TExecPlanFragmentParams) ReadField18(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.BackendId = &v +func (p *TExecPlanFragmentParams) DeepEqual(ano *TExecPlanFragmentParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil + if !p.Field1DeepEqual(ano.ProtocolVersion) { + return false + } + if !p.Field2DeepEqual(ano.Fragment) { + return false + } + if !p.Field3DeepEqual(ano.DescTbl) { + return false + } + if !p.Field4DeepEqual(ano.Params) { + return false + } + if !p.Field5DeepEqual(ano.Coord) { + return false + } + if !p.Field6DeepEqual(ano.BackendNum) { + return false + } + if !p.Field7DeepEqual(ano.QueryGlobals) { + return false + } + if !p.Field8DeepEqual(ano.QueryOptions) { + return false + } + if !p.Field9DeepEqual(ano.IsReportSuccess) { + return false + } + if !p.Field10DeepEqual(ano.ResourceInfo) { + return false + } + if !p.Field11DeepEqual(ano.ImportLabel) { + return false + } + if !p.Field12DeepEqual(ano.DbName) { + return false + } + if !p.Field13DeepEqual(ano.LoadJobId) { + return false + } + if !p.Field14DeepEqual(ano.LoadErrorHubInfo) { + return false + } + if !p.Field15DeepEqual(ano.FragmentNumOnHost) { + return false + } + if !p.Field16DeepEqual(ano.IsSimplifiedParam) { + return false + } + if !p.Field17DeepEqual(ano.TxnConf) { + return false + } + if !p.Field18DeepEqual(ano.BackendId) { + return false + } + if !p.Field19DeepEqual(ano.GlobalDict) { + return false + } + if !p.Field20DeepEqual(ano.NeedWaitExecutionTrigger) { + return false + } + if !p.Field21DeepEqual(ano.BuildHashTableForBroadcastJoin) { + return false + } + if !p.Field22DeepEqual(ano.InstancesSharingHashTable) { + return false + } + if !p.Field23DeepEqual(ano.TableName) { + return false + } + if !p.Field24DeepEqual(ano.FileScanParams) { + return false + } + if !p.Field25DeepEqual(ano.WalId) { + return false + } + if !p.Field26DeepEqual(ano.LoadStreamPerNode) { + return false + } + if !p.Field27DeepEqual(ano.TotalLoadStreams) { + return false + } + if !p.Field28DeepEqual(ano.NumLocalSink) { + return false + } + if !p.Field29DeepEqual(ano.ContentLength) { + return false + } + if !p.Field30DeepEqual(ano.WorkloadGroups) { + return false + } + if !p.Field31DeepEqual(ano.IsNereids) { + return false + } + if !p.Field32DeepEqual(ano.CurrentConnectFe) { + return false + } + if !p.Field1000DeepEqual(ano.IsMowTable) { + return false + } + return true } -func (p *TExecPlanFragmentParams) ReadField19(iprot thrift.TProtocol) error { - p.GlobalDict = NewTGlobalDict() - if err := p.GlobalDict.Read(iprot); err != nil { - return err +func (p *TExecPlanFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { + + if p.ProtocolVersion != src { + return false } - return nil + return true } +func (p *TExecPlanFragmentParams) Field2DeepEqual(src *planner.TPlanFragment) bool { -func (p *TExecPlanFragmentParams) ReadField20(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.NeedWaitExecutionTrigger = v + if !p.Fragment.DeepEqual(src) { + return false } - return nil + return true } 
+func (p *TExecPlanFragmentParams) Field3DeepEqual(src *descriptors.TDescriptorTable) bool { -func (p *TExecPlanFragmentParams) ReadField21(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.BuildHashTableForBroadcastJoin = v + if !p.DescTbl.DeepEqual(src) { + return false } - return nil + return true } +func (p *TExecPlanFragmentParams) Field4DeepEqual(src *TPlanFragmentExecParams) bool { -func (p *TExecPlanFragmentParams) ReadField22(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if !p.Params.DeepEqual(src) { + return false } - p.InstancesSharingHashTable = make([]*types.TUniqueId, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTUniqueId() - if err := _elem.Read(iprot); err != nil { - return err - } + return true +} +func (p *TExecPlanFragmentParams) Field5DeepEqual(src *types.TNetworkAddress) bool { - p.InstancesSharingHashTable = append(p.InstancesSharingHashTable, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + if !p.Coord.DeepEqual(src) { + return false } - return nil + return true } +func (p *TExecPlanFragmentParams) Field6DeepEqual(src *int32) bool { -func (p *TExecPlanFragmentParams) ReadField23(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.TableName = &v + if p.BackendNum == src { + return true + } else if p.BackendNum == nil || src == nil { + return false + } + if *p.BackendNum != *src { + return false } - return nil + return true } +func (p *TExecPlanFragmentParams) Field7DeepEqual(src *TQueryGlobals) bool { -func (p *TExecPlanFragmentParams) ReadField24(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err + if !p.QueryGlobals.DeepEqual(src) { + return false } - p.FileScanParams = make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := plannodes.NewTFileScanRangeParams() - if err := _val.Read(iprot); err != nil { - return err - } + return true +} +func (p *TExecPlanFragmentParams) Field8DeepEqual(src *TQueryOptions) bool { - p.FileScanParams[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err + if !p.QueryOptions.DeepEqual(src) { + return false } - return nil + return true } +func (p *TExecPlanFragmentParams) Field9DeepEqual(src *bool) bool { -func (p *TExecPlanFragmentParams) ReadField25(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.WalId = &v + if p.IsReportSuccess == src { + return true + } else if p.IsReportSuccess == nil || src == nil { + return false } - return nil + if *p.IsReportSuccess != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field10DeepEqual(src *types.TResourceInfo) bool { -func (p *TExecPlanFragmentParams) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TExecPlanFragmentParams"); err != nil { - goto WriteStructBeginError + if !p.ResourceInfo.DeepEqual(src) { + return false } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); 
err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - if err = p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError - } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError - } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError - } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError - } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError - } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError - } - if err = p.writeField23(oprot); err != nil { - fieldId = 23 - goto WriteFieldError - } - if err = p.writeField24(oprot); err != nil { - fieldId = 24 - goto WriteFieldError - } - if err = p.writeField25(oprot); err != nil { - fieldId = 25 - goto WriteFieldError - } + return true +} +func (p *TExecPlanFragmentParams) Field11DeepEqual(src *string) bool { + if p.ImportLabel == src { + return true + } else if p.ImportLabel == nil || src == nil { + return false } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if strings.Compare(*p.ImportLabel, *src) != 0 { + return false } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field12DeepEqual(src *string) bool { -func (p *TExecPlanFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { - return err + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { + return false } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if strings.Compare(*p.DbName, *src) != 0 { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write 
field 1 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field13DeepEqual(src *int64) bool { -func (p *TExecPlanFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetFragment() { - if err = oprot.WriteFieldBegin("fragment", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.Fragment.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.LoadJobId == src { + return true + } else if p.LoadJobId == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + if *p.LoadJobId != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field14DeepEqual(src *TLoadErrorHubInfo) bool { -func (p *TExecPlanFragmentParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDescTbl() { - if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 3); err != nil { - goto WriteFieldBeginError - } - if err := p.DescTbl.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.LoadErrorHubInfo.DeepEqual(src) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field15DeepEqual(src *int32) bool { -func (p *TExecPlanFragmentParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetParams() { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 4); err != nil { - goto WriteFieldBeginError - } - if err := p.Params.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.FragmentNumOnHost == src { + return true + } else if p.FragmentNumOnHost == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + if *p.FragmentNumOnHost != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field16DeepEqual(src bool) bool { -func (p *TExecPlanFragmentParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetCoord() { - if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.Coord.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.IsSimplifiedParam != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field17DeepEqual(src *TTxnParams) bool { -func (p *TExecPlanFragmentParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendNum() { - if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.BackendNum); err != nil { - return err - } - if err = 
oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.TxnConf.DeepEqual(src) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field18DeepEqual(src *int64) bool { -func (p *TExecPlanFragmentParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryGlobals() { - if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 7); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryGlobals.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + if *p.BackendId != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field19DeepEqual(src *TGlobalDict) bool { -func (p *TExecPlanFragmentParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryOptions() { - if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 8); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryOptions.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.GlobalDict.DeepEqual(src) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field20DeepEqual(src bool) bool { -func (p *TExecPlanFragmentParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetIsReportSuccess() { - if err = oprot.WriteFieldBegin("is_report_success", thrift.BOOL, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.IsReportSuccess); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.NeedWaitExecutionTrigger != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field21DeepEqual(src bool) bool { -func (p *TExecPlanFragmentParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetResourceInfo() { - if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 10); err != nil { - goto WriteFieldBeginError - } - if err := p.ResourceInfo.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.BuildHashTableForBroadcastJoin != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field22DeepEqual(src []*types.TUniqueId) bool { -func (p 
*TExecPlanFragmentParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetImportLabel() { - if err = oprot.WriteFieldBegin("import_label", thrift.STRING, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.ImportLabel); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if len(p.InstancesSharingHashTable) != len(src) { + return false + } + for i, v := range p.InstancesSharingHashTable { + _src := src[i] + if !v.DeepEqual(_src) { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field23DeepEqual(src *string) bool { -func (p *TExecPlanFragmentParams) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDbName() { - if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DbName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.TableName == src { + return true + } else if p.TableName == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) + if strings.Compare(*p.TableName, *src) != 0 { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field24DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { -func (p *TExecPlanFragmentParams) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadJobId() { - if err = oprot.WriteFieldBegin("load_job_id", thrift.I64, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LoadJobId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if len(p.FileScanParams) != len(src) { + return false + } + for k, v := range p.FileScanParams { + _src := src[k] + if !v.DeepEqual(_src) { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field25DeepEqual(src *int64) bool { -func (p *TExecPlanFragmentParams) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadErrorHubInfo() { - if err = oprot.WriteFieldBegin("load_error_hub_info", thrift.STRUCT, 14); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadErrorHubInfo.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.WalId == src { + return true + } else if p.WalId == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) + if *p.WalId != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field26DeepEqual(src *int32) bool { -func (p *TExecPlanFragmentParams) writeField15(oprot thrift.TProtocol) (err error) { - if 
p.IsSetFragmentNumOnHost() { - if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 15); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.LoadStreamPerNode == src { + return true + } else if p.LoadStreamPerNode == nil || src == nil { + return false + } + if *p.LoadStreamPerNode != *src { + return false + } + return true +} +func (p *TExecPlanFragmentParams) Field27DeepEqual(src *int32) bool { + + if p.TotalLoadStreams == src { + return true + } else if p.TotalLoadStreams == nil || src == nil { + return false + } + if *p.TotalLoadStreams != *src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field28DeepEqual(src *int32) bool { -func (p *TExecPlanFragmentParams) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetIsSimplifiedParam() { - if err = oprot.WriteFieldBegin("is_simplified_param", thrift.BOOL, 16); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.IsSimplifiedParam); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.NumLocalSink == src { + return true + } else if p.NumLocalSink == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) + if *p.NumLocalSink != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field29DeepEqual(src *int64) bool { -func (p *TExecPlanFragmentParams) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnConf() { - if err = oprot.WriteFieldBegin("txn_conf", thrift.STRUCT, 17); err != nil { - goto WriteFieldBeginError - } - if err := p.TxnConf.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.ContentLength == src { + return true + } else if p.ContentLength == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) + if *p.ContentLength != *src { + return false + } + return true } +func (p *TExecPlanFragmentParams) Field30DeepEqual(src []*TPipelineWorkloadGroup) bool { -func (p *TExecPlanFragmentParams) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendId() { - if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 18); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BackendId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if len(p.WorkloadGroups) != len(src) { + return false + } + for i, v := range p.WorkloadGroups { + _src := src[i] + if !v.DeepEqual(_src) { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) + return true } +func 
(p *TExecPlanFragmentParams) Field31DeepEqual(src bool) bool { -func (p *TExecPlanFragmentParams) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetGlobalDict() { - if err = oprot.WriteFieldBegin("global_dict", thrift.STRUCT, 19); err != nil { - goto WriteFieldBeginError - } - if err := p.GlobalDict.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.IsNereids != src { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field32DeepEqual(src *types.TNetworkAddress) bool { -func (p *TExecPlanFragmentParams) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetNeedWaitExecutionTrigger() { - if err = oprot.WriteFieldBegin("need_wait_execution_trigger", thrift.BOOL, 20); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.NeedWaitExecutionTrigger); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if !p.CurrentConnectFe.DeepEqual(src) { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) + return true } +func (p *TExecPlanFragmentParams) Field1000DeepEqual(src *bool) bool { -func (p *TExecPlanFragmentParams) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetBuildHashTableForBroadcastJoin() { - if err = oprot.WriteFieldBegin("build_hash_table_for_broadcast_join", thrift.BOOL, 21); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.BuildHashTableForBroadcastJoin); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + if p.IsMowTable == src { + return true + } else if p.IsMowTable == nil || src == nil { + return false } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) + if *p.IsMowTable != *src { + return false + } + return true } -func (p *TExecPlanFragmentParams) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetInstancesSharingHashTable() { - if err = oprot.WriteFieldBegin("instances_sharing_hash_table", thrift.LIST, 22); err != nil { - goto WriteFieldBeginError +type TExecPlanFragmentParamsList struct { + ParamsList []*TExecPlanFragmentParams `thrift:"paramsList,1,optional" frugal:"1,optional,list" json:"paramsList,omitempty"` +} + +func NewTExecPlanFragmentParamsList() *TExecPlanFragmentParamsList { + return &TExecPlanFragmentParamsList{} +} + +func (p *TExecPlanFragmentParamsList) InitDefault() { +} + +var TExecPlanFragmentParamsList_ParamsList_DEFAULT []*TExecPlanFragmentParams + +func (p *TExecPlanFragmentParamsList) GetParamsList() (v []*TExecPlanFragmentParams) { + if !p.IsSetParamsList() { + return TExecPlanFragmentParamsList_ParamsList_DEFAULT + } + return p.ParamsList +} +func (p *TExecPlanFragmentParamsList) SetParamsList(val []*TExecPlanFragmentParams) { + p.ParamsList = val +} + +var fieldIDToName_TExecPlanFragmentParamsList = map[int16]string{ + 1: "paramsList", +} + +func (p *TExecPlanFragmentParamsList) 
IsSetParamsList() bool { + return p.ParamsList != nil +} + +func (p *TExecPlanFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.InstancesSharingHashTable)); err != nil { - return err + if fieldTypeId == thrift.STOP { + break } - for _, v := range p.InstancesSharingHashTable { - if err := v.Write(oprot); err != nil { - return err + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentParamsList[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TExecPlanFragmentParams) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetTableName() { - if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 23); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.TableName); err != nil { +func (p *TExecPlanFragmentParamsList) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TExecPlanFragmentParams, 0, size) + values := make([]TExecPlanFragmentParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ParamsList = _field + return nil +} + +func (p *TExecPlanFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TExecPlanFragmentParamsList"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } return 
nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExecPlanFragmentParams) writeField24(oprot thrift.TProtocol) (err error) { - if p.IsSetFileScanParams() { - if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 24); err != nil { +func (p *TExecPlanFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetParamsList() { + if err = oprot.WriteFieldBegin("paramsList", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.FileScanParams)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { return err } - for k, v := range p.FileScanParams { - - if err := oprot.WriteI32(k); err != nil { - return err - } - + for _, v := range p.ParamsList { if err := v.Write(oprot); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) -} - -func (p *TExecPlanFragmentParams) writeField25(oprot thrift.TProtocol) (err error) { - if p.IsSetWalId() { - if err = oprot.WriteFieldBegin("wal_id", thrift.I64, 25); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.WalId); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13220,368 +18979,486 @@ func (p *TExecPlanFragmentParams) writeField25(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 25 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentParams) String() string { +func (p *TExecPlanFragmentParamsList) String() string { if p == nil { return "" } - return fmt.Sprintf("TExecPlanFragmentParams(%+v)", *p) + return fmt.Sprintf("TExecPlanFragmentParamsList(%+v)", *p) + } -func (p *TExecPlanFragmentParams) DeepEqual(ano *TExecPlanFragmentParams) bool { +func (p *TExecPlanFragmentParamsList) DeepEqual(ano *TExecPlanFragmentParamsList) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { - return false - } - if !p.Field2DeepEqual(ano.Fragment) { - return false - } - if !p.Field3DeepEqual(ano.DescTbl) { - return false - } - if !p.Field4DeepEqual(ano.Params) { - return false - } - if !p.Field5DeepEqual(ano.Coord) { - return false - } - if !p.Field6DeepEqual(ano.BackendNum) { - return false - } - if 
!p.Field7DeepEqual(ano.QueryGlobals) { - return false - } - if !p.Field8DeepEqual(ano.QueryOptions) { - return false - } - if !p.Field9DeepEqual(ano.IsReportSuccess) { - return false - } - if !p.Field10DeepEqual(ano.ResourceInfo) { - return false - } - if !p.Field11DeepEqual(ano.ImportLabel) { - return false - } - if !p.Field12DeepEqual(ano.DbName) { - return false - } - if !p.Field13DeepEqual(ano.LoadJobId) { + if !p.Field1DeepEqual(ano.ParamsList) { return false } - if !p.Field14DeepEqual(ano.LoadErrorHubInfo) { + return true +} + +func (p *TExecPlanFragmentParamsList) Field1DeepEqual(src []*TExecPlanFragmentParams) bool { + + if len(p.ParamsList) != len(src) { return false } - if !p.Field15DeepEqual(ano.FragmentNumOnHost) { - return false + for i, v := range p.ParamsList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } - if !p.Field16DeepEqual(ano.IsSimplifiedParam) { - return false + return true +} + +type TExecPlanFragmentResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +} + +func NewTExecPlanFragmentResult_() *TExecPlanFragmentResult_ { + return &TExecPlanFragmentResult_{} +} + +func (p *TExecPlanFragmentResult_) InitDefault() { +} + +var TExecPlanFragmentResult__Status_DEFAULT *status.TStatus + +func (p *TExecPlanFragmentResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TExecPlanFragmentResult__Status_DEFAULT } - if !p.Field17DeepEqual(ano.TxnConf) { - return false + return p.Status +} +func (p *TExecPlanFragmentResult_) SetStatus(val *status.TStatus) { + p.Status = val +} + +var fieldIDToName_TExecPlanFragmentResult_ = map[int16]string{ + 1: "status", +} + +func (p *TExecPlanFragmentResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TExecPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if !p.Field18DeepEqual(ano.BackendId) { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if !p.Field19DeepEqual(ano.GlobalDict) { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if !p.Field20DeepEqual(ano.NeedWaitExecutionTrigger) { - return false + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentResult_[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + 
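// Illustrative sketch of the optional-field comparison pattern that the generated
// FieldNDeepEqual helpers in this diff follow (e.g. Field18DeepEqual for backend_id,
// Field25DeepEqual for wal_id): a nil pointer means the Thrift optional field is unset,
// identical pointers (including both nil) compare equal, a set/unset mix compares
// unequal, and otherwise the pointed-to values are compared. The type and field names
// below are hypothetical stand-ins and are not part of the IDL or the generated code.
package main

import "fmt"

type exampleParams struct {
	BackendId *int64 // optional field: nil when unset
}

// backendIdDeepEqual mirrors the generated pointer-comparison logic for *int64 fields.
func (p *exampleParams) backendIdDeepEqual(src *int64) bool {
	if p.BackendId == src { // same pointer, or both unset
		return true
	} else if p.BackendId == nil || src == nil { // only one side set
		return false
	}
	return *p.BackendId == *src // both set: compare the values
}

func main() {
	id := int64(10004)
	other := int64(10004)
	p := &exampleParams{BackendId: &id}
	fmt.Println(p.backendIdDeepEqual(&other))                // true: both set, equal values
	fmt.Println(p.backendIdDeepEqual(nil))                   // false: one side unset
	fmt.Println((&exampleParams{}).backendIdDeepEqual(nil))  // true: both unset
}
// Using pointers for optional scalars is what lets the generated code distinguish
// "field absent" from "field present with the zero value" when comparing structs.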
+func (p *TExecPlanFragmentResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err } - if !p.Field21DeepEqual(ano.BuildHashTableForBroadcastJoin) { - return false + p.Status = _field + return nil +} + +func (p *TExecPlanFragmentResult_) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TExecPlanFragmentResult"); err != nil { + goto WriteStructBeginError } - if !p.Field22DeepEqual(ano.InstancesSharingHashTable) { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if !p.Field23DeepEqual(ano.TableName) { - return false + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if !p.Field24DeepEqual(ano.FileScanParams) { - return false + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - if !p.Field25DeepEqual(ano.WalId) { - return false + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TExecPlanFragmentResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { - - if p.ProtocolVersion != src { - return false +func (p *TExecPlanFragmentResult_) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TExecPlanFragmentResult_(%+v)", *p) + } -func (p *TExecPlanFragmentParams) Field2DeepEqual(src *planner.TPlanFragment) bool { - if !p.Fragment.DeepEqual(src) { +func (p *TExecPlanFragmentResult_) DeepEqual(ano *TExecPlanFragmentResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { return false } - return true -} -func (p *TExecPlanFragmentParams) Field3DeepEqual(src *descriptors.TDescriptorTable) bool { - - if !p.DescTbl.DeepEqual(src) { + if !p.Field1DeepEqual(ano.Status) { return false } return true } -func (p *TExecPlanFragmentParams) Field4DeepEqual(src *TPlanFragmentExecParams) bool { - if !p.Params.DeepEqual(src) { +func (p *TExecPlanFragmentResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { return false } return true } -func (p *TExecPlanFragmentParams) Field5DeepEqual(src *types.TNetworkAddress) bool { - if !p.Coord.DeepEqual(src) { - return false - } - return true +type TCancelPlanFragmentParams struct { + ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,optional" frugal:"2,optional,types.TUniqueId" 
json:"fragment_instance_id,omitempty"` } -func (p *TExecPlanFragmentParams) Field6DeepEqual(src *int32) bool { - if p.BackendNum == src { - return true - } else if p.BackendNum == nil || src == nil { - return false - } - if *p.BackendNum != *src { - return false - } - return true +func NewTCancelPlanFragmentParams() *TCancelPlanFragmentParams { + return &TCancelPlanFragmentParams{} } -func (p *TExecPlanFragmentParams) Field7DeepEqual(src *TQueryGlobals) bool { - if !p.QueryGlobals.DeepEqual(src) { - return false - } - return true +func (p *TCancelPlanFragmentParams) InitDefault() { } -func (p *TExecPlanFragmentParams) Field8DeepEqual(src *TQueryOptions) bool { - if !p.QueryOptions.DeepEqual(src) { - return false - } - return true +func (p *TCancelPlanFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { + return p.ProtocolVersion } -func (p *TExecPlanFragmentParams) Field9DeepEqual(src *bool) bool { - if p.IsReportSuccess == src { - return true - } else if p.IsReportSuccess == nil || src == nil { - return false - } - if *p.IsReportSuccess != *src { - return false +var TCancelPlanFragmentParams_FragmentInstanceId_DEFAULT *types.TUniqueId + +func (p *TCancelPlanFragmentParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TCancelPlanFragmentParams_FragmentInstanceId_DEFAULT } - return true + return p.FragmentInstanceId +} +func (p *TCancelPlanFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { + p.ProtocolVersion = val +} +func (p *TCancelPlanFragmentParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val } -func (p *TExecPlanFragmentParams) Field10DeepEqual(src *types.TResourceInfo) bool { - if !p.ResourceInfo.DeepEqual(src) { - return false - } - return true +var fieldIDToName_TCancelPlanFragmentParams = map[int16]string{ + 1: "protocol_version", + 2: "fragment_instance_id", } -func (p *TExecPlanFragmentParams) Field11DeepEqual(src *string) bool { - if p.ImportLabel == src { - return true - } else if p.ImportLabel == nil || src == nil { - return false - } - if strings.Compare(*p.ImportLabel, *src) != 0 { - return false - } - return true +func (p *TCancelPlanFragmentParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil } -func (p *TExecPlanFragmentParams) Field12DeepEqual(src *string) bool { - if p.DbName == src { - return true - } else if p.DbName == nil || src == nil { - return false - } - if strings.Compare(*p.DbName, *src) != 0 { - return false +func (p *TCancelPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetProtocolVersion bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return true -} -func (p *TExecPlanFragmentParams) Field13DeepEqual(src *int64) bool { - if p.LoadJobId == src { - return true - } else if p.LoadJobId == nil || src == nil { - return false + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if *p.LoadJobId != *src { - return false + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true -} -func (p *TExecPlanFragmentParams) Field14DeepEqual(src *TLoadErrorHubInfo) bool { - if !p.LoadErrorHubInfo.DeepEqual(src) { - return false + if !issetProtocolVersion { + fieldId = 1 + goto RequiredFieldNotSetError } - return true -} -func (p *TExecPlanFragmentParams) Field15DeepEqual(src *int32) bool { + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCancelPlanFragmentParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - if p.FragmentNumOnHost == src { - return true - } else if p.FragmentNumOnHost == nil || src == nil { - return false - } - if *p.FragmentNumOnHost != *src { - return false - } - return true +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCancelPlanFragmentParams[fieldId])) } -func (p *TExecPlanFragmentParams) Field16DeepEqual(src bool) bool { - if p.IsSimplifiedParam != src { - return false +func (p *TCancelPlanFragmentParams) ReadField1(iprot thrift.TProtocol) error { + + var _field PaloInternalServiceVersion + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = PaloInternalServiceVersion(v) } - return true + p.ProtocolVersion = _field + return nil } -func (p *TExecPlanFragmentParams) Field17DeepEqual(src *TTxnParams) bool { - - if !p.TxnConf.DeepEqual(src) { - return false +func (p *TCancelPlanFragmentParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err } - return true + p.FragmentInstanceId = _field + return nil } -func (p *TExecPlanFragmentParams) Field18DeepEqual(src *int64) bool { - if p.BackendId == src { - return true - } else if p.BackendId == nil || src == nil { - return false +func (p *TCancelPlanFragmentParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TCancelPlanFragmentParams"); err != nil { + goto WriteStructBeginError } - if *p.BackendId != *src { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } - return true -} -func (p *TExecPlanFragmentParams) Field19DeepEqual(src *TGlobalDict) bool { - - if !p.GlobalDict.DeepEqual(src) { - return false + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return true + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) 
+WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExecPlanFragmentParams) Field20DeepEqual(src bool) bool { - if p.NeedWaitExecutionTrigger != src { - return false +func (p *TCancelPlanFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { + goto WriteFieldBeginError } - return true + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentParams) Field21DeepEqual(src bool) bool { - if p.BuildHashTableForBroadcastJoin != src { - return false +func (p *TCancelPlanFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentInstanceId() { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.FragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TExecPlanFragmentParams) Field22DeepEqual(src []*types.TUniqueId) bool { - if len(p.InstancesSharingHashTable) != len(src) { - return false - } - for i, v := range p.InstancesSharingHashTable { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } +func (p *TCancelPlanFragmentParams) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TCancelPlanFragmentParams(%+v)", *p) + } -func (p *TExecPlanFragmentParams) Field23DeepEqual(src *string) bool { - if p.TableName == src { +func (p *TCancelPlanFragmentParams) DeepEqual(ano *TCancelPlanFragmentParams) bool { + if p == ano { return true - } else if p.TableName == nil || src == nil { + } else if p == nil || ano == nil { return false } - if strings.Compare(*p.TableName, *src) != 0 { + if !p.Field1DeepEqual(ano.ProtocolVersion) { + return false + } + if !p.Field2DeepEqual(ano.FragmentInstanceId) { return false } return true } -func (p *TExecPlanFragmentParams) Field24DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { - if len(p.FileScanParams) != len(src) { +func (p *TCancelPlanFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { + + if p.ProtocolVersion != src { return false } - for k, v := range p.FileScanParams { - _src := src[k] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TExecPlanFragmentParams) Field25DeepEqual(src *int64) bool { +func (p *TCancelPlanFragmentParams) Field2DeepEqual(src *types.TUniqueId) bool { - if p.WalId == src { - return true - } else if p.WalId == nil || src == nil { - return false - } - if *p.WalId != *src { + if !p.FragmentInstanceId.DeepEqual(src) { return false } return true } -type TExecPlanFragmentParamsList struct { - 
ParamsList []*TExecPlanFragmentParams `thrift:"paramsList,1,optional" frugal:"1,optional,list" json:"paramsList,omitempty"` +type TCancelPlanFragmentResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` } -func NewTExecPlanFragmentParamsList() *TExecPlanFragmentParamsList { - return &TExecPlanFragmentParamsList{} +func NewTCancelPlanFragmentResult_() *TCancelPlanFragmentResult_ { + return &TCancelPlanFragmentResult_{} } -func (p *TExecPlanFragmentParamsList) InitDefault() { - *p = TExecPlanFragmentParamsList{} +func (p *TCancelPlanFragmentResult_) InitDefault() { } -var TExecPlanFragmentParamsList_ParamsList_DEFAULT []*TExecPlanFragmentParams +var TCancelPlanFragmentResult__Status_DEFAULT *status.TStatus -func (p *TExecPlanFragmentParamsList) GetParamsList() (v []*TExecPlanFragmentParams) { - if !p.IsSetParamsList() { - return TExecPlanFragmentParamsList_ParamsList_DEFAULT +func (p *TCancelPlanFragmentResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TCancelPlanFragmentResult__Status_DEFAULT } - return p.ParamsList + return p.Status } -func (p *TExecPlanFragmentParamsList) SetParamsList(val []*TExecPlanFragmentParams) { - p.ParamsList = val +func (p *TCancelPlanFragmentResult_) SetStatus(val *status.TStatus) { + p.Status = val } -var fieldIDToName_TExecPlanFragmentParamsList = map[int16]string{ - 1: "paramsList", +var fieldIDToName_TCancelPlanFragmentResult_ = map[int16]string{ + 1: "status", } -func (p *TExecPlanFragmentParamsList) IsSetParamsList() bool { - return p.ParamsList != nil +func (p *TCancelPlanFragmentResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TExecPlanFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { +func (p *TCancelPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -13601,21 +19478,18 @@ func (p *TExecPlanFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13630,7 +19504,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentParamsList[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCancelPlanFragmentResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -13640,29 +19514,18 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TExecPlanFragmentParamsList) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ParamsList = make([]*TExecPlanFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTExecPlanFragmentParams() - if err := _elem.Read(iprot); err != nil { - return err - } - - 
p.ParamsList = append(p.ParamsList, _elem) - } - if err := iprot.ReadListEnd(); err != nil { +func (p *TCancelPlanFragmentResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } -func (p *TExecPlanFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { +func (p *TCancelPlanFragmentResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TExecPlanFragmentParamsList"); err != nil { + if err = oprot.WriteStructBegin("TCancelPlanFragmentResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13670,7 +19533,6 @@ func (p *TExecPlanFragmentParamsList) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13689,20 +19551,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExecPlanFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetParamsList() { - if err = oprot.WriteFieldBegin("paramsList", thrift.LIST, 1); err != nil { +func (p *TCancelPlanFragmentResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { - return err - } - for _, v := range p.ParamsList { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13716,75 +19570,61 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentParamsList) String() string { +func (p *TCancelPlanFragmentResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TExecPlanFragmentParamsList(%+v)", *p) + return fmt.Sprintf("TCancelPlanFragmentResult_(%+v)", *p) + } -func (p *TExecPlanFragmentParamsList) DeepEqual(ano *TExecPlanFragmentParamsList) bool { +func (p *TCancelPlanFragmentResult_) DeepEqual(ano *TCancelPlanFragmentResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ParamsList) { + if !p.Field1DeepEqual(ano.Status) { return false } return true } -func (p *TExecPlanFragmentParamsList) Field1DeepEqual(src []*TExecPlanFragmentParams) bool { +func (p *TCancelPlanFragmentResult_) Field1DeepEqual(src *status.TStatus) bool { - if len(p.ParamsList) != len(src) { + if !p.Status.DeepEqual(src) { return false } - for i, v := range p.ParamsList { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -type TExecPlanFragmentResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` +type TExprMap struct { + ExprMap map[string]*exprs.TExpr `thrift:"expr_map,1,required" frugal:"1,required,map" json:"expr_map"` } -func NewTExecPlanFragmentResult_() *TExecPlanFragmentResult_ { - return &TExecPlanFragmentResult_{} +func NewTExprMap() *TExprMap { + return &TExprMap{} } -func (p *TExecPlanFragmentResult_) InitDefault() { - *p = TExecPlanFragmentResult_{} +func (p *TExprMap) InitDefault() { } -var TExecPlanFragmentResult__Status_DEFAULT *status.TStatus - -func (p 
*TExecPlanFragmentResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TExecPlanFragmentResult__Status_DEFAULT - } - return p.Status -} -func (p *TExecPlanFragmentResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TExprMap) GetExprMap() (v map[string]*exprs.TExpr) { + return p.ExprMap } - -var fieldIDToName_TExecPlanFragmentResult_ = map[int16]string{ - 1: "status", +func (p *TExprMap) SetExprMap(val map[string]*exprs.TExpr) { + p.ExprMap = val } -func (p *TExecPlanFragmentResult_) IsSetStatus() bool { - return p.Status != nil +var fieldIDToName_TExprMap = map[int16]string{ + 1: "expr_map", } -func (p *TExecPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TExprMap) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetExprMap bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -13801,21 +19641,19 @@ func (p *TExecPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.MAP { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetExprMap = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13824,33 +19662,61 @@ func (p *TExecPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetExprMap { + fieldId = 1 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExecPlanFragmentResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExprMap[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExprMap[fieldId])) +} + +func (p *TExprMap) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } -func (p *TExecPlanFragmentResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := 
p.Status.Read(iprot); err != nil { + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { return err } + p.ExprMap = _field return nil } -func (p *TExecPlanFragmentResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TExprMap) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TExecPlanFragmentResult"); err != nil { + if err = oprot.WriteStructBegin("TExprMap"); err != nil { goto WriteStructBeginError } if p != nil { @@ -13858,7 +19724,6 @@ func (p *TExecPlanFragmentResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13877,18 +19742,27 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExecPlanFragmentResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { +func (p *TExprMap) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("expr_map", thrift.MAP, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(p.ExprMap)); err != nil { + return err + } + for k, v := range p.ExprMap { + if err := oprot.WriteString(k); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err := v.Write(oprot); err != nil { + return err } } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } return nil WriteFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) @@ -13896,79 +19770,158 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExecPlanFragmentResult_) String() string { +func (p *TExprMap) String() string { if p == nil { return "" } - return fmt.Sprintf("TExecPlanFragmentResult_(%+v)", *p) + return fmt.Sprintf("TExprMap(%+v)", *p) + } -func (p *TExecPlanFragmentResult_) DeepEqual(ano *TExecPlanFragmentResult_) bool { +func (p *TExprMap) DeepEqual(ano *TExprMap) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.ExprMap) { return false } return true } -func (p *TExecPlanFragmentResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TExprMap) Field1DeepEqual(src map[string]*exprs.TExpr) bool { - if !p.Status.DeepEqual(src) { + if len(p.ExprMap) != len(src) { return false } + for k, v := range p.ExprMap { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } + } return true } -type TCancelPlanFragmentParams struct { - ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,optional" frugal:"2,optional,types.TUniqueId" json:"fragment_instance_id,omitempty"` +type TFoldConstantParams struct { + ExprMap map[string]map[string]*exprs.TExpr `thrift:"expr_map,1,required" frugal:"1,required,map>" json:"expr_map"` + QueryGlobals *TQueryGlobals `thrift:"query_globals,2,required" frugal:"2,required,TQueryGlobals" json:"query_globals"` + VecExec *bool `thrift:"vec_exec,3,optional" 
frugal:"3,optional,bool" json:"vec_exec,omitempty"` + QueryOptions *TQueryOptions `thrift:"query_options,4,optional" frugal:"4,optional,TQueryOptions" json:"query_options,omitempty"` + QueryId *types.TUniqueId `thrift:"query_id,5,optional" frugal:"5,optional,types.TUniqueId" json:"query_id,omitempty"` + IsNereids *bool `thrift:"is_nereids,6,optional" frugal:"6,optional,bool" json:"is_nereids,omitempty"` } -func NewTCancelPlanFragmentParams() *TCancelPlanFragmentParams { - return &TCancelPlanFragmentParams{} +func NewTFoldConstantParams() *TFoldConstantParams { + return &TFoldConstantParams{} } -func (p *TCancelPlanFragmentParams) InitDefault() { - *p = TCancelPlanFragmentParams{} +func (p *TFoldConstantParams) InitDefault() { } -func (p *TCancelPlanFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { - return p.ProtocolVersion +func (p *TFoldConstantParams) GetExprMap() (v map[string]map[string]*exprs.TExpr) { + return p.ExprMap } -var TCancelPlanFragmentParams_FragmentInstanceId_DEFAULT *types.TUniqueId +var TFoldConstantParams_QueryGlobals_DEFAULT *TQueryGlobals -func (p *TCancelPlanFragmentParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TCancelPlanFragmentParams_FragmentInstanceId_DEFAULT +func (p *TFoldConstantParams) GetQueryGlobals() (v *TQueryGlobals) { + if !p.IsSetQueryGlobals() { + return TFoldConstantParams_QueryGlobals_DEFAULT } - return p.FragmentInstanceId + return p.QueryGlobals } -func (p *TCancelPlanFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { - p.ProtocolVersion = val + +var TFoldConstantParams_VecExec_DEFAULT bool + +func (p *TFoldConstantParams) GetVecExec() (v bool) { + if !p.IsSetVecExec() { + return TFoldConstantParams_VecExec_DEFAULT + } + return *p.VecExec } -func (p *TCancelPlanFragmentParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val + +var TFoldConstantParams_QueryOptions_DEFAULT *TQueryOptions + +func (p *TFoldConstantParams) GetQueryOptions() (v *TQueryOptions) { + if !p.IsSetQueryOptions() { + return TFoldConstantParams_QueryOptions_DEFAULT + } + return p.QueryOptions } -var fieldIDToName_TCancelPlanFragmentParams = map[int16]string{ - 1: "protocol_version", - 2: "fragment_instance_id", +var TFoldConstantParams_QueryId_DEFAULT *types.TUniqueId + +func (p *TFoldConstantParams) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TFoldConstantParams_QueryId_DEFAULT + } + return p.QueryId } -func (p *TCancelPlanFragmentParams) IsSetFragmentInstanceId() bool { - return p.FragmentInstanceId != nil +var TFoldConstantParams_IsNereids_DEFAULT bool + +func (p *TFoldConstantParams) GetIsNereids() (v bool) { + if !p.IsSetIsNereids() { + return TFoldConstantParams_IsNereids_DEFAULT + } + return *p.IsNereids +} +func (p *TFoldConstantParams) SetExprMap(val map[string]map[string]*exprs.TExpr) { + p.ExprMap = val +} +func (p *TFoldConstantParams) SetQueryGlobals(val *TQueryGlobals) { + p.QueryGlobals = val +} +func (p *TFoldConstantParams) SetVecExec(val *bool) { + p.VecExec = val +} +func (p *TFoldConstantParams) SetQueryOptions(val *TQueryOptions) { + p.QueryOptions = val +} +func (p *TFoldConstantParams) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TFoldConstantParams) SetIsNereids(val *bool) { + p.IsNereids = val } -func (p *TCancelPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { +var fieldIDToName_TFoldConstantParams = map[int16]string{ + 1: "expr_map", + 2: "query_globals", + 3: "vec_exec", + 
4: "query_options", + 5: "query_id", + 6: "is_nereids", +} + +func (p *TFoldConstantParams) IsSetQueryGlobals() bool { + return p.QueryGlobals != nil +} + +func (p *TFoldConstantParams) IsSetVecExec() bool { + return p.VecExec != nil +} + +func (p *TFoldConstantParams) IsSetQueryOptions() bool { + return p.QueryOptions != nil +} + +func (p *TFoldConstantParams) IsSetQueryId() bool { + return p.QueryId != nil +} + +func (p *TFoldConstantParams) IsSetIsNereids() bool { + return p.IsNereids != nil +} + +func (p *TFoldConstantParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false + var issetExprMap bool = false + var issetQueryGlobals bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -13985,32 +19938,60 @@ func (p *TCancelPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.MAP { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetExprMap = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetQueryGlobals = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14019,17 +20000,22 @@ func (p *TCancelPlanFragmentParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetProtocolVersion { + if !issetExprMap { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetQueryGlobals { + fieldId = 2 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCancelPlanFragmentParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFoldConstantParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14038,29 +20024,106 @@ ReadFieldEndError: ReadStructEndError: return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCancelPlanFragmentParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFoldConstantParams[fieldId])) +} + +func (p *TFoldConstantParams) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]map[string]*exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _val := make(map[string]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key1 string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key1 = v + } + + _val1 := &values[i] + _val1.InitDefault() + if err := _val1.Read(iprot); err != nil { + return err + } + + _val[_key1] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.ExprMap = _field + return nil +} +func (p *TFoldConstantParams) ReadField2(iprot thrift.TProtocol) error { + _field := NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryGlobals = _field + return nil } +func (p *TFoldConstantParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TCancelPlanFragmentParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ProtocolVersion = PaloInternalServiceVersion(v) + _field = &v } + p.VecExec = _field return nil } +func (p *TFoldConstantParams) ReadField4(iprot thrift.TProtocol) error { + _field := NewTQueryOptions() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryOptions = _field + return nil +} +func (p *TFoldConstantParams) ReadField5(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryId = _field + return nil +} +func (p *TFoldConstantParams) ReadField6(iprot thrift.TProtocol) error { -func (p *TCancelPlanFragmentParams) ReadField2(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } + p.IsNereids = _field return nil } -func (p *TCancelPlanFragmentParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TFoldConstantParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCancelPlanFragmentParams"); err != nil { + if err = oprot.WriteStructBegin("TFoldConstantParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14072,7 +20135,22 @@ func (p *TCancelPlanFragmentParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto 
WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14091,11 +20169,33 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCancelPlanFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { +func (p *TFoldConstantParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("expr_map", thrift.MAP, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.MAP, len(p.ExprMap)); err != nil { + return err + } + for k, v := range p.ExprMap { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(v)); err != nil { + return err + } + for k, v := range v { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14108,12 +20208,29 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCancelPlanFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentInstanceId() { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { +func (p *TFoldConstantParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryGlobals.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFoldConstantParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVecExec() { + if err = oprot.WriteFieldBegin("vec_exec", thrift.BOOL, 3); err != nil { goto WriteFieldBeginError } - if err := p.FragmentInstanceId.Write(oprot); err != nil { + if err := oprot.WriteBool(*p.VecExec); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14122,250 +20239,321 @@ func (p *TCancelPlanFragmentParams) writeField2(oprot thrift.TProtocol) (err err } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TCancelPlanFragmentParams) String() string { +func (p *TFoldConstantParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryOptions.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFoldConstantParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TFoldConstantParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNereids() { + if err = oprot.WriteFieldBegin("is_nereids", thrift.BOOL, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsNereids); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TFoldConstantParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TCancelPlanFragmentParams(%+v)", *p) + return fmt.Sprintf("TFoldConstantParams(%+v)", *p) + } -func (p *TCancelPlanFragmentParams) DeepEqual(ano *TCancelPlanFragmentParams) bool { +func (p *TFoldConstantParams) DeepEqual(ano *TFoldConstantParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { + if !p.Field1DeepEqual(ano.ExprMap) { return false } - if !p.Field2DeepEqual(ano.FragmentInstanceId) { + if !p.Field2DeepEqual(ano.QueryGlobals) { + return false + } + if !p.Field3DeepEqual(ano.VecExec) { + return false + } + if !p.Field4DeepEqual(ano.QueryOptions) { + return false + } + if !p.Field5DeepEqual(ano.QueryId) { + return false + } + if !p.Field6DeepEqual(ano.IsNereids) { return false } return true } -func (p *TCancelPlanFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { +func (p *TFoldConstantParams) Field1DeepEqual(src map[string]map[string]*exprs.TExpr) bool { - if p.ProtocolVersion != src { + if len(p.ExprMap) != len(src) { return false } + for k, v := range p.ExprMap { + _src := src[k] + if len(v) != len(_src) { + return false + } + for k, v := range v { + _src1 := _src[k] + if !v.DeepEqual(_src1) { + return false + } + } + } return true } -func (p *TCancelPlanFragmentParams) Field2DeepEqual(src *types.TUniqueId) bool { +func (p *TFoldConstantParams) Field2DeepEqual(src *TQueryGlobals) bool { - if !p.FragmentInstanceId.DeepEqual(src) { + if !p.QueryGlobals.DeepEqual(src) { return false } return true } +func (p *TFoldConstantParams) Field3DeepEqual(src *bool) bool { -type TCancelPlanFragmentResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + if p.VecExec == src { + return true + } else if p.VecExec == nil || src == nil { + return false + } + if *p.VecExec != *src { + return false + } + return true } +func (p *TFoldConstantParams) Field4DeepEqual(src *TQueryOptions) bool { -func 
NewTCancelPlanFragmentResult_() *TCancelPlanFragmentResult_ { - return &TCancelPlanFragmentResult_{} + if !p.QueryOptions.DeepEqual(src) { + return false + } + return true } +func (p *TFoldConstantParams) Field5DeepEqual(src *types.TUniqueId) bool { -func (p *TCancelPlanFragmentResult_) InitDefault() { - *p = TCancelPlanFragmentResult_{} + if !p.QueryId.DeepEqual(src) { + return false + } + return true } +func (p *TFoldConstantParams) Field6DeepEqual(src *bool) bool { -var TCancelPlanFragmentResult__Status_DEFAULT *status.TStatus - -func (p *TCancelPlanFragmentResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TCancelPlanFragmentResult__Status_DEFAULT + if p.IsNereids == src { + return true + } else if p.IsNereids == nil || src == nil { + return false } - return p.Status + if *p.IsNereids != *src { + return false + } + return true } -func (p *TCancelPlanFragmentResult_) SetStatus(val *status.TStatus) { - p.Status = val + +type TTransmitDataParams struct { + ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` + DestFragmentInstanceId *types.TUniqueId `thrift:"dest_fragment_instance_id,2,optional" frugal:"2,optional,types.TUniqueId" json:"dest_fragment_instance_id,omitempty"` + DestNodeId *types.TPlanNodeId `thrift:"dest_node_id,4,optional" frugal:"4,optional,i32" json:"dest_node_id,omitempty"` + RowBatch *data.TRowBatch `thrift:"row_batch,5,optional" frugal:"5,optional,data.TRowBatch" json:"row_batch,omitempty"` + Eos *bool `thrift:"eos,6,optional" frugal:"6,optional,bool" json:"eos,omitempty"` + BeNumber *int32 `thrift:"be_number,7,optional" frugal:"7,optional,i32" json:"be_number,omitempty"` + PacketSeq *int64 `thrift:"packet_seq,8,optional" frugal:"8,optional,i64" json:"packet_seq,omitempty"` + SenderId *int32 `thrift:"sender_id,9,optional" frugal:"9,optional,i32" json:"sender_id,omitempty"` } -var fieldIDToName_TCancelPlanFragmentResult_ = map[int16]string{ - 1: "status", +func NewTTransmitDataParams() *TTransmitDataParams { + return &TTransmitDataParams{} } -func (p *TCancelPlanFragmentResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TTransmitDataParams) InitDefault() { } -func (p *TCancelPlanFragmentResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TTransmitDataParams) GetProtocolVersion() (v PaloInternalServiceVersion) { + return p.ProtocolVersion +} - var fieldTypeId thrift.TType - var fieldId int16 +var TTransmitDataParams_DestFragmentInstanceId_DEFAULT *types.TUniqueId - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +func (p *TTransmitDataParams) GetDestFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetDestFragmentInstanceId() { + return TTransmitDataParams_DestFragmentInstanceId_DEFAULT } + return p.DestFragmentInstanceId +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +var TTransmitDataParams_DestNodeId_DEFAULT types.TPlanNodeId - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto 
ReadStructEndError +func (p *TTransmitDataParams) GetDestNodeId() (v types.TPlanNodeId) { + if !p.IsSetDestNodeId() { + return TTransmitDataParams_DestNodeId_DEFAULT } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCancelPlanFragmentResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + return *p.DestNodeId } -func (p *TCancelPlanFragmentResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err +var TTransmitDataParams_RowBatch_DEFAULT *data.TRowBatch + +func (p *TTransmitDataParams) GetRowBatch() (v *data.TRowBatch) { + if !p.IsSetRowBatch() { + return TTransmitDataParams_RowBatch_DEFAULT } - return nil + return p.RowBatch } -func (p *TCancelPlanFragmentResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TCancelPlanFragmentResult"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } +var TTransmitDataParams_Eos_DEFAULT bool +func (p *TTransmitDataParams) GetEos() (v bool) { + if !p.IsSetEos() { + return TTransmitDataParams_Eos_DEFAULT } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return *p.Eos } -func (p *TCancelPlanFragmentResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TTransmitDataParams_BeNumber_DEFAULT int32 + +func (p *TTransmitDataParams) GetBeNumber() (v int32) { + if !p.IsSetBeNumber() { + return TTransmitDataParams_BeNumber_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return *p.BeNumber } -func (p *TCancelPlanFragmentResult_) String() string { - if p == nil { - return "" +var TTransmitDataParams_PacketSeq_DEFAULT int64 + +func (p *TTransmitDataParams) GetPacketSeq() (v int64) { + if !p.IsSetPacketSeq() { + return TTransmitDataParams_PacketSeq_DEFAULT } - return fmt.Sprintf("TCancelPlanFragmentResult_(%+v)", *p) + return *p.PacketSeq } -func (p 
*TCancelPlanFragmentResult_) DeepEqual(ano *TCancelPlanFragmentResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Status) { - return false +var TTransmitDataParams_SenderId_DEFAULT int32 + +func (p *TTransmitDataParams) GetSenderId() (v int32) { + if !p.IsSetSenderId() { + return TTransmitDataParams_SenderId_DEFAULT } - return true + return *p.SenderId +} +func (p *TTransmitDataParams) SetProtocolVersion(val PaloInternalServiceVersion) { + p.ProtocolVersion = val +} +func (p *TTransmitDataParams) SetDestFragmentInstanceId(val *types.TUniqueId) { + p.DestFragmentInstanceId = val +} +func (p *TTransmitDataParams) SetDestNodeId(val *types.TPlanNodeId) { + p.DestNodeId = val +} +func (p *TTransmitDataParams) SetRowBatch(val *data.TRowBatch) { + p.RowBatch = val +} +func (p *TTransmitDataParams) SetEos(val *bool) { + p.Eos = val +} +func (p *TTransmitDataParams) SetBeNumber(val *int32) { + p.BeNumber = val +} +func (p *TTransmitDataParams) SetPacketSeq(val *int64) { + p.PacketSeq = val +} +func (p *TTransmitDataParams) SetSenderId(val *int32) { + p.SenderId = val } -func (p *TCancelPlanFragmentResult_) Field1DeepEqual(src *status.TStatus) bool { +var fieldIDToName_TTransmitDataParams = map[int16]string{ + 1: "protocol_version", + 2: "dest_fragment_instance_id", + 4: "dest_node_id", + 5: "row_batch", + 6: "eos", + 7: "be_number", + 8: "packet_seq", + 9: "sender_id", +} - if !p.Status.DeepEqual(src) { - return false - } - return true +func (p *TTransmitDataParams) IsSetDestFragmentInstanceId() bool { + return p.DestFragmentInstanceId != nil } -type TExprMap struct { - ExprMap map[string]*exprs.TExpr `thrift:"expr_map,1,required" frugal:"1,required,map" json:"expr_map"` +func (p *TTransmitDataParams) IsSetDestNodeId() bool { + return p.DestNodeId != nil } -func NewTExprMap() *TExprMap { - return &TExprMap{} +func (p *TTransmitDataParams) IsSetRowBatch() bool { + return p.RowBatch != nil } -func (p *TExprMap) InitDefault() { - *p = TExprMap{} +func (p *TTransmitDataParams) IsSetEos() bool { + return p.Eos != nil } -func (p *TExprMap) GetExprMap() (v map[string]*exprs.TExpr) { - return p.ExprMap +func (p *TTransmitDataParams) IsSetBeNumber() bool { + return p.BeNumber != nil } -func (p *TExprMap) SetExprMap(val map[string]*exprs.TExpr) { - p.ExprMap = val + +func (p *TTransmitDataParams) IsSetPacketSeq() bool { + return p.PacketSeq != nil } -var fieldIDToName_TExprMap = map[int16]string{ - 1: "expr_map", +func (p *TTransmitDataParams) IsSetSenderId() bool { + return p.SenderId != nil } -func (p *TExprMap) Read(iprot thrift.TProtocol) (err error) { +func (p *TTransmitDataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetExprMap bool = false + var issetProtocolVersion bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -14382,22 +20570,75 @@ func (p *TExprMap) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetExprMap = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); 
err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I32 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14406,7 +20647,7 @@ func (p *TExprMap) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetExprMap { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } @@ -14416,7 +20657,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExprMap[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransmitDataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14425,38 +20666,95 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExprMap[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTransmitDataParams[fieldId])) } -func (p *TExprMap) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { +func (p *TTransmitDataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field PaloInternalServiceVersion + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _field = PaloInternalServiceVersion(v) } - p.ExprMap = make(map[string]*exprs.TExpr, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - _val := exprs.NewTExpr() - if err := _val.Read(iprot); err != nil { - return err - } + p.ProtocolVersion = _field + return nil +} +func (p *TTransmitDataParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.DestFragmentInstanceId = _field + return nil +} +func (p *TTransmitDataParams) ReadField4(iprot thrift.TProtocol) 
error { - p.ExprMap[_key] = _val + var _field *types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - if err := iprot.ReadMapEnd(); err != nil { + p.DestNodeId = _field + return nil +} +func (p *TTransmitDataParams) ReadField5(iprot thrift.TProtocol) error { + _field := data.NewTRowBatch() + if err := _field.Read(iprot); err != nil { return err } + p.RowBatch = _field return nil } +func (p *TTransmitDataParams) ReadField6(iprot thrift.TProtocol) error { -func (p *TExprMap) Write(oprot thrift.TProtocol) (err error) { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Eos = _field + return nil +} +func (p *TTransmitDataParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.BeNumber = _field + return nil +} +func (p *TTransmitDataParams) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.PacketSeq = _field + return nil +} +func (p *TTransmitDataParams) ReadField9(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SenderId = _field + return nil +} + +func (p *TTransmitDataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TExprMap"); err != nil { + if err = oprot.WriteStructBegin("TTransmitDataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14464,7 +20762,34 @@ func (p *TExprMap) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14473,180 +20798,378 @@ func (p *TExprMap) Write(oprot thrift.TProtocol) (err error) { goto WriteStructEndError } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = 
oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDestFragmentInstanceId() { + if err = oprot.WriteFieldBegin("dest_fragment_instance_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.DestFragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDestNodeId() { + if err = oprot.WriteFieldBegin("dest_node_id", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.DestNodeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRowBatch() { + if err = oprot.WriteFieldBegin("row_batch", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.RowBatch.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetEos() { + if err = oprot.WriteFieldBegin("eos", thrift.BOOL, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Eos); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetBeNumber() { + if err = oprot.WriteFieldBegin("be_number", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BeNumber); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TTransmitDataParams) writeField8(oprot 
thrift.TProtocol) (err error) { + if p.IsSetPacketSeq() { + if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.PacketSeq); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TExprMap) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("expr_map", thrift.MAP, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(p.ExprMap)); err != nil { - return err - } - for k, v := range p.ExprMap { - - if err := oprot.WriteString(k); err != nil { - return err +func (p *TTransmitDataParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetSenderId() { + if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 9); err != nil { + goto WriteFieldBeginError } - - if err := v.Write(oprot); err != nil { + if err := oprot.WriteI32(*p.SenderId); err != nil { return err } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TExprMap) String() string { +func (p *TTransmitDataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TExprMap(%+v)", *p) + return fmt.Sprintf("TTransmitDataParams(%+v)", *p) + } -func (p *TExprMap) DeepEqual(ano *TExprMap) bool { +func (p *TTransmitDataParams) DeepEqual(ano *TTransmitDataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ExprMap) { + if !p.Field1DeepEqual(ano.ProtocolVersion) { + return false + } + if !p.Field2DeepEqual(ano.DestFragmentInstanceId) { + return false + } + if !p.Field4DeepEqual(ano.DestNodeId) { + return false + } + if !p.Field5DeepEqual(ano.RowBatch) { + return false + } + if !p.Field6DeepEqual(ano.Eos) { + return false + } + if !p.Field7DeepEqual(ano.BeNumber) { + return false + } + if !p.Field8DeepEqual(ano.PacketSeq) { + return false + } + if !p.Field9DeepEqual(ano.SenderId) { return false } return true } -func (p *TExprMap) Field1DeepEqual(src map[string]*exprs.TExpr) bool { +func (p *TTransmitDataParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { - if len(p.ExprMap) != len(src) { + if p.ProtocolVersion != src { return false } - for k, v := range p.ExprMap { - _src := src[k] - if !v.DeepEqual(_src) { - return false - } + return true +} +func (p *TTransmitDataParams) Field2DeepEqual(src *types.TUniqueId) bool { + + if !p.DestFragmentInstanceId.DeepEqual(src) { + return false } return true } +func (p *TTransmitDataParams) Field4DeepEqual(src *types.TPlanNodeId) bool { -type TFoldConstantParams struct { - ExprMap map[string]map[string]*exprs.TExpr `thrift:"expr_map,1,required" frugal:"1,required,map>" json:"expr_map"` - 
QueryGlobals *TQueryGlobals `thrift:"query_globals,2,required" frugal:"2,required,TQueryGlobals" json:"query_globals"` - VecExec *bool `thrift:"vec_exec,3,optional" frugal:"3,optional,bool" json:"vec_exec,omitempty"` - QueryOptions *TQueryOptions `thrift:"query_options,4,optional" frugal:"4,optional,TQueryOptions" json:"query_options,omitempty"` - QueryId *types.TUniqueId `thrift:"query_id,5,optional" frugal:"5,optional,types.TUniqueId" json:"query_id,omitempty"` + if p.DestNodeId == src { + return true + } else if p.DestNodeId == nil || src == nil { + return false + } + if *p.DestNodeId != *src { + return false + } + return true } +func (p *TTransmitDataParams) Field5DeepEqual(src *data.TRowBatch) bool { -func NewTFoldConstantParams() *TFoldConstantParams { - return &TFoldConstantParams{} + if !p.RowBatch.DeepEqual(src) { + return false + } + return true } +func (p *TTransmitDataParams) Field6DeepEqual(src *bool) bool { -func (p *TFoldConstantParams) InitDefault() { - *p = TFoldConstantParams{} + if p.Eos == src { + return true + } else if p.Eos == nil || src == nil { + return false + } + if *p.Eos != *src { + return false + } + return true } +func (p *TTransmitDataParams) Field7DeepEqual(src *int32) bool { -func (p *TFoldConstantParams) GetExprMap() (v map[string]map[string]*exprs.TExpr) { - return p.ExprMap + if p.BeNumber == src { + return true + } else if p.BeNumber == nil || src == nil { + return false + } + if *p.BeNumber != *src { + return false + } + return true } +func (p *TTransmitDataParams) Field8DeepEqual(src *int64) bool { -var TFoldConstantParams_QueryGlobals_DEFAULT *TQueryGlobals + if p.PacketSeq == src { + return true + } else if p.PacketSeq == nil || src == nil { + return false + } + if *p.PacketSeq != *src { + return false + } + return true +} +func (p *TTransmitDataParams) Field9DeepEqual(src *int32) bool { -func (p *TFoldConstantParams) GetQueryGlobals() (v *TQueryGlobals) { - if !p.IsSetQueryGlobals() { - return TFoldConstantParams_QueryGlobals_DEFAULT + if p.SenderId == src { + return true + } else if p.SenderId == nil || src == nil { + return false } - return p.QueryGlobals + if *p.SenderId != *src { + return false + } + return true } -var TFoldConstantParams_VecExec_DEFAULT bool +type TTransmitDataResult_ struct { + Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` + PacketSeq *int64 `thrift:"packet_seq,2,optional" frugal:"2,optional,i64" json:"packet_seq,omitempty"` + DestFragmentInstanceId *types.TUniqueId `thrift:"dest_fragment_instance_id,3,optional" frugal:"3,optional,types.TUniqueId" json:"dest_fragment_instance_id,omitempty"` + DestNodeId *types.TPlanNodeId `thrift:"dest_node_id,4,optional" frugal:"4,optional,i32" json:"dest_node_id,omitempty"` +} -func (p *TFoldConstantParams) GetVecExec() (v bool) { - if !p.IsSetVecExec() { - return TFoldConstantParams_VecExec_DEFAULT +func NewTTransmitDataResult_() *TTransmitDataResult_ { + return &TTransmitDataResult_{} +} + +func (p *TTransmitDataResult_) InitDefault() { +} + +var TTransmitDataResult__Status_DEFAULT *status.TStatus + +func (p *TTransmitDataResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TTransmitDataResult__Status_DEFAULT } - return *p.VecExec + return p.Status } -var TFoldConstantParams_QueryOptions_DEFAULT *TQueryOptions +var TTransmitDataResult__PacketSeq_DEFAULT int64 -func (p *TFoldConstantParams) GetQueryOptions() (v *TQueryOptions) { - if !p.IsSetQueryOptions() { - return 
TFoldConstantParams_QueryOptions_DEFAULT +func (p *TTransmitDataResult_) GetPacketSeq() (v int64) { + if !p.IsSetPacketSeq() { + return TTransmitDataResult__PacketSeq_DEFAULT } - return p.QueryOptions + return *p.PacketSeq } -var TFoldConstantParams_QueryId_DEFAULT *types.TUniqueId +var TTransmitDataResult__DestFragmentInstanceId_DEFAULT *types.TUniqueId -func (p *TFoldConstantParams) GetQueryId() (v *types.TUniqueId) { - if !p.IsSetQueryId() { - return TFoldConstantParams_QueryId_DEFAULT +func (p *TTransmitDataResult_) GetDestFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetDestFragmentInstanceId() { + return TTransmitDataResult__DestFragmentInstanceId_DEFAULT } - return p.QueryId + return p.DestFragmentInstanceId } -func (p *TFoldConstantParams) SetExprMap(val map[string]map[string]*exprs.TExpr) { - p.ExprMap = val + +var TTransmitDataResult__DestNodeId_DEFAULT types.TPlanNodeId + +func (p *TTransmitDataResult_) GetDestNodeId() (v types.TPlanNodeId) { + if !p.IsSetDestNodeId() { + return TTransmitDataResult__DestNodeId_DEFAULT + } + return *p.DestNodeId } -func (p *TFoldConstantParams) SetQueryGlobals(val *TQueryGlobals) { - p.QueryGlobals = val +func (p *TTransmitDataResult_) SetStatus(val *status.TStatus) { + p.Status = val } -func (p *TFoldConstantParams) SetVecExec(val *bool) { - p.VecExec = val +func (p *TTransmitDataResult_) SetPacketSeq(val *int64) { + p.PacketSeq = val } -func (p *TFoldConstantParams) SetQueryOptions(val *TQueryOptions) { - p.QueryOptions = val +func (p *TTransmitDataResult_) SetDestFragmentInstanceId(val *types.TUniqueId) { + p.DestFragmentInstanceId = val } -func (p *TFoldConstantParams) SetQueryId(val *types.TUniqueId) { - p.QueryId = val +func (p *TTransmitDataResult_) SetDestNodeId(val *types.TPlanNodeId) { + p.DestNodeId = val } -var fieldIDToName_TFoldConstantParams = map[int16]string{ - 1: "expr_map", - 2: "query_globals", - 3: "vec_exec", - 4: "query_options", - 5: "query_id", +var fieldIDToName_TTransmitDataResult_ = map[int16]string{ + 1: "status", + 2: "packet_seq", + 3: "dest_fragment_instance_id", + 4: "dest_node_id", } -func (p *TFoldConstantParams) IsSetQueryGlobals() bool { - return p.QueryGlobals != nil +func (p *TTransmitDataResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TFoldConstantParams) IsSetVecExec() bool { - return p.VecExec != nil +func (p *TTransmitDataResult_) IsSetPacketSeq() bool { + return p.PacketSeq != nil } -func (p *TFoldConstantParams) IsSetQueryOptions() bool { - return p.QueryOptions != nil +func (p *TTransmitDataResult_) IsSetDestFragmentInstanceId() bool { + return p.DestFragmentInstanceId != nil } -func (p *TFoldConstantParams) IsSetQueryId() bool { - return p.QueryId != nil +func (p *TTransmitDataResult_) IsSetDestNodeId() bool { + return p.DestNodeId != nil } -func (p *TFoldConstantParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TTransmitDataResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetExprMap bool = false - var issetQueryGlobals bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -14663,63 +21186,42 @@ func (p *TFoldConstantParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetExprMap = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetQueryGlobals = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14728,22 +21230,13 @@ func (p *TFoldConstantParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetExprMap { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetQueryGlobals { - fieldId = 2 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFoldConstantParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransmitDataResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14751,91 +21244,50 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFoldConstantParams[fieldId])) -} - -func (p *TFoldConstantParams) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.ExprMap = make(map[string]map[string]*exprs.TExpr, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - _val := make(map[string]*exprs.TExpr, size) - for i := 0; i < size; i++ { - var _key1 string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key1 = v - } - _val1 := exprs.NewTExpr() - if err := _val1.Read(iprot); err != nil { - return err - } - - _val[_key1] = _val1 - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - - p.ExprMap[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - 
return err - } - return nil } -func (p *TFoldConstantParams) ReadField2(iprot thrift.TProtocol) error { - p.QueryGlobals = NewTQueryGlobals() - if err := p.QueryGlobals.Read(iprot); err != nil { +func (p *TTransmitDataResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } +func (p *TTransmitDataResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TFoldConstantParams) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.VecExec = &v + _field = &v } + p.PacketSeq = _field return nil } - -func (p *TFoldConstantParams) ReadField4(iprot thrift.TProtocol) error { - p.QueryOptions = NewTQueryOptions() - if err := p.QueryOptions.Read(iprot); err != nil { +func (p *TTransmitDataResult_) ReadField3(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.DestFragmentInstanceId = _field return nil } +func (p *TTransmitDataResult_) ReadField4(iprot thrift.TProtocol) error { -func (p *TFoldConstantParams) ReadField5(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { + var _field *types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _field = &v } + p.DestNodeId = _field return nil } -func (p *TFoldConstantParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTransmitDataResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFoldConstantParams"); err != nil { + if err = oprot.WriteStructBegin("TTransmitDataResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14855,11 +21307,6 @@ func (p *TFoldConstantParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14878,42 +21325,18 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFoldConstantParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("expr_map", thrift.MAP, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.MAP, len(p.ExprMap)); err != nil { - return err - } - for k, v := range p.ExprMap { - - if err := oprot.WriteString(k); err != nil { - return err +func (p *TTransmitDataResult_) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRUCT, len(v)); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } - for k, v := range v { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } return nil WriteFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T write field 1 
begin error: ", p), err) @@ -14921,29 +21344,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFoldConstantParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryGlobals.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TFoldConstantParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetVecExec() { - if err = oprot.WriteFieldBegin("vec_exec", thrift.BOOL, 3); err != nil { +func (p *TTransmitDataResult_) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPacketSeq() { + if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(*p.VecExec); err != nil { + if err := oprot.WriteI64(*p.PacketSeq); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14952,17 +21358,17 @@ func (p *TFoldConstantParams) writeField3(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFoldConstantParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryOptions() { - if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 4); err != nil { +func (p *TTransmitDataResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDestFragmentInstanceId() { + if err = oprot.WriteFieldBegin("dest_fragment_instance_id", thrift.STRUCT, 3); err != nil { goto WriteFieldBeginError } - if err := p.QueryOptions.Write(oprot); err != nil { + if err := p.DestFragmentInstanceId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14971,17 +21377,17 @@ func (p *TFoldConstantParams) writeField4(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TFoldConstantParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryId() { - if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 5); err != nil { +func (p *TTransmitDataResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDestNodeId() { + if err = oprot.WriteFieldBegin("dest_node_id", thrift.I32, 4); err != nil { goto WriteFieldBeginError } - if err := p.QueryId.Write(oprot); err != nil { + if err := oprot.WriteI32(*p.DestNodeId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14990,351 +21396,154 @@ func (p *TFoldConstantParams) writeField5(oprot thrift.TProtocol) (err error) { } 
return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TFoldConstantParams) String() string { +func (p *TTransmitDataResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TFoldConstantParams(%+v)", *p) -} - -func (p *TFoldConstantParams) DeepEqual(ano *TFoldConstantParams) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.ExprMap) { - return false - } - if !p.Field2DeepEqual(ano.QueryGlobals) { - return false - } - if !p.Field3DeepEqual(ano.VecExec) { - return false - } - if !p.Field4DeepEqual(ano.QueryOptions) { - return false - } - if !p.Field5DeepEqual(ano.QueryId) { - return false - } - return true -} - -func (p *TFoldConstantParams) Field1DeepEqual(src map[string]map[string]*exprs.TExpr) bool { - - if len(p.ExprMap) != len(src) { - return false - } - for k, v := range p.ExprMap { - _src := src[k] - if len(v) != len(_src) { - return false - } - for k, v := range v { - _src1 := _src[k] - if !v.DeepEqual(_src1) { - return false - } - } - } - return true -} -func (p *TFoldConstantParams) Field2DeepEqual(src *TQueryGlobals) bool { - - if !p.QueryGlobals.DeepEqual(src) { - return false - } - return true + return fmt.Sprintf("TTransmitDataResult_(%+v)", *p) + } -func (p *TFoldConstantParams) Field3DeepEqual(src *bool) bool { - if p.VecExec == src { +func (p *TTransmitDataResult_) DeepEqual(ano *TTransmitDataResult_) bool { + if p == ano { return true - } else if p.VecExec == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.VecExec != *src { + if !p.Field1DeepEqual(ano.Status) { return false } - return true -} -func (p *TFoldConstantParams) Field4DeepEqual(src *TQueryOptions) bool { - - if !p.QueryOptions.DeepEqual(src) { + if !p.Field2DeepEqual(ano.PacketSeq) { return false } - return true -} -func (p *TFoldConstantParams) Field5DeepEqual(src *types.TUniqueId) bool { - - if !p.QueryId.DeepEqual(src) { + if !p.Field3DeepEqual(ano.DestFragmentInstanceId) { return false } - return true -} - -type TTransmitDataParams struct { - ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` - DestFragmentInstanceId *types.TUniqueId `thrift:"dest_fragment_instance_id,2,optional" frugal:"2,optional,types.TUniqueId" json:"dest_fragment_instance_id,omitempty"` - DestNodeId *types.TPlanNodeId `thrift:"dest_node_id,4,optional" frugal:"4,optional,i32" json:"dest_node_id,omitempty"` - RowBatch *data.TRowBatch `thrift:"row_batch,5,optional" frugal:"5,optional,data.TRowBatch" json:"row_batch,omitempty"` - Eos *bool `thrift:"eos,6,optional" frugal:"6,optional,bool" json:"eos,omitempty"` - BeNumber *int32 `thrift:"be_number,7,optional" frugal:"7,optional,i32" json:"be_number,omitempty"` - PacketSeq *int64 `thrift:"packet_seq,8,optional" frugal:"8,optional,i64" json:"packet_seq,omitempty"` - SenderId *int32 `thrift:"sender_id,9,optional" frugal:"9,optional,i32" json:"sender_id,omitempty"` -} - -func NewTTransmitDataParams() *TTransmitDataParams { - return &TTransmitDataParams{} -} - -func (p *TTransmitDataParams) InitDefault() { - *p = TTransmitDataParams{} -} - 
-func (p *TTransmitDataParams) GetProtocolVersion() (v PaloInternalServiceVersion) { - return p.ProtocolVersion -} - -var TTransmitDataParams_DestFragmentInstanceId_DEFAULT *types.TUniqueId - -func (p *TTransmitDataParams) GetDestFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetDestFragmentInstanceId() { - return TTransmitDataParams_DestFragmentInstanceId_DEFAULT + if !p.Field4DeepEqual(ano.DestNodeId) { + return false } - return p.DestFragmentInstanceId + return true } -var TTransmitDataParams_DestNodeId_DEFAULT types.TPlanNodeId +func (p *TTransmitDataResult_) Field1DeepEqual(src *status.TStatus) bool { -func (p *TTransmitDataParams) GetDestNodeId() (v types.TPlanNodeId) { - if !p.IsSetDestNodeId() { - return TTransmitDataParams_DestNodeId_DEFAULT + if !p.Status.DeepEqual(src) { + return false } - return *p.DestNodeId + return true } +func (p *TTransmitDataResult_) Field2DeepEqual(src *int64) bool { -var TTransmitDataParams_RowBatch_DEFAULT *data.TRowBatch - -func (p *TTransmitDataParams) GetRowBatch() (v *data.TRowBatch) { - if !p.IsSetRowBatch() { - return TTransmitDataParams_RowBatch_DEFAULT + if p.PacketSeq == src { + return true + } else if p.PacketSeq == nil || src == nil { + return false } - return p.RowBatch -} - -var TTransmitDataParams_Eos_DEFAULT bool - -func (p *TTransmitDataParams) GetEos() (v bool) { - if !p.IsSetEos() { - return TTransmitDataParams_Eos_DEFAULT + if *p.PacketSeq != *src { + return false } - return *p.Eos + return true } +func (p *TTransmitDataResult_) Field3DeepEqual(src *types.TUniqueId) bool { -var TTransmitDataParams_BeNumber_DEFAULT int32 - -func (p *TTransmitDataParams) GetBeNumber() (v int32) { - if !p.IsSetBeNumber() { - return TTransmitDataParams_BeNumber_DEFAULT + if !p.DestFragmentInstanceId.DeepEqual(src) { + return false } - return *p.BeNumber + return true } +func (p *TTransmitDataResult_) Field4DeepEqual(src *types.TPlanNodeId) bool { -var TTransmitDataParams_PacketSeq_DEFAULT int64 - -func (p *TTransmitDataParams) GetPacketSeq() (v int64) { - if !p.IsSetPacketSeq() { - return TTransmitDataParams_PacketSeq_DEFAULT + if p.DestNodeId == src { + return true + } else if p.DestNodeId == nil || src == nil { + return false } - return *p.PacketSeq -} - -var TTransmitDataParams_SenderId_DEFAULT int32 - -func (p *TTransmitDataParams) GetSenderId() (v int32) { - if !p.IsSetSenderId() { - return TTransmitDataParams_SenderId_DEFAULT + if *p.DestNodeId != *src { + return false } - return *p.SenderId -} -func (p *TTransmitDataParams) SetProtocolVersion(val PaloInternalServiceVersion) { - p.ProtocolVersion = val -} -func (p *TTransmitDataParams) SetDestFragmentInstanceId(val *types.TUniqueId) { - p.DestFragmentInstanceId = val -} -func (p *TTransmitDataParams) SetDestNodeId(val *types.TPlanNodeId) { - p.DestNodeId = val -} -func (p *TTransmitDataParams) SetRowBatch(val *data.TRowBatch) { - p.RowBatch = val -} -func (p *TTransmitDataParams) SetEos(val *bool) { - p.Eos = val -} -func (p *TTransmitDataParams) SetBeNumber(val *int32) { - p.BeNumber = val -} -func (p *TTransmitDataParams) SetPacketSeq(val *int64) { - p.PacketSeq = val -} -func (p *TTransmitDataParams) SetSenderId(val *int32) { - p.SenderId = val -} - -var fieldIDToName_TTransmitDataParams = map[int16]string{ - 1: "protocol_version", - 2: "dest_fragment_instance_id", - 4: "dest_node_id", - 5: "row_batch", - 6: "eos", - 7: "be_number", - 8: "packet_seq", - 9: "sender_id", -} - -func (p *TTransmitDataParams) IsSetDestFragmentInstanceId() bool { - return p.DestFragmentInstanceId != nil + 
return true } -func (p *TTransmitDataParams) IsSetDestNodeId() bool { - return p.DestNodeId != nil +type TTabletWithPartition struct { + PartitionId int64 `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` + TabletId int64 `thrift:"tablet_id,2,required" frugal:"2,required,i64" json:"tablet_id"` } -func (p *TTransmitDataParams) IsSetRowBatch() bool { - return p.RowBatch != nil +func NewTTabletWithPartition() *TTabletWithPartition { + return &TTabletWithPartition{} } -func (p *TTransmitDataParams) IsSetEos() bool { - return p.Eos != nil +func (p *TTabletWithPartition) InitDefault() { } -func (p *TTransmitDataParams) IsSetBeNumber() bool { - return p.BeNumber != nil +func (p *TTabletWithPartition) GetPartitionId() (v int64) { + return p.PartitionId } -func (p *TTransmitDataParams) IsSetPacketSeq() bool { - return p.PacketSeq != nil +func (p *TTabletWithPartition) GetTabletId() (v int64) { + return p.TabletId } - -func (p *TTransmitDataParams) IsSetSenderId() bool { - return p.SenderId != nil +func (p *TTabletWithPartition) SetPartitionId(val int64) { + p.PartitionId = val } - -func (p *TTransmitDataParams) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetProtocolVersion bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: +func (p *TTabletWithPartition) SetTabletId(val int64) { + p.TabletId = val +} + +var fieldIDToName_TTabletWithPartition = map[int16]string{ + 1: "partition_id", + 2: "tablet_id", +} + +func (p *TTabletWithPartition) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + var issetPartitionId bool = false + var issetTabletId bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: if fieldTypeId == thrift.I64 { - if err = p.ReadField8(iprot); err != 
nil { + if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPartitionId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 9: - if fieldTypeId == thrift.I32 { - if err = p.ReadField9(iprot); err != nil { + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetTabletId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15343,17 +21552,22 @@ func (p *TTransmitDataParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetProtocolVersion { + if !issetPartitionId { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetTabletId { + fieldId = 2 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransmitDataParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWithPartition[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15362,82 +21576,35 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTransmitDataParams[fieldId])) -} - -func (p *TTransmitDataParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ProtocolVersion = PaloInternalServiceVersion(v) - } - return nil -} - -func (p *TTransmitDataParams) ReadField2(iprot thrift.TProtocol) error { - p.DestFragmentInstanceId = types.NewTUniqueId() - if err := p.DestFragmentInstanceId.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TTransmitDataParams) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.DestNodeId = &v - } - return nil -} - -func (p *TTransmitDataParams) ReadField5(iprot thrift.TProtocol) error { - p.RowBatch = data.NewTRowBatch() - if err := p.RowBatch.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TTransmitDataParams) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.Eos = &v - } - return nil + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWithPartition[fieldId])) } -func (p *TTransmitDataParams) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.BeNumber = &v - } - return nil -} +func (p *TTabletWithPartition) ReadField1(iprot thrift.TProtocol) error { -func (p *TTransmitDataParams) ReadField8(iprot thrift.TProtocol) error { + var 
_field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PacketSeq = &v + _field = v } + p.PartitionId = _field return nil } +func (p *TTabletWithPartition) ReadField2(iprot thrift.TProtocol) error { -func (p *TTransmitDataParams) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.SenderId = &v + _field = v } + p.TabletId = _field return nil } -func (p *TTransmitDataParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWithPartition) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTransmitDataParams"); err != nil { + if err = oprot.WriteStructBegin("TTabletWithPartition"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15449,31 +21616,6 @@ func (p *TTransmitDataParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15492,11 +21634,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTransmitDataParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { +func (p *TTabletWithPartition) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + if err := oprot.WriteI64(p.PartitionId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15509,351 +21651,156 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTransmitDataParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDestFragmentInstanceId() { - if err = oprot.WriteFieldBegin("dest_fragment_instance_id", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.DestFragmentInstanceId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDestNodeId() { - if err = oprot.WriteFieldBegin("dest_node_id", thrift.I32, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.DestNodeId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetRowBatch() { - if err = oprot.WriteFieldBegin("row_batch", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.RowBatch.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetEos() { - if err = oprot.WriteFieldBegin("eos", thrift.BOOL, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.Eos); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetBeNumber() { - if err = oprot.WriteFieldBegin("be_number", thrift.I32, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.BeNumber); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetPacketSeq() { - if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.PacketSeq); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TTransmitDataParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetSenderId() { - if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.SenderId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTabletWithPartition) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TabletId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTransmitDataParams) 
String() string { +func (p *TTabletWithPartition) String() string { if p == nil { return "" } - return fmt.Sprintf("TTransmitDataParams(%+v)", *p) + return fmt.Sprintf("TTabletWithPartition(%+v)", *p) + } -func (p *TTransmitDataParams) DeepEqual(ano *TTransmitDataParams) bool { +func (p *TTabletWithPartition) DeepEqual(ano *TTabletWithPartition) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { - return false - } - if !p.Field2DeepEqual(ano.DestFragmentInstanceId) { - return false - } - if !p.Field4DeepEqual(ano.DestNodeId) { - return false - } - if !p.Field5DeepEqual(ano.RowBatch) { - return false - } - if !p.Field6DeepEqual(ano.Eos) { - return false - } - if !p.Field7DeepEqual(ano.BeNumber) { - return false - } - if !p.Field8DeepEqual(ano.PacketSeq) { + if !p.Field1DeepEqual(ano.PartitionId) { return false } - if !p.Field9DeepEqual(ano.SenderId) { + if !p.Field2DeepEqual(ano.TabletId) { return false } return true } -func (p *TTransmitDataParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { - - if p.ProtocolVersion != src { - return false - } - return true -} -func (p *TTransmitDataParams) Field2DeepEqual(src *types.TUniqueId) bool { +func (p *TTabletWithPartition) Field1DeepEqual(src int64) bool { - if !p.DestFragmentInstanceId.DeepEqual(src) { + if p.PartitionId != src { return false } return true } -func (p *TTransmitDataParams) Field4DeepEqual(src *types.TPlanNodeId) bool { +func (p *TTabletWithPartition) Field2DeepEqual(src int64) bool { - if p.DestNodeId == src { - return true - } else if p.DestNodeId == nil || src == nil { - return false - } - if *p.DestNodeId != *src { + if p.TabletId != src { return false } return true } -func (p *TTransmitDataParams) Field5DeepEqual(src *data.TRowBatch) bool { - if !p.RowBatch.DeepEqual(src) { - return false - } - return true +type TTabletWriterOpenParams struct { + Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` + IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` + TxnId int64 `thrift:"txn_id,3,required" frugal:"3,required,i64" json:"txn_id"` + Schema *descriptors.TOlapTableSchemaParam `thrift:"schema,4,required" frugal:"4,required,descriptors.TOlapTableSchemaParam" json:"schema"` + Tablets []*TTabletWithPartition `thrift:"tablets,5,required" frugal:"5,required,list" json:"tablets"` + NumSenders int32 `thrift:"num_senders,6,required" frugal:"6,required,i32" json:"num_senders"` } -func (p *TTransmitDataParams) Field6DeepEqual(src *bool) bool { - if p.Eos == src { - return true - } else if p.Eos == nil || src == nil { - return false - } - if *p.Eos != *src { - return false - } - return true +func NewTTabletWriterOpenParams() *TTabletWriterOpenParams { + return &TTabletWriterOpenParams{} } -func (p *TTransmitDataParams) Field7DeepEqual(src *int32) bool { - if p.BeNumber == src { - return true - } else if p.BeNumber == nil || src == nil { - return false - } - if *p.BeNumber != *src { - return false - } - return true +func (p *TTabletWriterOpenParams) InitDefault() { } -func (p *TTransmitDataParams) Field8DeepEqual(src *int64) bool { - if p.PacketSeq == src { - return true - } else if p.PacketSeq == nil || src == nil { - return false - } - if *p.PacketSeq != *src { - return false - } - return true -} -func (p *TTransmitDataParams) Field9DeepEqual(src *int32) bool { +var TTabletWriterOpenParams_Id_DEFAULT *types.TUniqueId - if p.SenderId == src { - return true - } else if p.SenderId == 
nil || src == nil { - return false - } - if *p.SenderId != *src { - return false +func (p *TTabletWriterOpenParams) GetId() (v *types.TUniqueId) { + if !p.IsSetId() { + return TTabletWriterOpenParams_Id_DEFAULT } - return true -} - -type TTransmitDataResult_ struct { - Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` - PacketSeq *int64 `thrift:"packet_seq,2,optional" frugal:"2,optional,i64" json:"packet_seq,omitempty"` - DestFragmentInstanceId *types.TUniqueId `thrift:"dest_fragment_instance_id,3,optional" frugal:"3,optional,types.TUniqueId" json:"dest_fragment_instance_id,omitempty"` - DestNodeId *types.TPlanNodeId `thrift:"dest_node_id,4,optional" frugal:"4,optional,i32" json:"dest_node_id,omitempty"` -} - -func NewTTransmitDataResult_() *TTransmitDataResult_ { - return &TTransmitDataResult_{} + return p.Id } -func (p *TTransmitDataResult_) InitDefault() { - *p = TTransmitDataResult_{} +func (p *TTabletWriterOpenParams) GetIndexId() (v int64) { + return p.IndexId } -var TTransmitDataResult__Status_DEFAULT *status.TStatus - -func (p *TTransmitDataResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TTransmitDataResult__Status_DEFAULT - } - return p.Status +func (p *TTabletWriterOpenParams) GetTxnId() (v int64) { + return p.TxnId } -var TTransmitDataResult__PacketSeq_DEFAULT int64 +var TTabletWriterOpenParams_Schema_DEFAULT *descriptors.TOlapTableSchemaParam -func (p *TTransmitDataResult_) GetPacketSeq() (v int64) { - if !p.IsSetPacketSeq() { - return TTransmitDataResult__PacketSeq_DEFAULT +func (p *TTabletWriterOpenParams) GetSchema() (v *descriptors.TOlapTableSchemaParam) { + if !p.IsSetSchema() { + return TTabletWriterOpenParams_Schema_DEFAULT } - return *p.PacketSeq + return p.Schema } -var TTransmitDataResult__DestFragmentInstanceId_DEFAULT *types.TUniqueId - -func (p *TTransmitDataResult_) GetDestFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetDestFragmentInstanceId() { - return TTransmitDataResult__DestFragmentInstanceId_DEFAULT - } - return p.DestFragmentInstanceId +func (p *TTabletWriterOpenParams) GetTablets() (v []*TTabletWithPartition) { + return p.Tablets } -var TTransmitDataResult__DestNodeId_DEFAULT types.TPlanNodeId - -func (p *TTransmitDataResult_) GetDestNodeId() (v types.TPlanNodeId) { - if !p.IsSetDestNodeId() { - return TTransmitDataResult__DestNodeId_DEFAULT - } - return *p.DestNodeId +func (p *TTabletWriterOpenParams) GetNumSenders() (v int32) { + return p.NumSenders } -func (p *TTransmitDataResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TTabletWriterOpenParams) SetId(val *types.TUniqueId) { + p.Id = val } -func (p *TTransmitDataResult_) SetPacketSeq(val *int64) { - p.PacketSeq = val +func (p *TTabletWriterOpenParams) SetIndexId(val int64) { + p.IndexId = val } -func (p *TTransmitDataResult_) SetDestFragmentInstanceId(val *types.TUniqueId) { - p.DestFragmentInstanceId = val +func (p *TTabletWriterOpenParams) SetTxnId(val int64) { + p.TxnId = val } -func (p *TTransmitDataResult_) SetDestNodeId(val *types.TPlanNodeId) { - p.DestNodeId = val +func (p *TTabletWriterOpenParams) SetSchema(val *descriptors.TOlapTableSchemaParam) { + p.Schema = val } - -var fieldIDToName_TTransmitDataResult_ = map[int16]string{ - 1: "status", - 2: "packet_seq", - 3: "dest_fragment_instance_id", - 4: "dest_node_id", +func (p *TTabletWriterOpenParams) SetTablets(val []*TTabletWithPartition) { + p.Tablets = val } - -func (p *TTransmitDataResult_) IsSetStatus() bool { - return 
p.Status != nil +func (p *TTabletWriterOpenParams) SetNumSenders(val int32) { + p.NumSenders = val } -func (p *TTransmitDataResult_) IsSetPacketSeq() bool { - return p.PacketSeq != nil +var fieldIDToName_TTabletWriterOpenParams = map[int16]string{ + 1: "id", + 2: "index_id", + 3: "txn_id", + 4: "schema", + 5: "tablets", + 6: "num_senders", } -func (p *TTransmitDataResult_) IsSetDestFragmentInstanceId() bool { - return p.DestFragmentInstanceId != nil +func (p *TTabletWriterOpenParams) IsSetId() bool { + return p.Id != nil } -func (p *TTransmitDataResult_) IsSetDestNodeId() bool { - return p.DestNodeId != nil +func (p *TTabletWriterOpenParams) IsSetSchema() bool { + return p.Schema != nil } -func (p *TTransmitDataResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterOpenParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetId bool = false + var issetIndexId bool = false + var issetTxnId bool = false + var issetSchema bool = false + var issetTablets bool = false + var issetNumSenders bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -15874,47 +21821,60 @@ func (p *TTransmitDataResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetIndexId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetTxnId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetSchema = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.LIST { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + issetTablets = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 4: + case 6: if fieldTypeId == thrift.I32 { - if err = p.ReadField4(iprot); err != nil { + if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetNumSenders = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15923,13 +21883,42 @@ func (p *TTransmitDataResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetIndexId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTxnId { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetSchema { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetTablets { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if 
!issetNumSenders { + fieldId = 6 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransmitDataResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterOpenParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15937,45 +21926,86 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterOpenParams[fieldId])) } -func (p *TTransmitDataResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TTabletWriterOpenParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.Id = _field return nil } +func (p *TTabletWriterOpenParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TTransmitDataResult_) ReadField2(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PacketSeq = &v + _field = v } + p.IndexId = _field return nil } +func (p *TTabletWriterOpenParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TTransmitDataResult_) ReadField3(iprot thrift.TProtocol) error { - p.DestFragmentInstanceId = types.NewTUniqueId() - if err := p.DestFragmentInstanceId.Read(iprot); err != nil { + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TxnId = _field + return nil +} +func (p *TTabletWriterOpenParams) ReadField4(iprot thrift.TProtocol) error { + _field := descriptors.NewTOlapTableSchemaParam() + if err := _field.Read(iprot); err != nil { return err } + p.Schema = _field return nil } +func (p *TTabletWriterOpenParams) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTabletWithPartition, 0, size) + values := make([]TTabletWithPartition, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TTransmitDataResult_) ReadField4(iprot thrift.TProtocol) error { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tablets = _field + return nil +} +func (p *TTabletWriterOpenParams) ReadField6(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DestNodeId = &v + _field = v } + p.NumSenders = _field return nil } -func (p *TTransmitDataResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterOpenParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTransmitDataResult"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterOpenParams"); err != nil { goto 
WriteStructBeginError } if p != nil { @@ -15995,7 +22025,14 @@ func (p *TTransmitDataResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16014,17 +22051,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTransmitDataResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError - } - if err := p.Status.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTabletWriterOpenParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Id.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -16033,17 +22068,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTransmitDataResult_) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetPacketSeq() { - if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.PacketSeq); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTabletWriterOpenParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.IndexId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -16052,17 +22085,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTransmitDataResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDestFragmentInstanceId() { - if err = oprot.WriteFieldBegin("dest_fragment_instance_id", thrift.STRUCT, 3); err != nil { - goto WriteFieldBeginError - } - if err := p.DestFragmentInstanceId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTabletWriterOpenParams) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TxnId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -16071,17 +22102,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTransmitDataResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDestNodeId() { - if err = oprot.WriteFieldBegin("dest_node_id", thrift.I32, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.DestNodeId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTabletWriterOpenParams) 
writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("schema", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.Schema.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -16090,111 +22119,168 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TTransmitDataResult_) String() string { +func (p *TTabletWriterOpenParams) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { + return err + } + for _, v := range p.Tablets { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TTabletWriterOpenParams) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.NumSenders); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TTabletWriterOpenParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTransmitDataResult_(%+v)", *p) + return fmt.Sprintf("TTabletWriterOpenParams(%+v)", *p) + } -func (p *TTransmitDataResult_) DeepEqual(ano *TTransmitDataResult_) bool { +func (p *TTabletWriterOpenParams) DeepEqual(ano *TTabletWriterOpenParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.PacketSeq) { + if !p.Field2DeepEqual(ano.IndexId) { return false } - if !p.Field3DeepEqual(ano.DestFragmentInstanceId) { + if !p.Field3DeepEqual(ano.TxnId) { return false } - if !p.Field4DeepEqual(ano.DestNodeId) { + if !p.Field4DeepEqual(ano.Schema) { + return false + } + if !p.Field5DeepEqual(ano.Tablets) { + return false + } + if !p.Field6DeepEqual(ano.NumSenders) { return false } return true } -func (p *TTransmitDataResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TTabletWriterOpenParams) Field1DeepEqual(src *types.TUniqueId) bool { - if !p.Status.DeepEqual(src) { + if !p.Id.DeepEqual(src) { return false } return true } -func (p *TTransmitDataResult_) Field2DeepEqual(src *int64) bool { +func (p *TTabletWriterOpenParams) Field2DeepEqual(src int64) bool { - if p.PacketSeq == src { - return true - } else if p.PacketSeq == nil || src == nil { + if p.IndexId != src { return false } - if *p.PacketSeq != *src { + return true +} +func (p *TTabletWriterOpenParams) Field3DeepEqual(src int64) bool { + + if p.TxnId != src { return false } return true } -func (p *TTransmitDataResult_) Field3DeepEqual(src *types.TUniqueId) bool { +func (p 
*TTabletWriterOpenParams) Field4DeepEqual(src *descriptors.TOlapTableSchemaParam) bool { - if !p.DestFragmentInstanceId.DeepEqual(src) { + if !p.Schema.DeepEqual(src) { return false } return true } -func (p *TTransmitDataResult_) Field4DeepEqual(src *types.TPlanNodeId) bool { +func (p *TTabletWriterOpenParams) Field5DeepEqual(src []*TTabletWithPartition) bool { - if p.DestNodeId == src { - return true - } else if p.DestNodeId == nil || src == nil { + if len(p.Tablets) != len(src) { return false } - if *p.DestNodeId != *src { - return false + for i, v := range p.Tablets { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } return true } +func (p *TTabletWriterOpenParams) Field6DeepEqual(src int32) bool { -type TTabletWithPartition struct { - PartitionId int64 `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"` - TabletId int64 `thrift:"tablet_id,2,required" frugal:"2,required,i64" json:"tablet_id"` + if p.NumSenders != src { + return false + } + return true } -func NewTTabletWithPartition() *TTabletWithPartition { - return &TTabletWithPartition{} +type TTabletWriterOpenResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` } -func (p *TTabletWithPartition) InitDefault() { - *p = TTabletWithPartition{} +func NewTTabletWriterOpenResult_() *TTabletWriterOpenResult_ { + return &TTabletWriterOpenResult_{} } -func (p *TTabletWithPartition) GetPartitionId() (v int64) { - return p.PartitionId +func (p *TTabletWriterOpenResult_) InitDefault() { } -func (p *TTabletWithPartition) GetTabletId() (v int64) { - return p.TabletId +var TTabletWriterOpenResult__Status_DEFAULT *status.TStatus + +func (p *TTabletWriterOpenResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TTabletWriterOpenResult__Status_DEFAULT + } + return p.Status } -func (p *TTabletWithPartition) SetPartitionId(val int64) { - p.PartitionId = val +func (p *TTabletWriterOpenResult_) SetStatus(val *status.TStatus) { + p.Status = val } -func (p *TTabletWithPartition) SetTabletId(val int64) { - p.TabletId = val + +var fieldIDToName_TTabletWriterOpenResult_ = map[int16]string{ + 1: "status", } -var fieldIDToName_TTabletWithPartition = map[int16]string{ - 1: "partition_id", - 2: "tablet_id", +func (p *TTabletWriterOpenResult_) IsSetStatus() bool { + return p.Status != nil } -func (p *TTabletWithPartition) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterOpenResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetPartitionId bool = false - var issetTabletId bool = false + var issetStatus bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -16211,33 +22297,19 @@ func (p *TTabletWithPartition) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetPartitionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = 
iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16246,22 +22318,17 @@ func (p *TTabletWithPartition) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetPartitionId { + if !issetStatus { fieldId = 1 goto RequiredFieldNotSetError } - - if !issetTabletId { - fieldId = 2 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWithPartition[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterOpenResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16270,30 +22337,21 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWithPartition[fieldId])) -} - -func (p *TTabletWithPartition) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.PartitionId = v - } - return nil + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterOpenResult_[fieldId])) } -func (p *TTabletWithPartition) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TTabletWriterOpenResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err - } else { - p.TabletId = v } + p.Status = _field return nil } -func (p *TTabletWithPartition) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterOpenResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWithPartition"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterOpenResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16301,11 +22359,6 @@ func (p *TTabletWithPartition) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16324,11 +22377,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWithPartition) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil { +func (p *TTabletWriterOpenResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.PartitionId); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16341,156 +22394,129 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWithPartition) writeField2(oprot thrift.TProtocol) (err error) { - if err = 
oprot.WriteFieldBegin("tablet_id", thrift.I64, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.TabletId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) -} - -func (p *TTabletWithPartition) String() string { +func (p *TTabletWriterOpenResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWithPartition(%+v)", *p) + return fmt.Sprintf("TTabletWriterOpenResult_(%+v)", *p) + } -func (p *TTabletWithPartition) DeepEqual(ano *TTabletWithPartition) bool { +func (p *TTabletWriterOpenResult_) DeepEqual(ano *TTabletWriterOpenResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.PartitionId) { - return false - } - if !p.Field2DeepEqual(ano.TabletId) { + if !p.Field1DeepEqual(ano.Status) { return false } return true } -func (p *TTabletWithPartition) Field1DeepEqual(src int64) bool { - - if p.PartitionId != src { - return false - } - return true -} -func (p *TTabletWithPartition) Field2DeepEqual(src int64) bool { +func (p *TTabletWriterOpenResult_) Field1DeepEqual(src *status.TStatus) bool { - if p.TabletId != src { + if !p.Status.DeepEqual(src) { return false } return true } -type TTabletWriterOpenParams struct { - Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` - IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` - TxnId int64 `thrift:"txn_id,3,required" frugal:"3,required,i64" json:"txn_id"` - Schema *descriptors.TOlapTableSchemaParam `thrift:"schema,4,required" frugal:"4,required,descriptors.TOlapTableSchemaParam" json:"schema"` - Tablets []*TTabletWithPartition `thrift:"tablets,5,required" frugal:"5,required,list" json:"tablets"` - NumSenders int32 `thrift:"num_senders,6,required" frugal:"6,required,i32" json:"num_senders"` +type TTabletWriterAddBatchParams struct { + Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` + IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` + PacketSeq int64 `thrift:"packet_seq,3,required" frugal:"3,required,i64" json:"packet_seq"` + TabletIds []types.TTabletId `thrift:"tablet_ids,4,required" frugal:"4,required,list" json:"tablet_ids"` + RowBatch *data.TRowBatch `thrift:"row_batch,5,required" frugal:"5,required,data.TRowBatch" json:"row_batch"` + SenderNo int32 `thrift:"sender_no,6,required" frugal:"6,required,i32" json:"sender_no"` } -func NewTTabletWriterOpenParams() *TTabletWriterOpenParams { - return &TTabletWriterOpenParams{} +func NewTTabletWriterAddBatchParams() *TTabletWriterAddBatchParams { + return &TTabletWriterAddBatchParams{} } -func (p *TTabletWriterOpenParams) InitDefault() { - *p = TTabletWriterOpenParams{} +func (p *TTabletWriterAddBatchParams) InitDefault() { } -var TTabletWriterOpenParams_Id_DEFAULT *types.TUniqueId +var TTabletWriterAddBatchParams_Id_DEFAULT *types.TUniqueId -func (p *TTabletWriterOpenParams) GetId() (v *types.TUniqueId) { +func (p *TTabletWriterAddBatchParams) GetId() (v *types.TUniqueId) { if !p.IsSetId() { - return TTabletWriterOpenParams_Id_DEFAULT + return TTabletWriterAddBatchParams_Id_DEFAULT } return p.Id } -func (p *TTabletWriterOpenParams) GetIndexId() (v int64) { +func (p 
*TTabletWriterAddBatchParams) GetIndexId() (v int64) { return p.IndexId } -func (p *TTabletWriterOpenParams) GetTxnId() (v int64) { - return p.TxnId +func (p *TTabletWriterAddBatchParams) GetPacketSeq() (v int64) { + return p.PacketSeq } -var TTabletWriterOpenParams_Schema_DEFAULT *descriptors.TOlapTableSchemaParam - -func (p *TTabletWriterOpenParams) GetSchema() (v *descriptors.TOlapTableSchemaParam) { - if !p.IsSetSchema() { - return TTabletWriterOpenParams_Schema_DEFAULT - } - return p.Schema +func (p *TTabletWriterAddBatchParams) GetTabletIds() (v []types.TTabletId) { + return p.TabletIds } -func (p *TTabletWriterOpenParams) GetTablets() (v []*TTabletWithPartition) { - return p.Tablets +var TTabletWriterAddBatchParams_RowBatch_DEFAULT *data.TRowBatch + +func (p *TTabletWriterAddBatchParams) GetRowBatch() (v *data.TRowBatch) { + if !p.IsSetRowBatch() { + return TTabletWriterAddBatchParams_RowBatch_DEFAULT + } + return p.RowBatch } -func (p *TTabletWriterOpenParams) GetNumSenders() (v int32) { - return p.NumSenders +func (p *TTabletWriterAddBatchParams) GetSenderNo() (v int32) { + return p.SenderNo } -func (p *TTabletWriterOpenParams) SetId(val *types.TUniqueId) { +func (p *TTabletWriterAddBatchParams) SetId(val *types.TUniqueId) { p.Id = val } -func (p *TTabletWriterOpenParams) SetIndexId(val int64) { +func (p *TTabletWriterAddBatchParams) SetIndexId(val int64) { p.IndexId = val } -func (p *TTabletWriterOpenParams) SetTxnId(val int64) { - p.TxnId = val +func (p *TTabletWriterAddBatchParams) SetPacketSeq(val int64) { + p.PacketSeq = val } -func (p *TTabletWriterOpenParams) SetSchema(val *descriptors.TOlapTableSchemaParam) { - p.Schema = val +func (p *TTabletWriterAddBatchParams) SetTabletIds(val []types.TTabletId) { + p.TabletIds = val } -func (p *TTabletWriterOpenParams) SetTablets(val []*TTabletWithPartition) { - p.Tablets = val +func (p *TTabletWriterAddBatchParams) SetRowBatch(val *data.TRowBatch) { + p.RowBatch = val } -func (p *TTabletWriterOpenParams) SetNumSenders(val int32) { - p.NumSenders = val +func (p *TTabletWriterAddBatchParams) SetSenderNo(val int32) { + p.SenderNo = val } -var fieldIDToName_TTabletWriterOpenParams = map[int16]string{ +var fieldIDToName_TTabletWriterAddBatchParams = map[int16]string{ 1: "id", 2: "index_id", - 3: "txn_id", - 4: "schema", - 5: "tablets", - 6: "num_senders", + 3: "packet_seq", + 4: "tablet_ids", + 5: "row_batch", + 6: "sender_no", } -func (p *TTabletWriterOpenParams) IsSetId() bool { +func (p *TTabletWriterAddBatchParams) IsSetId() bool { return p.Id != nil } -func (p *TTabletWriterOpenParams) IsSetSchema() bool { - return p.Schema != nil +func (p *TTabletWriterAddBatchParams) IsSetRowBatch() bool { + return p.RowBatch != nil } -func (p *TTabletWriterOpenParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 var issetId bool = false var issetIndexId bool = false - var issetTxnId bool = false - var issetSchema bool = false - var issetTablets bool = false - var issetNumSenders bool = false + var issetPacketSeq bool = false + var issetTabletIds bool = false + var issetRowBatch bool = false + var issetSenderNo bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -16512,10 +22538,8 @@ func (p *TTabletWriterOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -16523,61 +22547,50 @@ func (p *TTabletWriterOpenParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetTxnId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPacketSeq = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetSchema = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetTabletIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetTablets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetRowBatch = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - issetNumSenders = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetSenderNo = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16596,22 +22609,22 @@ func (p *TTabletWriterOpenParams) Read(iprot thrift.TProtocol) (err error) { goto RequiredFieldNotSetError } - if !issetTxnId { + if !issetPacketSeq { fieldId = 3 goto RequiredFieldNotSetError } - if !issetSchema { + if !issetTabletIds { fieldId = 4 goto RequiredFieldNotSetError } - if !issetTablets { + if !issetRowBatch { fieldId = 5 goto RequiredFieldNotSetError } - if !issetNumSenders { + if !issetSenderNo { fieldId = 6 goto RequiredFieldNotSetError } @@ -16621,7 +22634,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterOpenParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterAddBatchParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16630,75 +22643,85 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterOpenParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterAddBatchParams[fieldId])) } -func (p *TTabletWriterOpenParams) ReadField1(iprot thrift.TProtocol) error { - 
p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { +func (p *TTabletWriterAddBatchParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.Id = _field return nil } +func (p *TTabletWriterAddBatchParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TTabletWriterOpenParams) ReadField2(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IndexId = v + _field = v } + p.IndexId = _field return nil } +func (p *TTabletWriterAddBatchParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TTabletWriterOpenParams) ReadField3(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TxnId = v - } - return nil -} - -func (p *TTabletWriterOpenParams) ReadField4(iprot thrift.TProtocol) error { - p.Schema = descriptors.NewTOlapTableSchemaParam() - if err := p.Schema.Read(iprot); err != nil { - return err + _field = v } + p.PacketSeq = _field return nil } - -func (p *TTabletWriterOpenParams) ReadField5(iprot thrift.TProtocol) error { +func (p *TTabletWriterAddBatchParams) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Tablets = make([]*TTabletWithPartition, 0, size) + _field := make([]types.TTabletId, 0, size) for i := 0; i < size; i++ { - _elem := NewTTabletWithPartition() - if err := _elem.Read(iprot); err != nil { + + var _elem types.TTabletId + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _elem = v } - p.Tablets = append(p.Tablets, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TabletIds = _field + return nil +} +func (p *TTabletWriterAddBatchParams) ReadField5(iprot thrift.TProtocol) error { + _field := data.NewTRowBatch() + if err := _field.Read(iprot); err != nil { + return err + } + p.RowBatch = _field return nil } +func (p *TTabletWriterAddBatchParams) ReadField6(iprot thrift.TProtocol) error { -func (p *TTabletWriterOpenParams) ReadField6(iprot thrift.TProtocol) error { + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumSenders = v + _field = v } + p.SenderNo = _field return nil } -func (p *TTabletWriterOpenParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterOpenParams"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterAddBatchParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16726,7 +22749,6 @@ func (p *TTabletWriterOpenParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16745,7 +22767,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchParams) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -16762,7 +22784,7 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField2(oprot thrift.TProtocol) (err error) { +func (p 
*TTabletWriterAddBatchParams) writeField2(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { goto WriteFieldBeginError } @@ -16779,11 +22801,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 3); err != nil { +func (p *TTabletWriterAddBatchParams) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.TxnId); err != nil { + if err := oprot.WriteI64(p.PacketSeq); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16796,11 +22818,19 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("schema", thrift.STRUCT, 4); err != nil { +func (p *TTabletWriterAddBatchParams) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 4); err != nil { goto WriteFieldBeginError } - if err := p.Schema.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { + return err + } + for _, v := range p.TabletIds { + if err := oprot.WriteI64(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16813,19 +22843,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 5); err != nil { +func (p *TTabletWriterAddBatchParams) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("row_batch", thrift.STRUCT, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tablets)); err != nil { - return err - } - for _, v := range p.Tablets { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := p.RowBatch.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16838,11 +22860,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TTabletWriterOpenParams) writeField6(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 6); err != nil { +func (p *TTabletWriterAddBatchParams) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("sender_no", thrift.I32, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(p.NumSenders); err != nil { + if err := oprot.WriteI32(p.SenderNo); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -16855,14 +22877,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TTabletWriterOpenParams) String() string { +func (p *TTabletWriterAddBatchParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterOpenParams(%+v)", *p) + return fmt.Sprintf("TTabletWriterAddBatchParams(%+v)", *p) + } -func (p *TTabletWriterOpenParams) 
DeepEqual(ano *TTabletWriterOpenParams) bool { +func (p *TTabletWriterAddBatchParams) DeepEqual(ano *TTabletWriterAddBatchParams) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16874,103 +22897,102 @@ func (p *TTabletWriterOpenParams) DeepEqual(ano *TTabletWriterOpenParams) bool { if !p.Field2DeepEqual(ano.IndexId) { return false } - if !p.Field3DeepEqual(ano.TxnId) { + if !p.Field3DeepEqual(ano.PacketSeq) { return false } - if !p.Field4DeepEqual(ano.Schema) { + if !p.Field4DeepEqual(ano.TabletIds) { return false } - if !p.Field5DeepEqual(ano.Tablets) { + if !p.Field5DeepEqual(ano.RowBatch) { return false } - if !p.Field6DeepEqual(ano.NumSenders) { + if !p.Field6DeepEqual(ano.SenderNo) { return false } return true } -func (p *TTabletWriterOpenParams) Field1DeepEqual(src *types.TUniqueId) bool { +func (p *TTabletWriterAddBatchParams) Field1DeepEqual(src *types.TUniqueId) bool { if !p.Id.DeepEqual(src) { return false } return true } -func (p *TTabletWriterOpenParams) Field2DeepEqual(src int64) bool { +func (p *TTabletWriterAddBatchParams) Field2DeepEqual(src int64) bool { if p.IndexId != src { return false } return true } -func (p *TTabletWriterOpenParams) Field3DeepEqual(src int64) bool { +func (p *TTabletWriterAddBatchParams) Field3DeepEqual(src int64) bool { - if p.TxnId != src { + if p.PacketSeq != src { return false } return true } -func (p *TTabletWriterOpenParams) Field4DeepEqual(src *descriptors.TOlapTableSchemaParam) bool { +func (p *TTabletWriterAddBatchParams) Field4DeepEqual(src []types.TTabletId) bool { - if !p.Schema.DeepEqual(src) { + if len(p.TabletIds) != len(src) { return false } + for i, v := range p.TabletIds { + _src := src[i] + if v != _src { + return false + } + } return true } -func (p *TTabletWriterOpenParams) Field5DeepEqual(src []*TTabletWithPartition) bool { +func (p *TTabletWriterAddBatchParams) Field5DeepEqual(src *data.TRowBatch) bool { - if len(p.Tablets) != len(src) { + if !p.RowBatch.DeepEqual(src) { return false } - for i, v := range p.Tablets { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TTabletWriterOpenParams) Field6DeepEqual(src int32) bool { +func (p *TTabletWriterAddBatchParams) Field6DeepEqual(src int32) bool { - if p.NumSenders != src { + if p.SenderNo != src { return false } return true } -type TTabletWriterOpenResult_ struct { +type TTabletWriterAddBatchResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` } -func NewTTabletWriterOpenResult_() *TTabletWriterOpenResult_ { - return &TTabletWriterOpenResult_{} +func NewTTabletWriterAddBatchResult_() *TTabletWriterAddBatchResult_ { + return &TTabletWriterAddBatchResult_{} } -func (p *TTabletWriterOpenResult_) InitDefault() { - *p = TTabletWriterOpenResult_{} +func (p *TTabletWriterAddBatchResult_) InitDefault() { } -var TTabletWriterOpenResult__Status_DEFAULT *status.TStatus +var TTabletWriterAddBatchResult__Status_DEFAULT *status.TStatus -func (p *TTabletWriterOpenResult_) GetStatus() (v *status.TStatus) { +func (p *TTabletWriterAddBatchResult_) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TTabletWriterOpenResult__Status_DEFAULT + return TTabletWriterAddBatchResult__Status_DEFAULT } return p.Status } -func (p *TTabletWriterOpenResult_) SetStatus(val *status.TStatus) { +func (p *TTabletWriterAddBatchResult_) SetStatus(val *status.TStatus) { p.Status = val } -var fieldIDToName_TTabletWriterOpenResult_ = map[int16]string{ +var 
fieldIDToName_TTabletWriterAddBatchResult_ = map[int16]string{ 1: "status", } -func (p *TTabletWriterOpenResult_) IsSetStatus() bool { +func (p *TTabletWriterAddBatchResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TTabletWriterOpenResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16996,17 +23018,14 @@ func (p *TTabletWriterOpenResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17025,7 +23044,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterOpenResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterAddBatchResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17034,20 +23053,21 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterOpenResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterAddBatchResult_[fieldId])) } -func (p *TTabletWriterOpenResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TTabletWriterAddBatchResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } -func (p *TTabletWriterOpenResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterOpenResult"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterAddBatchResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17055,7 +23075,6 @@ func (p *TTabletWriterOpenResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17074,7 +23093,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterOpenResult_) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterAddBatchResult_) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17091,14 +23110,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterOpenResult_) String() string { +func (p *TTabletWriterAddBatchResult_) 
String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterOpenResult_(%+v)", *p) + return fmt.Sprintf("TTabletWriterAddBatchResult_(%+v)", *p) + } -func (p *TTabletWriterOpenResult_) DeepEqual(ano *TTabletWriterOpenResult_) bool { +func (p *TTabletWriterAddBatchResult_) DeepEqual(ano *TTabletWriterAddBatchResult_) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17110,7 +23130,7 @@ func (p *TTabletWriterOpenResult_) DeepEqual(ano *TTabletWriterOpenResult_) bool return true } -func (p *TTabletWriterOpenResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TTabletWriterAddBatchResult_) Field1DeepEqual(src *status.TStatus) bool { if !p.Status.DeepEqual(src) { return false @@ -17118,101 +23138,61 @@ func (p *TTabletWriterOpenResult_) Field1DeepEqual(src *status.TStatus) bool { return true } -type TTabletWriterAddBatchParams struct { - Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` - IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` - PacketSeq int64 `thrift:"packet_seq,3,required" frugal:"3,required,i64" json:"packet_seq"` - TabletIds []types.TTabletId `thrift:"tablet_ids,4,required" frugal:"4,required,list" json:"tablet_ids"` - RowBatch *data.TRowBatch `thrift:"row_batch,5,required" frugal:"5,required,data.TRowBatch" json:"row_batch"` - SenderNo int32 `thrift:"sender_no,6,required" frugal:"6,required,i32" json:"sender_no"` -} - -func NewTTabletWriterAddBatchParams() *TTabletWriterAddBatchParams { - return &TTabletWriterAddBatchParams{} -} - -func (p *TTabletWriterAddBatchParams) InitDefault() { - *p = TTabletWriterAddBatchParams{} -} - -var TTabletWriterAddBatchParams_Id_DEFAULT *types.TUniqueId - -func (p *TTabletWriterAddBatchParams) GetId() (v *types.TUniqueId) { - if !p.IsSetId() { - return TTabletWriterAddBatchParams_Id_DEFAULT - } - return p.Id -} - -func (p *TTabletWriterAddBatchParams) GetIndexId() (v int64) { - return p.IndexId +type TTabletWriterCloseParams struct { + Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` + IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` + SenderNo int32 `thrift:"sender_no,3,required" frugal:"3,required,i32" json:"sender_no"` } -func (p *TTabletWriterAddBatchParams) GetPacketSeq() (v int64) { - return p.PacketSeq +func NewTTabletWriterCloseParams() *TTabletWriterCloseParams { + return &TTabletWriterCloseParams{} } -func (p *TTabletWriterAddBatchParams) GetTabletIds() (v []types.TTabletId) { - return p.TabletIds +func (p *TTabletWriterCloseParams) InitDefault() { } -var TTabletWriterAddBatchParams_RowBatch_DEFAULT *data.TRowBatch +var TTabletWriterCloseParams_Id_DEFAULT *types.TUniqueId -func (p *TTabletWriterAddBatchParams) GetRowBatch() (v *data.TRowBatch) { - if !p.IsSetRowBatch() { - return TTabletWriterAddBatchParams_RowBatch_DEFAULT +func (p *TTabletWriterCloseParams) GetId() (v *types.TUniqueId) { + if !p.IsSetId() { + return TTabletWriterCloseParams_Id_DEFAULT } - return p.RowBatch + return p.Id } -func (p *TTabletWriterAddBatchParams) GetSenderNo() (v int32) { +func (p *TTabletWriterCloseParams) GetIndexId() (v int64) { + return p.IndexId +} + +func (p *TTabletWriterCloseParams) GetSenderNo() (v int32) { return p.SenderNo } -func (p *TTabletWriterAddBatchParams) SetId(val *types.TUniqueId) { +func (p *TTabletWriterCloseParams) SetId(val *types.TUniqueId) { p.Id = val } -func (p *TTabletWriterAddBatchParams) SetIndexId(val int64) { +func (p 
*TTabletWriterCloseParams) SetIndexId(val int64) { p.IndexId = val } -func (p *TTabletWriterAddBatchParams) SetPacketSeq(val int64) { - p.PacketSeq = val -} -func (p *TTabletWriterAddBatchParams) SetTabletIds(val []types.TTabletId) { - p.TabletIds = val -} -func (p *TTabletWriterAddBatchParams) SetRowBatch(val *data.TRowBatch) { - p.RowBatch = val -} -func (p *TTabletWriterAddBatchParams) SetSenderNo(val int32) { +func (p *TTabletWriterCloseParams) SetSenderNo(val int32) { p.SenderNo = val } -var fieldIDToName_TTabletWriterAddBatchParams = map[int16]string{ +var fieldIDToName_TTabletWriterCloseParams = map[int16]string{ 1: "id", 2: "index_id", - 3: "packet_seq", - 4: "tablet_ids", - 5: "row_batch", - 6: "sender_no", + 3: "sender_no", } -func (p *TTabletWriterAddBatchParams) IsSetId() bool { +func (p *TTabletWriterCloseParams) IsSetId() bool { return p.Id != nil } -func (p *TTabletWriterAddBatchParams) IsSetRowBatch() bool { - return p.RowBatch != nil -} - -func (p *TTabletWriterAddBatchParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 var issetId bool = false var issetIndexId bool = false - var issetPacketSeq bool = false - var issetTabletIds bool = false - var issetRowBatch bool = false var issetSenderNo bool = false if _, err = iprot.ReadStructBegin(); err != nil { @@ -17235,10 +23215,8 @@ func (p *TTabletWriterAddBatchParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -17246,61 +23224,23 @@ func (p *TTabletWriterAddBatchParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - issetPacketSeq = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - issetTabletIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - issetRowBatch = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: if fieldTypeId == thrift.I32 { - if err = p.ReadField6(iprot); err != nil { + if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } issetSenderNo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17319,23 +23259,8 @@ func (p *TTabletWriterAddBatchParams) Read(iprot thrift.TProtocol) (err error) { goto RequiredFieldNotSetError } - if !issetPacketSeq { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetTabletIds { - fieldId = 4 - goto RequiredFieldNotSetError - } - - if 
!issetRowBatch { - fieldId = 5 - goto RequiredFieldNotSetError - } - if !issetSenderNo { - fieldId = 6 + fieldId = 3 goto RequiredFieldNotSetError } return nil @@ -17344,7 +23269,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterAddBatchParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCloseParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17353,77 +23278,43 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterAddBatchParams[fieldId])) -} - -func (p *TTabletWriterAddBatchParams) ReadField1(iprot thrift.TProtocol) error { - p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { - return err - } - return nil + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCloseParams[fieldId])) } -func (p *TTabletWriterAddBatchParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TTabletWriterCloseParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err - } else { - p.IndexId = v } + p.Id = _field return nil } +func (p *TTabletWriterCloseParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TTabletWriterAddBatchParams) ReadField3(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PacketSeq = v - } - return nil -} - -func (p *TTabletWriterAddBatchParams) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.TabletIds = make([]types.TTabletId, 0, size) - for i := 0; i < size; i++ { - var _elem types.TTabletId - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - _elem = v - } - - p.TabletIds = append(p.TabletIds, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TTabletWriterAddBatchParams) ReadField5(iprot thrift.TProtocol) error { - p.RowBatch = data.NewTRowBatch() - if err := p.RowBatch.Read(iprot); err != nil { - return err + _field = v } + p.IndexId = _field return nil } +func (p *TTabletWriterCloseParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TTabletWriterAddBatchParams) ReadField6(iprot thrift.TProtocol) error { + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SenderNo = v + _field = v } + p.SenderNo = _field return nil } -func (p *TTabletWriterAddBatchParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterAddBatchParams"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterCloseParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17439,19 +23330,6 @@ func (p *TTabletWriterAddBatchParams) Write(oprot thrift.TProtocol) (err 
error) fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17470,7 +23348,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterAddBatchParams) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseParams) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17487,7 +23365,7 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterAddBatchParams) writeField2(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseParams) writeField2(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { goto WriteFieldBeginError } @@ -17504,11 +23382,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTabletWriterAddBatchParams) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("packet_seq", thrift.I64, 3); err != nil { +func (p *TTabletWriterCloseParams) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("sender_no", thrift.I32, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.PacketSeq); err != nil { + if err := oprot.WriteI32(p.SenderNo); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -17521,73 +23399,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTabletWriterAddBatchParams) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil { - return err - } - for _, v := range p.TabletIds { - if err := oprot.WriteI64(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TTabletWriterAddBatchParams) writeField5(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("row_batch", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.RowBatch.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TTabletWriterAddBatchParams) writeField6(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("sender_no", thrift.I32, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.SenderNo); err != nil { - return err - } - if 
err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TTabletWriterAddBatchParams) String() string { +func (p *TTabletWriterCloseParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterAddBatchParams(%+v)", *p) + return fmt.Sprintf("TTabletWriterCloseParams(%+v)", *p) + } -func (p *TTabletWriterAddBatchParams) DeepEqual(ano *TTabletWriterAddBatchParams) bool { +func (p *TTabletWriterCloseParams) DeepEqual(ano *TTabletWriterCloseParams) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17596,66 +23416,30 @@ func (p *TTabletWriterAddBatchParams) DeepEqual(ano *TTabletWriterAddBatchParams if !p.Field1DeepEqual(ano.Id) { return false } - if !p.Field2DeepEqual(ano.IndexId) { - return false - } - if !p.Field3DeepEqual(ano.PacketSeq) { - return false - } - if !p.Field4DeepEqual(ano.TabletIds) { - return false - } - if !p.Field5DeepEqual(ano.RowBatch) { - return false - } - if !p.Field6DeepEqual(ano.SenderNo) { - return false - } - return true -} - -func (p *TTabletWriterAddBatchParams) Field1DeepEqual(src *types.TUniqueId) bool { - - if !p.Id.DeepEqual(src) { + if !p.Field2DeepEqual(ano.IndexId) { return false } - return true -} -func (p *TTabletWriterAddBatchParams) Field2DeepEqual(src int64) bool { - - if p.IndexId != src { + if !p.Field3DeepEqual(ano.SenderNo) { return false } return true } -func (p *TTabletWriterAddBatchParams) Field3DeepEqual(src int64) bool { - if p.PacketSeq != src { - return false - } - return true -} -func (p *TTabletWriterAddBatchParams) Field4DeepEqual(src []types.TTabletId) bool { +func (p *TTabletWriterCloseParams) Field1DeepEqual(src *types.TUniqueId) bool { - if len(p.TabletIds) != len(src) { + if !p.Id.DeepEqual(src) { return false } - for i, v := range p.TabletIds { - _src := src[i] - if v != _src { - return false - } - } return true } -func (p *TTabletWriterAddBatchParams) Field5DeepEqual(src *data.TRowBatch) bool { +func (p *TTabletWriterCloseParams) Field2DeepEqual(src int64) bool { - if !p.RowBatch.DeepEqual(src) { + if p.IndexId != src { return false } return true } -func (p *TTabletWriterAddBatchParams) Field6DeepEqual(src int32) bool { +func (p *TTabletWriterCloseParams) Field3DeepEqual(src int32) bool { if p.SenderNo != src { return false @@ -17663,39 +23447,38 @@ func (p *TTabletWriterAddBatchParams) Field6DeepEqual(src int32) bool { return true } -type TTabletWriterAddBatchResult_ struct { +type TTabletWriterCloseResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` } -func NewTTabletWriterAddBatchResult_() *TTabletWriterAddBatchResult_ { - return &TTabletWriterAddBatchResult_{} +func NewTTabletWriterCloseResult_() *TTabletWriterCloseResult_ { + return &TTabletWriterCloseResult_{} } -func (p *TTabletWriterAddBatchResult_) InitDefault() { - *p = TTabletWriterAddBatchResult_{} +func (p *TTabletWriterCloseResult_) InitDefault() { } -var TTabletWriterAddBatchResult__Status_DEFAULT *status.TStatus +var TTabletWriterCloseResult__Status_DEFAULT *status.TStatus -func (p *TTabletWriterAddBatchResult_) GetStatus() (v *status.TStatus) { +func (p *TTabletWriterCloseResult_) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TTabletWriterAddBatchResult__Status_DEFAULT + return 
TTabletWriterCloseResult__Status_DEFAULT } return p.Status } -func (p *TTabletWriterAddBatchResult_) SetStatus(val *status.TStatus) { +func (p *TTabletWriterCloseResult_) SetStatus(val *status.TStatus) { p.Status = val } -var fieldIDToName_TTabletWriterAddBatchResult_ = map[int16]string{ +var fieldIDToName_TTabletWriterCloseResult_ = map[int16]string{ 1: "status", } -func (p *TTabletWriterAddBatchResult_) IsSetStatus() bool { +func (p *TTabletWriterCloseResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TTabletWriterAddBatchResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17721,17 +23504,14 @@ func (p *TTabletWriterAddBatchResult_) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17750,7 +23530,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterAddBatchResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCloseResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17759,20 +23539,21 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterAddBatchResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCloseResult_[fieldId])) } -func (p *TTabletWriterAddBatchResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TTabletWriterCloseResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } -func (p *TTabletWriterAddBatchResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterAddBatchResult"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterCloseResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17780,7 +23561,6 @@ func (p *TTabletWriterAddBatchResult_) Write(oprot thrift.TProtocol) (err error) fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17799,7 +23579,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterAddBatchResult_) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCloseResult_) writeField1(oprot thrift.TProtocol) (err error) { if err = 
oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17816,14 +23596,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterAddBatchResult_) String() string { +func (p *TTabletWriterCloseResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterAddBatchResult_(%+v)", *p) + return fmt.Sprintf("TTabletWriterCloseResult_(%+v)", *p) + } -func (p *TTabletWriterAddBatchResult_) DeepEqual(ano *TTabletWriterAddBatchResult_) bool { +func (p *TTabletWriterCloseResult_) DeepEqual(ano *TTabletWriterCloseResult_) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17835,7 +23616,7 @@ func (p *TTabletWriterAddBatchResult_) DeepEqual(ano *TTabletWriterAddBatchResul return true } -func (p *TTabletWriterAddBatchResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TTabletWriterCloseResult_) Field1DeepEqual(src *status.TStatus) bool { if !p.Status.DeepEqual(src) { return false @@ -17843,57 +23624,56 @@ func (p *TTabletWriterAddBatchResult_) Field1DeepEqual(src *status.TStatus) bool return true } -type TTabletWriterCloseParams struct { +type TTabletWriterCancelParams struct { Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` SenderNo int32 `thrift:"sender_no,3,required" frugal:"3,required,i32" json:"sender_no"` } -func NewTTabletWriterCloseParams() *TTabletWriterCloseParams { - return &TTabletWriterCloseParams{} +func NewTTabletWriterCancelParams() *TTabletWriterCancelParams { + return &TTabletWriterCancelParams{} } -func (p *TTabletWriterCloseParams) InitDefault() { - *p = TTabletWriterCloseParams{} +func (p *TTabletWriterCancelParams) InitDefault() { } -var TTabletWriterCloseParams_Id_DEFAULT *types.TUniqueId +var TTabletWriterCancelParams_Id_DEFAULT *types.TUniqueId -func (p *TTabletWriterCloseParams) GetId() (v *types.TUniqueId) { +func (p *TTabletWriterCancelParams) GetId() (v *types.TUniqueId) { if !p.IsSetId() { - return TTabletWriterCloseParams_Id_DEFAULT + return TTabletWriterCancelParams_Id_DEFAULT } return p.Id } -func (p *TTabletWriterCloseParams) GetIndexId() (v int64) { +func (p *TTabletWriterCancelParams) GetIndexId() (v int64) { return p.IndexId } -func (p *TTabletWriterCloseParams) GetSenderNo() (v int32) { +func (p *TTabletWriterCancelParams) GetSenderNo() (v int32) { return p.SenderNo } -func (p *TTabletWriterCloseParams) SetId(val *types.TUniqueId) { +func (p *TTabletWriterCancelParams) SetId(val *types.TUniqueId) { p.Id = val } -func (p *TTabletWriterCloseParams) SetIndexId(val int64) { +func (p *TTabletWriterCancelParams) SetIndexId(val int64) { p.IndexId = val } -func (p *TTabletWriterCloseParams) SetSenderNo(val int32) { +func (p *TTabletWriterCancelParams) SetSenderNo(val int32) { p.SenderNo = val } -var fieldIDToName_TTabletWriterCloseParams = map[int16]string{ +var fieldIDToName_TTabletWriterCancelParams = map[int16]string{ 1: "id", 2: "index_id", 3: "sender_no", } -func (p *TTabletWriterCloseParams) IsSetId() bool { +func (p *TTabletWriterCancelParams) IsSetId() bool { return p.Id != nil } -func (p *TTabletWriterCloseParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCancelParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17921,10 +23701,8 @@ func (p *TTabletWriterCloseParams) Read(iprot thrift.TProtocol) 
(err error) { goto ReadFieldError } issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -17932,10 +23710,8 @@ func (p *TTabletWriterCloseParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndexId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -17943,17 +23719,14 @@ func (p *TTabletWriterCloseParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSenderNo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17982,7 +23755,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCloseParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCancelParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17991,38 +23764,43 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCloseParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCancelParams[fieldId])) } -func (p *TTabletWriterCloseParams) ReadField1(iprot thrift.TProtocol) error { - p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { +func (p *TTabletWriterCancelParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.Id = _field return nil } +func (p *TTabletWriterCancelParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TTabletWriterCloseParams) ReadField2(iprot thrift.TProtocol) error { + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.IndexId = v + _field = v } + p.IndexId = _field return nil } +func (p *TTabletWriterCancelParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TTabletWriterCloseParams) ReadField3(iprot thrift.TProtocol) error { + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SenderNo = v + _field = v } + p.SenderNo = _field return nil } -func (p *TTabletWriterCloseParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCancelParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterCloseParams"); err != nil { + if err = oprot.WriteStructBegin("TTabletWriterCancelParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18038,7 +23816,6 @@ func (p *TTabletWriterCloseParams) Write(oprot 
thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18057,7 +23834,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterCloseParams) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCancelParams) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -18074,7 +23851,7 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterCloseParams) writeField2(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCancelParams) writeField2(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { goto WriteFieldBeginError } @@ -18091,7 +23868,7 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTabletWriterCloseParams) writeField3(oprot thrift.TProtocol) (err error) { +func (p *TTabletWriterCancelParams) writeField3(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("sender_no", thrift.I32, 3); err != nil { goto WriteFieldBeginError } @@ -18108,14 +23885,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTabletWriterCloseParams) String() string { +func (p *TTabletWriterCancelParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterCloseParams(%+v)", *p) + return fmt.Sprintf("TTabletWriterCancelParams(%+v)", *p) + } -func (p *TTabletWriterCloseParams) DeepEqual(ano *TTabletWriterCloseParams) bool { +func (p *TTabletWriterCancelParams) DeepEqual(ano *TTabletWriterCancelParams) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18133,21 +23911,21 @@ func (p *TTabletWriterCloseParams) DeepEqual(ano *TTabletWriterCloseParams) bool return true } -func (p *TTabletWriterCloseParams) Field1DeepEqual(src *types.TUniqueId) bool { +func (p *TTabletWriterCancelParams) Field1DeepEqual(src *types.TUniqueId) bool { if !p.Id.DeepEqual(src) { return false } return true } -func (p *TTabletWriterCloseParams) Field2DeepEqual(src int64) bool { +func (p *TTabletWriterCancelParams) Field2DeepEqual(src int64) bool { if p.IndexId != src { return false } return true } -func (p *TTabletWriterCloseParams) Field3DeepEqual(src int32) bool { +func (p *TTabletWriterCancelParams) Field3DeepEqual(src int32) bool { if p.SenderNo != src { return false @@ -18155,43 +23933,144 @@ func (p *TTabletWriterCloseParams) Field3DeepEqual(src int32) bool { return true } -type TTabletWriterCloseResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` +type TTabletWriterCancelResult_ struct { +} + +func NewTTabletWriterCancelResult_() *TTabletWriterCancelResult_ { + return &TTabletWriterCancelResult_{} +} + +func (p *TTabletWriterCancelResult_) InitDefault() { +} + +var fieldIDToName_TTabletWriterCancelResult_ = map[int16]string{} + +func (p *TTabletWriterCancelResult_) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId 
== thrift.STOP { + break + } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTabletWriterCancelResult_) Write(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TTabletWriterCancelResult"); err != nil { + goto WriteStructBeginError + } + if p != nil { + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTabletWriterCancelResult_) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTabletWriterCancelResult_(%+v)", *p) + +} + +func (p *TTabletWriterCancelResult_) DeepEqual(ano *TTabletWriterCancelResult_) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + return true +} + +type TFetchDataParams struct { + ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,required" frugal:"2,required,types.TUniqueId" json:"fragment_instance_id"` +} + +func NewTFetchDataParams() *TFetchDataParams { + return &TFetchDataParams{} } -func NewTTabletWriterCloseResult_() *TTabletWriterCloseResult_ { - return &TTabletWriterCloseResult_{} +func (p *TFetchDataParams) InitDefault() { } -func (p *TTabletWriterCloseResult_) InitDefault() { - *p = TTabletWriterCloseResult_{} +func (p *TFetchDataParams) GetProtocolVersion() (v PaloInternalServiceVersion) { + return p.ProtocolVersion } -var TTabletWriterCloseResult__Status_DEFAULT *status.TStatus +var TFetchDataParams_FragmentInstanceId_DEFAULT *types.TUniqueId -func (p *TTabletWriterCloseResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TTabletWriterCloseResult__Status_DEFAULT +func (p *TFetchDataParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TFetchDataParams_FragmentInstanceId_DEFAULT } - return p.Status + return p.FragmentInstanceId } -func (p *TTabletWriterCloseResult_) SetStatus(val *status.TStatus) { - p.Status = val +func (p *TFetchDataParams) SetProtocolVersion(val PaloInternalServiceVersion) { + p.ProtocolVersion = val +} +func (p *TFetchDataParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val } -var fieldIDToName_TTabletWriterCloseResult_ = map[int16]string{ - 1: "status", +var fieldIDToName_TFetchDataParams = 
map[int16]string{ + 1: "protocol_version", + 2: "fragment_instance_id", } -func (p *TTabletWriterCloseResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TFetchDataParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil } -func (p *TTabletWriterCloseResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TFetchDataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false + var issetProtocolVersion bool = false + var issetFragmentInstanceId bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -18208,22 +24087,28 @@ func (p *TTabletWriterCloseResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError } + issetFragmentInstanceId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18232,17 +24117,22 @@ func (p *TTabletWriterCloseResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetStatus { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetFragmentInstanceId { + fieldId = 2 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCloseResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18251,20 +24141,32 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCloseResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataParams[fieldId])) } -func (p *TTabletWriterCloseResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { +func (p *TFetchDataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field PaloInternalServiceVersion + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = PaloInternalServiceVersion(v) + } + p.ProtocolVersion = _field + return nil +} +func (p *TFetchDataParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.FragmentInstanceId = 
_field return nil } -func (p *TTabletWriterCloseResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TFetchDataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterCloseResult"); err != nil { + if err = oprot.WriteStructBegin("TFetchDataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18272,7 +24174,10 @@ func (p *TTabletWriterCloseResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18291,11 +24196,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterCloseResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { +func (p *TFetchDataParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18308,90 +24213,135 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterCloseResult_) String() string { +func (p *TFetchDataParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.FragmentInstanceId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFetchDataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterCloseResult_(%+v)", *p) + return fmt.Sprintf("TFetchDataParams(%+v)", *p) + } -func (p *TTabletWriterCloseResult_) DeepEqual(ano *TTabletWriterCloseResult_) bool { +func (p *TFetchDataParams) DeepEqual(ano *TFetchDataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Status) { + if !p.Field1DeepEqual(ano.ProtocolVersion) { + return false + } + if !p.Field2DeepEqual(ano.FragmentInstanceId) { return false } return true } -func (p *TTabletWriterCloseResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TFetchDataParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { - if !p.Status.DeepEqual(src) { + if p.ProtocolVersion != src { return false } return true } +func (p *TFetchDataParams) Field2DeepEqual(src *types.TUniqueId) bool { -type TTabletWriterCancelParams struct { - Id *types.TUniqueId `thrift:"id,1,required" frugal:"1,required,types.TUniqueId" json:"id"` - IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"` - SenderNo int32 `thrift:"sender_no,3,required" frugal:"3,required,i32" json:"sender_no"` + if !p.FragmentInstanceId.DeepEqual(src) { + return false + } + return true } -func NewTTabletWriterCancelParams() *TTabletWriterCancelParams { - return &TTabletWriterCancelParams{} +type TFetchDataResult_ struct { + 
ResultBatch *data.TResultBatch `thrift:"result_batch,1,required" frugal:"1,required,data.TResultBatch" json:"result_batch"` + Eos bool `thrift:"eos,2,required" frugal:"2,required,bool" json:"eos"` + PacketNum int32 `thrift:"packet_num,3,required" frugal:"3,required,i32" json:"packet_num"` + Status *status.TStatus `thrift:"status,4,optional" frugal:"4,optional,status.TStatus" json:"status,omitempty"` } -func (p *TTabletWriterCancelParams) InitDefault() { - *p = TTabletWriterCancelParams{} +func NewTFetchDataResult_() *TFetchDataResult_ { + return &TFetchDataResult_{} } -var TTabletWriterCancelParams_Id_DEFAULT *types.TUniqueId +func (p *TFetchDataResult_) InitDefault() { +} -func (p *TTabletWriterCancelParams) GetId() (v *types.TUniqueId) { - if !p.IsSetId() { - return TTabletWriterCancelParams_Id_DEFAULT +var TFetchDataResult__ResultBatch_DEFAULT *data.TResultBatch + +func (p *TFetchDataResult_) GetResultBatch() (v *data.TResultBatch) { + if !p.IsSetResultBatch() { + return TFetchDataResult__ResultBatch_DEFAULT } - return p.Id + return p.ResultBatch } -func (p *TTabletWriterCancelParams) GetIndexId() (v int64) { - return p.IndexId +func (p *TFetchDataResult_) GetEos() (v bool) { + return p.Eos } -func (p *TTabletWriterCancelParams) GetSenderNo() (v int32) { - return p.SenderNo +func (p *TFetchDataResult_) GetPacketNum() (v int32) { + return p.PacketNum } -func (p *TTabletWriterCancelParams) SetId(val *types.TUniqueId) { - p.Id = val + +var TFetchDataResult__Status_DEFAULT *status.TStatus + +func (p *TFetchDataResult_) GetStatus() (v *status.TStatus) { + if !p.IsSetStatus() { + return TFetchDataResult__Status_DEFAULT + } + return p.Status } -func (p *TTabletWriterCancelParams) SetIndexId(val int64) { - p.IndexId = val +func (p *TFetchDataResult_) SetResultBatch(val *data.TResultBatch) { + p.ResultBatch = val } -func (p *TTabletWriterCancelParams) SetSenderNo(val int32) { - p.SenderNo = val +func (p *TFetchDataResult_) SetEos(val bool) { + p.Eos = val +} +func (p *TFetchDataResult_) SetPacketNum(val int32) { + p.PacketNum = val +} +func (p *TFetchDataResult_) SetStatus(val *status.TStatus) { + p.Status = val } -var fieldIDToName_TTabletWriterCancelParams = map[int16]string{ - 1: "id", - 2: "index_id", - 3: "sender_no", +var fieldIDToName_TFetchDataResult_ = map[int16]string{ + 1: "result_batch", + 2: "eos", + 3: "packet_num", + 4: "status", } -func (p *TTabletWriterCancelParams) IsSetId() bool { - return p.Id != nil +func (p *TFetchDataResult_) IsSetResultBatch() bool { + return p.ResultBatch != nil } -func (p *TTabletWriterCancelParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TFetchDataResult_) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TFetchDataResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetId bool = false - var issetIndexId bool = false - var issetSenderNo bool = false + var issetResultBatch bool = false + var issetEos bool = false + var issetPacketNum bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -18412,40 +24362,41 @@ func (p *TTabletWriterCancelParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetResultBatch = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { if err = 
p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetIndexId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetEos = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetSenderNo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetPacketNum = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18454,17 +24405,17 @@ func (p *TTabletWriterCancelParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetId { + if !issetResultBatch { fieldId = 1 goto RequiredFieldNotSetError } - if !issetIndexId { + if !issetEos { fieldId = 2 goto RequiredFieldNotSetError } - if !issetSenderNo { + if !issetPacketNum { fieldId = 3 goto RequiredFieldNotSetError } @@ -18474,7 +24425,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTabletWriterCancelParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18483,38 +24434,51 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTabletWriterCancelParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataResult_[fieldId])) } -func (p *TTabletWriterCancelParams) ReadField1(iprot thrift.TProtocol) error { - p.Id = types.NewTUniqueId() - if err := p.Id.Read(iprot); err != nil { +func (p *TFetchDataResult_) ReadField1(iprot thrift.TProtocol) error { + _field := data.NewTResultBatch() + if err := _field.Read(iprot); err != nil { return err } + p.ResultBatch = _field return nil } +func (p *TFetchDataResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TTabletWriterCancelParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IndexId = v + _field = v } + p.Eos = _field return nil } +func (p *TFetchDataResult_) ReadField3(iprot thrift.TProtocol) error { -func (p *TTabletWriterCancelParams) ReadField3(iprot thrift.TProtocol) error { + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err - } else { - p.SenderNo = v + } else { + _field = v + } + p.PacketNum = _field + return nil +} +func (p *TFetchDataResult_) ReadField4(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { + return err } + 
p.Status = _field return nil } -func (p *TTabletWriterCancelParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TFetchDataResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTabletWriterCancelParams"); err != nil { + if err = oprot.WriteStructBegin("TFetchDataResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18530,7 +24494,10 @@ func (p *TTabletWriterCancelParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18549,11 +24516,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTabletWriterCancelParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil { +func (p *TFetchDataResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("result_batch", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.Id.Write(oprot); err != nil { + if err := p.ResultBatch.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18566,11 +24533,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTabletWriterCancelParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil { +func (p *TFetchDataResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("eos", thrift.BOOL, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.IndexId); err != nil { + if err := oprot.WriteBool(p.Eos); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18583,11 +24550,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTabletWriterCancelParams) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("sender_no", thrift.I32, 3); err != nil { +func (p *TFetchDataResult_) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("packet_num", thrift.I32, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(p.SenderNo); err != nil { + if err := oprot.WriteI32(p.PacketNum); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18600,194 +24567,190 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTabletWriterCancelParams) String() string { +func (p *TFetchDataResult_) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.Status.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFetchDataResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TTabletWriterCancelParams(%+v)", *p) + return fmt.Sprintf("TFetchDataResult_(%+v)", *p) + } -func (p 
*TTabletWriterCancelParams) DeepEqual(ano *TTabletWriterCancelParams) bool { +func (p *TFetchDataResult_) DeepEqual(ano *TFetchDataResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Id) { + if !p.Field1DeepEqual(ano.ResultBatch) { return false } - if !p.Field2DeepEqual(ano.IndexId) { + if !p.Field2DeepEqual(ano.Eos) { return false } - if !p.Field3DeepEqual(ano.SenderNo) { + if !p.Field3DeepEqual(ano.PacketNum) { + return false + } + if !p.Field4DeepEqual(ano.Status) { return false } return true } -func (p *TTabletWriterCancelParams) Field1DeepEqual(src *types.TUniqueId) bool { +func (p *TFetchDataResult_) Field1DeepEqual(src *data.TResultBatch) bool { - if !p.Id.DeepEqual(src) { + if !p.ResultBatch.DeepEqual(src) { return false } return true } -func (p *TTabletWriterCancelParams) Field2DeepEqual(src int64) bool { +func (p *TFetchDataResult_) Field2DeepEqual(src bool) bool { - if p.IndexId != src { + if p.Eos != src { return false } return true } -func (p *TTabletWriterCancelParams) Field3DeepEqual(src int32) bool { +func (p *TFetchDataResult_) Field3DeepEqual(src int32) bool { - if p.SenderNo != src { + if p.PacketNum != src { return false } return true } +func (p *TFetchDataResult_) Field4DeepEqual(src *status.TStatus) bool { -type TTabletWriterCancelResult_ struct { -} - -func NewTTabletWriterCancelResult_() *TTabletWriterCancelResult_ { - return &TTabletWriterCancelResult_{} + if !p.Status.DeepEqual(src) { + return false + } + return true } -func (p *TTabletWriterCancelResult_) InitDefault() { - *p = TTabletWriterCancelResult_{} +type TCondition struct { + ColumnName string `thrift:"column_name,1,required" frugal:"1,required,string" json:"column_name"` + ConditionOp string `thrift:"condition_op,2,required" frugal:"2,required,string" json:"condition_op"` + ConditionValues []string `thrift:"condition_values,3,required" frugal:"3,required,list" json:"condition_values"` + ColumnUniqueId *int32 `thrift:"column_unique_id,4,optional" frugal:"4,optional,i32" json:"column_unique_id,omitempty"` + MarkedByRuntimeFilter bool `thrift:"marked_by_runtime_filter,5,optional" frugal:"5,optional,bool" json:"marked_by_runtime_filter,omitempty"` + CompoundType TCompoundType `thrift:"compound_type,1000,optional" frugal:"1000,optional,TCompoundType" json:"compound_type,omitempty"` } -var fieldIDToName_TTabletWriterCancelResult_ = map[int16]string{} - -func (p *TTabletWriterCancelResult_) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 +func NewTCondition() *TCondition { + return &TCondition{ - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + MarkedByRuntimeFilter: false, + CompoundType: TCompoundType_UNKNOWN, } +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldTypeError - } +func (p *TCondition) InitDefault() { + p.MarkedByRuntimeFilter = false + p.CompoundType = TCompoundType_UNKNOWN +} - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } +func (p *TCondition) GetColumnName() (v string) { + return p.ColumnName +} - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -SkipFieldTypeError: - return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) +func (p *TCondition) GetConditionOp() (v string) { + return p.ConditionOp +} -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +func (p *TCondition) GetConditionValues() (v []string) { + return p.ConditionValues } -func (p *TTabletWriterCancelResult_) Write(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteStructBegin("TTabletWriterCancelResult"); err != nil { - goto WriteStructBeginError - } - if p != nil { +var TCondition_ColumnUniqueId_DEFAULT int32 +func (p *TCondition) GetColumnUniqueId() (v int32) { + if !p.IsSetColumnUniqueId() { + return TCondition_ColumnUniqueId_DEFAULT } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return *p.ColumnUniqueId } -func (p *TTabletWriterCancelResult_) String() string { - if p == nil { - return "" +var TCondition_MarkedByRuntimeFilter_DEFAULT bool = false + +func (p *TCondition) GetMarkedByRuntimeFilter() (v bool) { + if !p.IsSetMarkedByRuntimeFilter() { + return TCondition_MarkedByRuntimeFilter_DEFAULT } - return fmt.Sprintf("TTabletWriterCancelResult_(%+v)", *p) + return p.MarkedByRuntimeFilter } -func (p *TTabletWriterCancelResult_) DeepEqual(ano *TTabletWriterCancelResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false +var TCondition_CompoundType_DEFAULT TCompoundType = TCompoundType_UNKNOWN + +func (p *TCondition) GetCompoundType() (v TCompoundType) { + if !p.IsSetCompoundType() { + return TCondition_CompoundType_DEFAULT } - return true + return p.CompoundType } - -type TFetchDataParams struct { - ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,2,required" frugal:"2,required,types.TUniqueId" json:"fragment_instance_id"` +func (p *TCondition) SetColumnName(val string) { + p.ColumnName = val } - -func NewTFetchDataParams() *TFetchDataParams { - return &TFetchDataParams{} +func (p *TCondition) SetConditionOp(val string) { + p.ConditionOp = val } - -func (p *TFetchDataParams) InitDefault() { - *p = TFetchDataParams{} +func (p *TCondition) SetConditionValues(val []string) { + p.ConditionValues = val } - -func (p *TFetchDataParams) GetProtocolVersion() (v PaloInternalServiceVersion) { - return p.ProtocolVersion +func (p *TCondition) SetColumnUniqueId(val *int32) { + p.ColumnUniqueId = val } - -var TFetchDataParams_FragmentInstanceId_DEFAULT *types.TUniqueId - -func (p *TFetchDataParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TFetchDataParams_FragmentInstanceId_DEFAULT - } - return p.FragmentInstanceId +func (p *TCondition) SetMarkedByRuntimeFilter(val bool) { + p.MarkedByRuntimeFilter = val } -func (p 
*TFetchDataParams) SetProtocolVersion(val PaloInternalServiceVersion) { - p.ProtocolVersion = val +func (p *TCondition) SetCompoundType(val TCompoundType) { + p.CompoundType = val } -func (p *TFetchDataParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val + +var fieldIDToName_TCondition = map[int16]string{ + 1: "column_name", + 2: "condition_op", + 3: "condition_values", + 4: "column_unique_id", + 5: "marked_by_runtime_filter", + 1000: "compound_type", } -var fieldIDToName_TFetchDataParams = map[int16]string{ - 1: "protocol_version", - 2: "fragment_instance_id", +func (p *TCondition) IsSetColumnUniqueId() bool { + return p.ColumnUniqueId != nil } -func (p *TFetchDataParams) IsSetFragmentInstanceId() bool { - return p.FragmentInstanceId != nil +func (p *TCondition) IsSetMarkedByRuntimeFilter() bool { + return p.MarkedByRuntimeFilter != TCondition_MarkedByRuntimeFilter_DEFAULT } -func (p *TFetchDataParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TCondition) IsSetCompoundType() bool { + return p.CompoundType != TCondition_CompoundType_DEFAULT +} + +func (p *TCondition) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false - var issetFragmentInstanceId bool = false + var issetColumnName bool = false + var issetConditionOp bool = false + var issetConditionValues bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -18804,33 +24767,61 @@ func (p *TFetchDataParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetColumnName = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetConditionOp = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetConditionValues = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { + if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - issetFragmentInstanceId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18839,22 +24830,27 @@ func (p *TFetchDataParams) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetProtocolVersion { + if !issetColumnName { fieldId = 
1 goto RequiredFieldNotSetError } - if !issetFragmentInstanceId { + if !issetConditionOp { fieldId = 2 goto RequiredFieldNotSetError } + + if !issetConditionValues { + fieldId = 3 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCondition[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18863,29 +24859,91 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCondition[fieldId])) } -func (p *TFetchDataParams) ReadField1(iprot thrift.TProtocol) error { +func (p *TCondition) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.ColumnName = _field + return nil +} +func (p *TCondition) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.ConditionOp = _field + return nil +} +func (p *TCondition) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ConditionValues = _field + return nil +} +func (p *TCondition) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ProtocolVersion = PaloInternalServiceVersion(v) + _field = &v } + p.ColumnUniqueId = _field return nil } +func (p *TCondition) ReadField5(iprot thrift.TProtocol) error { -func (p *TFetchDataParams) ReadField2(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = v } + p.MarkedByRuntimeFilter = _field return nil } +func (p *TCondition) ReadField1000(iprot thrift.TProtocol) error { -func (p *TFetchDataParams) Write(oprot thrift.TProtocol) (err error) { + var _field TCompoundType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = TCompoundType(v) + } + p.CompoundType = _field + return nil +} + +func (p *TCondition) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFetchDataParams"); err != nil { + if err = oprot.WriteStructBegin("TCondition"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18897,7 +24955,22 @@ func (p *TFetchDataParams) 
Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18916,11 +24989,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFetchDataParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { +func (p *TCondition) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("column_name", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { + if err := oprot.WriteString(p.ColumnName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18933,11 +25006,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFetchDataParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 2); err != nil { +func (p *TCondition) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("condition_op", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := p.FragmentInstanceId.Write(oprot); err != nil { + if err := oprot.WriteString(p.ConditionOp); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -18950,118 +25023,241 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFetchDataParams) String() string { +func (p *TCondition) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("condition_values", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ConditionValues)); err != nil { + return err + } + for _, v := range p.ConditionValues { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TCondition) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnUniqueId() { + if err = oprot.WriteFieldBegin("column_unique_id", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ColumnUniqueId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TCondition) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetMarkedByRuntimeFilter() { + if err = oprot.WriteFieldBegin("marked_by_runtime_filter", thrift.BOOL, 5); err != 
nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.MarkedByRuntimeFilter); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TCondition) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetCompoundType() { + if err = oprot.WriteFieldBegin("compound_type", thrift.I32, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.CompoundType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) +} + +func (p *TCondition) String() string { if p == nil { return "" } - return fmt.Sprintf("TFetchDataParams(%+v)", *p) + return fmt.Sprintf("TCondition(%+v)", *p) + } -func (p *TFetchDataParams) DeepEqual(ano *TFetchDataParams) bool { +func (p *TCondition) DeepEqual(ano *TCondition) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ProtocolVersion) { + if !p.Field1DeepEqual(ano.ColumnName) { return false } - if !p.Field2DeepEqual(ano.FragmentInstanceId) { + if !p.Field2DeepEqual(ano.ConditionOp) { + return false + } + if !p.Field3DeepEqual(ano.ConditionValues) { + return false + } + if !p.Field4DeepEqual(ano.ColumnUniqueId) { + return false + } + if !p.Field5DeepEqual(ano.MarkedByRuntimeFilter) { + return false + } + if !p.Field1000DeepEqual(ano.CompoundType) { return false } return true } -func (p *TFetchDataParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { +func (p *TCondition) Field1DeepEqual(src string) bool { - if p.ProtocolVersion != src { + if strings.Compare(p.ColumnName, src) != 0 { return false } return true } -func (p *TFetchDataParams) Field2DeepEqual(src *types.TUniqueId) bool { +func (p *TCondition) Field2DeepEqual(src string) bool { - if !p.FragmentInstanceId.DeepEqual(src) { + if strings.Compare(p.ConditionOp, src) != 0 { return false } return true } +func (p *TCondition) Field3DeepEqual(src []string) bool { -type TFetchDataResult_ struct { - ResultBatch *data.TResultBatch `thrift:"result_batch,1,required" frugal:"1,required,data.TResultBatch" json:"result_batch"` - Eos bool `thrift:"eos,2,required" frugal:"2,required,bool" json:"eos"` - PacketNum int32 `thrift:"packet_num,3,required" frugal:"3,required,i32" json:"packet_num"` - Status *status.TStatus `thrift:"status,4,optional" frugal:"4,optional,status.TStatus" json:"status,omitempty"` + if len(p.ConditionValues) != len(src) { + return false + } + for i, v := range p.ConditionValues { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true } +func (p *TCondition) Field4DeepEqual(src *int32) bool { -func NewTFetchDataResult_() *TFetchDataResult_ { - return &TFetchDataResult_{} + if p.ColumnUniqueId == src { + return true + } else if p.ColumnUniqueId == nil || src == nil { + return false + } + if *p.ColumnUniqueId != *src { + return false + } + return true } +func (p *TCondition) Field5DeepEqual(src bool) bool { -func (p *TFetchDataResult_) InitDefault() { - *p = TFetchDataResult_{} + if 
p.MarkedByRuntimeFilter != src { + return false + } + return true } +func (p *TCondition) Field1000DeepEqual(src TCompoundType) bool { -var TFetchDataResult__ResultBatch_DEFAULT *data.TResultBatch - -func (p *TFetchDataResult_) GetResultBatch() (v *data.TResultBatch) { - if !p.IsSetResultBatch() { - return TFetchDataResult__ResultBatch_DEFAULT + if p.CompoundType != src { + return false } - return p.ResultBatch + return true } -func (p *TFetchDataResult_) GetEos() (v bool) { - return p.Eos +type TExportStatusResult_ struct { + Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` + State types.TExportState `thrift:"state,2,required" frugal:"2,required,TExportState" json:"state"` + Files []string `thrift:"files,3,optional" frugal:"3,optional,list" json:"files,omitempty"` } -func (p *TFetchDataResult_) GetPacketNum() (v int32) { - return p.PacketNum +func NewTExportStatusResult_() *TExportStatusResult_ { + return &TExportStatusResult_{} } -var TFetchDataResult__Status_DEFAULT *status.TStatus +func (p *TExportStatusResult_) InitDefault() { +} -func (p *TFetchDataResult_) GetStatus() (v *status.TStatus) { +var TExportStatusResult__Status_DEFAULT *status.TStatus + +func (p *TExportStatusResult_) GetStatus() (v *status.TStatus) { if !p.IsSetStatus() { - return TFetchDataResult__Status_DEFAULT + return TExportStatusResult__Status_DEFAULT } return p.Status } -func (p *TFetchDataResult_) SetResultBatch(val *data.TResultBatch) { - p.ResultBatch = val -} -func (p *TFetchDataResult_) SetEos(val bool) { - p.Eos = val + +func (p *TExportStatusResult_) GetState() (v types.TExportState) { + return p.State } -func (p *TFetchDataResult_) SetPacketNum(val int32) { - p.PacketNum = val + +var TExportStatusResult__Files_DEFAULT []string + +func (p *TExportStatusResult_) GetFiles() (v []string) { + if !p.IsSetFiles() { + return TExportStatusResult__Files_DEFAULT + } + return p.Files } -func (p *TFetchDataResult_) SetStatus(val *status.TStatus) { +func (p *TExportStatusResult_) SetStatus(val *status.TStatus) { p.Status = val } - -var fieldIDToName_TFetchDataResult_ = map[int16]string{ - 1: "result_batch", - 2: "eos", - 3: "packet_num", - 4: "status", +func (p *TExportStatusResult_) SetState(val types.TExportState) { + p.State = val +} +func (p *TExportStatusResult_) SetFiles(val []string) { + p.Files = val } -func (p *TFetchDataResult_) IsSetResultBatch() bool { - return p.ResultBatch != nil +var fieldIDToName_TExportStatusResult_ = map[int16]string{ + 1: "status", + 2: "state", + 3: "files", } -func (p *TFetchDataResult_) IsSetStatus() bool { +func (p *TExportStatusResult_) IsSetStatus() bool { return p.Status != nil } -func (p *TFetchDataResult_) Read(iprot thrift.TProtocol) (err error) { +func (p *TExportStatusResult_) IsSetFiles() bool { + return p.Files != nil +} + +func (p *TExportStatusResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetResultBatch bool = false - var issetEos bool = false - var issetPacketNum bool = false + var issetStatus bool = false + var issetState bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -19082,50 +25278,32 @@ func (p *TFetchDataResult_) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetResultBatch = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError } case 2: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetEos = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetState = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetPacketNum = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19134,27 +25312,22 @@ func (p *TFetchDataResult_) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetResultBatch { + if !issetStatus { fieldId = 1 goto RequiredFieldNotSetError } - if !issetEos { + if !issetState { fieldId = 2 goto RequiredFieldNotSetError } - - if !issetPacketNum { - fieldId = 3 - goto RequiredFieldNotSetError - } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataResult_[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExportStatusResult_[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19163,46 +25336,55 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataResult_[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExportStatusResult_[fieldId])) } -func (p *TFetchDataResult_) ReadField1(iprot thrift.TProtocol) error { - p.ResultBatch = data.NewTResultBatch() - if err := p.ResultBatch.Read(iprot); err != nil { +func (p *TExportStatusResult_) ReadField1(iprot thrift.TProtocol) error { + _field := status.NewTStatus() + if err := _field.Read(iprot); err != nil { return err } + p.Status = _field return nil } +func (p *TExportStatusResult_) ReadField2(iprot thrift.TProtocol) error { -func (p *TFetchDataResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _field types.TExportState + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Eos = v + _field = types.TExportState(v) } + p.State = _field return nil } - -func (p *TFetchDataResult_) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TExportStatusResult_) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.PacketNum 
= v } - return nil -} + _field := make([]string, 0, size) + for i := 0; i < size; i++ { -func (p *TFetchDataResult_) ReadField4(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.Files = _field return nil } -func (p *TFetchDataResult_) Write(oprot thrift.TProtocol) (err error) { +func (p *TExportStatusResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFetchDataResult"); err != nil { + if err = oprot.WriteStructBegin("TExportStatusResult"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19218,11 +25400,6 @@ func (p *TFetchDataResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19241,11 +25418,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFetchDataResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("result_batch", thrift.STRUCT, 1); err != nil { +func (p *TExportStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := p.ResultBatch.Write(oprot); err != nil { + if err := p.Status.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19258,11 +25435,11 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFetchDataResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("eos", thrift.BOOL, 2); err != nil { +func (p *TExportStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("state", thrift.I32, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.Eos); err != nil { + if err := oprot.WriteI32(int32(p.State)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19275,29 +25452,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFetchDataResult_) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("packet_num", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(p.PacketNum); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TFetchDataResult_) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 4); err != nil { +func (p *TExportStatusResult_) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFiles() { + if err = oprot.WriteFieldBegin("files", thrift.LIST, 3); err != nil { goto WriteFieldBeginError } - if err := p.Status.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, 
len(p.Files)); err != nil { + return err + } + for _, v := range p.Files { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19306,158 +25474,241 @@ func (p *TFetchDataResult_) writeField4(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TFetchDataResult_) String() string { +func (p *TExportStatusResult_) String() string { if p == nil { return "" } - return fmt.Sprintf("TFetchDataResult_(%+v)", *p) + return fmt.Sprintf("TExportStatusResult_(%+v)", *p) + } -func (p *TFetchDataResult_) DeepEqual(ano *TFetchDataResult_) bool { +func (p *TExportStatusResult_) DeepEqual(ano *TExportStatusResult_) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ResultBatch) { + if !p.Field1DeepEqual(ano.Status) { return false } - if !p.Field2DeepEqual(ano.Eos) { + if !p.Field2DeepEqual(ano.State) { return false } - if !p.Field3DeepEqual(ano.PacketNum) { + if !p.Field3DeepEqual(ano.Files) { return false } - if !p.Field4DeepEqual(ano.Status) { + return true +} + +func (p *TExportStatusResult_) Field1DeepEqual(src *status.TStatus) bool { + + if !p.Status.DeepEqual(src) { return false } return true } +func (p *TExportStatusResult_) Field2DeepEqual(src types.TExportState) bool { -func (p *TFetchDataResult_) Field1DeepEqual(src *data.TResultBatch) bool { - - if !p.ResultBatch.DeepEqual(src) { + if p.State != src { return false } return true } -func (p *TFetchDataResult_) Field2DeepEqual(src bool) bool { +func (p *TExportStatusResult_) Field3DeepEqual(src []string) bool { - if p.Eos != src { + if len(p.Files) != len(src) { return false } + for i, v := range p.Files { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } return true } -func (p *TFetchDataResult_) Field3DeepEqual(src int32) bool { - if p.PacketNum != src { - return false +type TPipelineInstanceParams struct { + FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,1,required" frugal:"1,required,types.TUniqueId" json:"fragment_instance_id"` + BuildHashTableForBroadcastJoin bool `thrift:"build_hash_table_for_broadcast_join,2,optional" frugal:"2,optional,bool" json:"build_hash_table_for_broadcast_join,omitempty"` + PerNodeScanRanges map[types.TPlanNodeId][]*TScanRangeParams `thrift:"per_node_scan_ranges,3,required" frugal:"3,required,map>" json:"per_node_scan_ranges"` + SenderId *int32 `thrift:"sender_id,4,optional" frugal:"4,optional,i32" json:"sender_id,omitempty"` + RuntimeFilterParams *TRuntimeFilterParams `thrift:"runtime_filter_params,5,optional" frugal:"5,optional,TRuntimeFilterParams" json:"runtime_filter_params,omitempty"` + BackendNum *int32 `thrift:"backend_num,6,optional" frugal:"6,optional,i32" json:"backend_num,omitempty"` + PerNodeSharedScans map[types.TPlanNodeId]bool `thrift:"per_node_shared_scans,7,optional" frugal:"7,optional,map" json:"per_node_shared_scans,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,8,optional" frugal:"8,optional,list" json:"topn_filter_source_node_ids,omitempty"` + 
TopnFilterDescs []*plannodes.TTopnFilterDesc `thrift:"topn_filter_descs,9,optional" frugal:"9,optional,list" json:"topn_filter_descs,omitempty"` +} + +func NewTPipelineInstanceParams() *TPipelineInstanceParams { + return &TPipelineInstanceParams{ + + BuildHashTableForBroadcastJoin: false, + } +} + +func (p *TPipelineInstanceParams) InitDefault() { + p.BuildHashTableForBroadcastJoin = false +} + +var TPipelineInstanceParams_FragmentInstanceId_DEFAULT *types.TUniqueId + +func (p *TPipelineInstanceParams) GetFragmentInstanceId() (v *types.TUniqueId) { + if !p.IsSetFragmentInstanceId() { + return TPipelineInstanceParams_FragmentInstanceId_DEFAULT } - return true + return p.FragmentInstanceId } -func (p *TFetchDataResult_) Field4DeepEqual(src *status.TStatus) bool { - if !p.Status.DeepEqual(src) { - return false +var TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT bool = false + +func (p *TPipelineInstanceParams) GetBuildHashTableForBroadcastJoin() (v bool) { + if !p.IsSetBuildHashTableForBroadcastJoin() { + return TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT } - return true + return p.BuildHashTableForBroadcastJoin } -type TCondition struct { - ColumnName string `thrift:"column_name,1,required" frugal:"1,required,string" json:"column_name"` - ConditionOp string `thrift:"condition_op,2,required" frugal:"2,required,string" json:"condition_op"` - ConditionValues []string `thrift:"condition_values,3,required" frugal:"3,required,list" json:"condition_values"` - ColumnUniqueId *int32 `thrift:"column_unique_id,4,optional" frugal:"4,optional,i32" json:"column_unique_id,omitempty"` - MarkedByRuntimeFilter bool `thrift:"marked_by_runtime_filter,5,optional" frugal:"5,optional,bool" json:"marked_by_runtime_filter,omitempty"` +func (p *TPipelineInstanceParams) GetPerNodeScanRanges() (v map[types.TPlanNodeId][]*TScanRangeParams) { + return p.PerNodeScanRanges } -func NewTCondition() *TCondition { - return &TCondition{ +var TPipelineInstanceParams_SenderId_DEFAULT int32 - MarkedByRuntimeFilter: false, +func (p *TPipelineInstanceParams) GetSenderId() (v int32) { + if !p.IsSetSenderId() { + return TPipelineInstanceParams_SenderId_DEFAULT } + return *p.SenderId } -func (p *TCondition) InitDefault() { - *p = TCondition{ +var TPipelineInstanceParams_RuntimeFilterParams_DEFAULT *TRuntimeFilterParams - MarkedByRuntimeFilter: false, +func (p *TPipelineInstanceParams) GetRuntimeFilterParams() (v *TRuntimeFilterParams) { + if !p.IsSetRuntimeFilterParams() { + return TPipelineInstanceParams_RuntimeFilterParams_DEFAULT } + return p.RuntimeFilterParams } -func (p *TCondition) GetColumnName() (v string) { - return p.ColumnName -} +var TPipelineInstanceParams_BackendNum_DEFAULT int32 -func (p *TCondition) GetConditionOp() (v string) { - return p.ConditionOp +func (p *TPipelineInstanceParams) GetBackendNum() (v int32) { + if !p.IsSetBackendNum() { + return TPipelineInstanceParams_BackendNum_DEFAULT + } + return *p.BackendNum } -func (p *TCondition) GetConditionValues() (v []string) { - return p.ConditionValues +var TPipelineInstanceParams_PerNodeSharedScans_DEFAULT map[types.TPlanNodeId]bool + +func (p *TPipelineInstanceParams) GetPerNodeSharedScans() (v map[types.TPlanNodeId]bool) { + if !p.IsSetPerNodeSharedScans() { + return TPipelineInstanceParams_PerNodeSharedScans_DEFAULT + } + return p.PerNodeSharedScans } -var TCondition_ColumnUniqueId_DEFAULT int32 +var TPipelineInstanceParams_TopnFilterSourceNodeIds_DEFAULT []int32 -func (p *TCondition) GetColumnUniqueId() (v int32) { - if 
!p.IsSetColumnUniqueId() { - return TCondition_ColumnUniqueId_DEFAULT +func (p *TPipelineInstanceParams) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPipelineInstanceParams_TopnFilterSourceNodeIds_DEFAULT } - return *p.ColumnUniqueId + return p.TopnFilterSourceNodeIds } -var TCondition_MarkedByRuntimeFilter_DEFAULT bool = false +var TPipelineInstanceParams_TopnFilterDescs_DEFAULT []*plannodes.TTopnFilterDesc -func (p *TCondition) GetMarkedByRuntimeFilter() (v bool) { - if !p.IsSetMarkedByRuntimeFilter() { - return TCondition_MarkedByRuntimeFilter_DEFAULT +func (p *TPipelineInstanceParams) GetTopnFilterDescs() (v []*plannodes.TTopnFilterDesc) { + if !p.IsSetTopnFilterDescs() { + return TPipelineInstanceParams_TopnFilterDescs_DEFAULT } - return p.MarkedByRuntimeFilter + return p.TopnFilterDescs } -func (p *TCondition) SetColumnName(val string) { - p.ColumnName = val +func (p *TPipelineInstanceParams) SetFragmentInstanceId(val *types.TUniqueId) { + p.FragmentInstanceId = val } -func (p *TCondition) SetConditionOp(val string) { - p.ConditionOp = val +func (p *TPipelineInstanceParams) SetBuildHashTableForBroadcastJoin(val bool) { + p.BuildHashTableForBroadcastJoin = val } -func (p *TCondition) SetConditionValues(val []string) { - p.ConditionValues = val +func (p *TPipelineInstanceParams) SetPerNodeScanRanges(val map[types.TPlanNodeId][]*TScanRangeParams) { + p.PerNodeScanRanges = val } -func (p *TCondition) SetColumnUniqueId(val *int32) { - p.ColumnUniqueId = val +func (p *TPipelineInstanceParams) SetSenderId(val *int32) { + p.SenderId = val } -func (p *TCondition) SetMarkedByRuntimeFilter(val bool) { - p.MarkedByRuntimeFilter = val +func (p *TPipelineInstanceParams) SetRuntimeFilterParams(val *TRuntimeFilterParams) { + p.RuntimeFilterParams = val +} +func (p *TPipelineInstanceParams) SetBackendNum(val *int32) { + p.BackendNum = val +} +func (p *TPipelineInstanceParams) SetPerNodeSharedScans(val map[types.TPlanNodeId]bool) { + p.PerNodeSharedScans = val +} +func (p *TPipelineInstanceParams) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} +func (p *TPipelineInstanceParams) SetTopnFilterDescs(val []*plannodes.TTopnFilterDesc) { + p.TopnFilterDescs = val } -var fieldIDToName_TCondition = map[int16]string{ - 1: "column_name", - 2: "condition_op", - 3: "condition_values", - 4: "column_unique_id", - 5: "marked_by_runtime_filter", +var fieldIDToName_TPipelineInstanceParams = map[int16]string{ + 1: "fragment_instance_id", + 2: "build_hash_table_for_broadcast_join", + 3: "per_node_scan_ranges", + 4: "sender_id", + 5: "runtime_filter_params", + 6: "backend_num", + 7: "per_node_shared_scans", + 8: "topn_filter_source_node_ids", + 9: "topn_filter_descs", } -func (p *TCondition) IsSetColumnUniqueId() bool { - return p.ColumnUniqueId != nil +func (p *TPipelineInstanceParams) IsSetFragmentInstanceId() bool { + return p.FragmentInstanceId != nil } -func (p *TCondition) IsSetMarkedByRuntimeFilter() bool { - return p.MarkedByRuntimeFilter != TCondition_MarkedByRuntimeFilter_DEFAULT +func (p *TPipelineInstanceParams) IsSetBuildHashTableForBroadcastJoin() bool { + return p.BuildHashTableForBroadcastJoin != TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT } -func (p *TCondition) Read(iprot thrift.TProtocol) (err error) { +func (p *TPipelineInstanceParams) IsSetSenderId() bool { + return p.SenderId != nil +} + +func (p *TPipelineInstanceParams) IsSetRuntimeFilterParams() bool { + return p.RuntimeFilterParams != nil +} + 
+func (p *TPipelineInstanceParams) IsSetBackendNum() bool { + return p.BackendNum != nil +} + +func (p *TPipelineInstanceParams) IsSetPerNodeSharedScans() bool { + return p.PerNodeSharedScans != nil +} + +func (p *TPipelineInstanceParams) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} + +func (p *TPipelineInstanceParams) IsSetTopnFilterDescs() bool { + return p.TopnFilterDescs != nil +} + +func (p *TPipelineInstanceParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetColumnName bool = false - var issetConditionOp bool = false - var issetConditionValues bool = false + var issetFragmentInstanceId bool = false + var issetPerNodeScanRanges bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -19474,64 +25725,84 @@ func (p *TCondition) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetFragmentInstanceId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - issetConditionOp = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - issetConditionValues = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetPerNodeScanRanges = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.MAP { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.LIST { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19540,99 +25811,202 @@ func (p 
*TCondition) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetColumnName { + if !issetFragmentInstanceId { fieldId = 1 goto RequiredFieldNotSetError } - if !issetConditionOp { - fieldId = 2 - goto RequiredFieldNotSetError + if !issetPerNodeScanRanges { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineInstanceParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineInstanceParams[fieldId])) +} + +func (p *TPipelineInstanceParams) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.FragmentInstanceId = _field + return nil +} +func (p *TPipelineInstanceParams) ReadField2(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.BuildHashTableForBroadcastJoin = _field + return nil +} +func (p *TPipelineInstanceParams) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId][]*TScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _val := make([]*TScanRangeParams, 0, size) + values := make([]TScanRangeParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _val = append(_val, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field[_key] = _val } - - if !issetConditionValues { - fieldId = 3 - goto RequiredFieldNotSetError + if err := iprot.ReadMapEnd(); err != nil { + return err } + p.PerNodeScanRanges = _field return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCondition[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCondition[fieldId])) } +func (p 
*TPipelineInstanceParams) ReadField4(iprot thrift.TProtocol) error { -func (p *TCondition) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnName = v + _field = &v + } + p.SenderId = _field + return nil +} +func (p *TPipelineInstanceParams) ReadField5(iprot thrift.TProtocol) error { + _field := NewTRuntimeFilterParams() + if err := _field.Read(iprot); err != nil { + return err } + p.RuntimeFilterParams = _field return nil } +func (p *TPipelineInstanceParams) ReadField6(iprot thrift.TProtocol) error { -func (p *TCondition) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ConditionOp = v + _field = &v } + p.BackendNum = _field return nil } +func (p *TPipelineInstanceParams) ReadField7(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId]bool, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TCondition) ReadField3(iprot thrift.TProtocol) error { + var _val bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PerNodeSharedScans = _field + return nil +} +func (p *TPipelineInstanceParams) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConditionValues = make([]string, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { _elem = v } - p.ConditionValues = append(p.ConditionValues, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.TopnFilterSourceNodeIds = _field return nil } - -func (p *TCondition) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TPipelineInstanceParams) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - p.ColumnUniqueId = &v } - return nil -} + _field := make([]*plannodes.TTopnFilterDesc, 0, size) + values := make([]plannodes.TTopnFilterDesc, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TCondition) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - p.MarkedByRuntimeFilter = v } + p.TopnFilterDescs = _field return nil } -func (p *TCondition) Write(oprot thrift.TProtocol) (err error) { +func (p *TPipelineInstanceParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TCondition"); err != nil { + if err = oprot.WriteStructBegin("TPipelineInstanceParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19656,7 +26030,22 @@ func (p *TCondition) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId 
= 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19675,11 +26064,11 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TCondition) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("column_name", thrift.STRING, 1); err != nil { +func (p *TPipelineInstanceParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(p.ColumnName); err != nil { + if err := p.FragmentInstanceId.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19692,15 +26081,17 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TCondition) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("condition_op", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(p.ConditionOp); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TPipelineInstanceParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBuildHashTableForBroadcastJoin() { + if err = oprot.WriteFieldBegin("build_hash_table_for_broadcast_join", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.BuildHashTableForBroadcastJoin); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil WriteFieldBeginError: @@ -19709,19 +26100,30 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TCondition) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("condition_values", thrift.LIST, 3); err != nil { +func (p *TPipelineInstanceParams) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("per_node_scan_ranges", thrift.MAP, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ConditionValues)); err != nil { + if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)); err != nil { return err } - for _, v := range p.ConditionValues { - if err := oprot.WriteString(v); err != nil { + for k, v := range p.PerNodeScanRanges { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19734,12 +26136,107 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TCondition) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnUniqueId() { - if err = oprot.WriteFieldBegin("column_unique_id", thrift.I32, 4); err != nil { +func (p 
*TPipelineInstanceParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSenderId() { + if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SenderId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPipelineInstanceParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterParams() { + if err = oprot.WriteFieldBegin("runtime_filter_params", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.RuntimeFilterParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPipelineInstanceParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendNum() { + if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BackendNum); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPipelineInstanceParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetPerNodeSharedScans() { + if err = oprot.WriteFieldBegin("per_node_shared_scans", thrift.MAP, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)); err != nil { + return err + } + for k, v := range p.PerNodeSharedScans { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteBool(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TPipelineInstanceParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 8); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.ColumnUniqueId); err != nil { + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19748,17 +26245,25 @@ func (p *TCondition) writeField4(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: 
", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TCondition) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetMarkedByRuntimeFilter() { - if err = oprot.WriteFieldBegin("marked_by_runtime_filter", thrift.BOOL, 5); err != nil { +func (p *TPipelineInstanceParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterDescs() { + if err = oprot.WriteFieldBegin("topn_filter_descs", thrift.LIST, 9); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.MarkedByRuntimeFilter); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TopnFilterDescs)); err != nil { + return err + } + for _, v := range p.TopnFilterDescs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -19767,575 +26272,945 @@ func (p *TCondition) writeField5(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TCondition) String() string { +func (p *TPipelineInstanceParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TCondition(%+v)", *p) + return fmt.Sprintf("TPipelineInstanceParams(%+v)", *p) + } -func (p *TCondition) DeepEqual(ano *TCondition) bool { +func (p *TPipelineInstanceParams) DeepEqual(ano *TPipelineInstanceParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ColumnName) { + if !p.Field1DeepEqual(ano.FragmentInstanceId) { return false } - if !p.Field2DeepEqual(ano.ConditionOp) { + if !p.Field2DeepEqual(ano.BuildHashTableForBroadcastJoin) { return false } - if !p.Field3DeepEqual(ano.ConditionValues) { + if !p.Field3DeepEqual(ano.PerNodeScanRanges) { return false } - if !p.Field4DeepEqual(ano.ColumnUniqueId) { + if !p.Field4DeepEqual(ano.SenderId) { return false } - if !p.Field5DeepEqual(ano.MarkedByRuntimeFilter) { + if !p.Field5DeepEqual(ano.RuntimeFilterParams) { + return false + } + if !p.Field6DeepEqual(ano.BackendNum) { + return false + } + if !p.Field7DeepEqual(ano.PerNodeSharedScans) { + return false + } + if !p.Field8DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } + if !p.Field9DeepEqual(ano.TopnFilterDescs) { + return false + } + return true +} + +func (p *TPipelineInstanceParams) Field1DeepEqual(src *types.TUniqueId) bool { + + if !p.FragmentInstanceId.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineInstanceParams) Field2DeepEqual(src bool) bool { + + if p.BuildHashTableForBroadcastJoin != src { + return false + } + return true +} +func (p *TPipelineInstanceParams) Field3DeepEqual(src map[types.TPlanNodeId][]*TScanRangeParams) bool { + + if len(p.PerNodeScanRanges) != len(src) { + return false + } + for k, v := range p.PerNodeScanRanges { + _src := src[k] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + 
return true +} +func (p *TPipelineInstanceParams) Field4DeepEqual(src *int32) bool { + + if p.SenderId == src { + return true + } else if p.SenderId == nil || src == nil { + return false + } + if *p.SenderId != *src { + return false + } + return true +} +func (p *TPipelineInstanceParams) Field5DeepEqual(src *TRuntimeFilterParams) bool { + + if !p.RuntimeFilterParams.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineInstanceParams) Field6DeepEqual(src *int32) bool { + + if p.BackendNum == src { + return true + } else if p.BackendNum == nil || src == nil { + return false + } + if *p.BackendNum != *src { + return false + } + return true +} +func (p *TPipelineInstanceParams) Field7DeepEqual(src map[types.TPlanNodeId]bool) bool { + + if len(p.PerNodeSharedScans) != len(src) { + return false + } + for k, v := range p.PerNodeSharedScans { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TPipelineInstanceParams) Field8DeepEqual(src []int32) bool { + + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false + } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TPipelineInstanceParams) Field9DeepEqual(src []*plannodes.TTopnFilterDesc) bool { + + if len(p.TopnFilterDescs) != len(src) { return false } - return true + for i, v := range p.TopnFilterDescs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TPipelineFragmentParams struct { + ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` + QueryId *types.TUniqueId `thrift:"query_id,2,required" frugal:"2,required,types.TUniqueId" json:"query_id"` + FragmentId *int32 `thrift:"fragment_id,3,optional" frugal:"3,optional,i32" json:"fragment_id,omitempty"` + PerExchNumSenders map[types.TPlanNodeId]int32 `thrift:"per_exch_num_senders,4,required" frugal:"4,required,map" json:"per_exch_num_senders"` + DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,5,optional" frugal:"5,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` + ResourceInfo *types.TResourceInfo `thrift:"resource_info,6,optional" frugal:"6,optional,types.TResourceInfo" json:"resource_info,omitempty"` + Destinations []*datasinks.TPlanFragmentDestination `thrift:"destinations,7" frugal:"7,default,list" json:"destinations"` + NumSenders *int32 `thrift:"num_senders,8,optional" frugal:"8,optional,i32" json:"num_senders,omitempty"` + SendQueryStatisticsWithEveryBatch *bool `thrift:"send_query_statistics_with_every_batch,9,optional" frugal:"9,optional,bool" json:"send_query_statistics_with_every_batch,omitempty"` + Coord *types.TNetworkAddress `thrift:"coord,10,optional" frugal:"10,optional,types.TNetworkAddress" json:"coord,omitempty"` + QueryGlobals *TQueryGlobals `thrift:"query_globals,11,optional" frugal:"11,optional,TQueryGlobals" json:"query_globals,omitempty"` + QueryOptions *TQueryOptions `thrift:"query_options,12,optional" frugal:"12,optional,TQueryOptions" json:"query_options,omitempty"` + ImportLabel *string `thrift:"import_label,13,optional" frugal:"13,optional,string" json:"import_label,omitempty"` + DbName *string `thrift:"db_name,14,optional" frugal:"14,optional,string" json:"db_name,omitempty"` + LoadJobId *int64 `thrift:"load_job_id,15,optional" frugal:"15,optional,i64" json:"load_job_id,omitempty"` + LoadErrorHubInfo *TLoadErrorHubInfo `thrift:"load_error_hub_info,16,optional" 
frugal:"16,optional,TLoadErrorHubInfo" json:"load_error_hub_info,omitempty"` + FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,17,optional" frugal:"17,optional,i32" json:"fragment_num_on_host,omitempty"` + BackendId *int64 `thrift:"backend_id,18,optional" frugal:"18,optional,i64" json:"backend_id,omitempty"` + NeedWaitExecutionTrigger bool `thrift:"need_wait_execution_trigger,19,optional" frugal:"19,optional,bool" json:"need_wait_execution_trigger,omitempty"` + InstancesSharingHashTable []*types.TUniqueId `thrift:"instances_sharing_hash_table,20,optional" frugal:"20,optional,list" json:"instances_sharing_hash_table,omitempty"` + IsSimplifiedParam bool `thrift:"is_simplified_param,21,optional" frugal:"21,optional,bool" json:"is_simplified_param,omitempty"` + GlobalDict *TGlobalDict `thrift:"global_dict,22,optional" frugal:"22,optional,TGlobalDict" json:"global_dict,omitempty"` + Fragment *planner.TPlanFragment `thrift:"fragment,23,optional" frugal:"23,optional,planner.TPlanFragment" json:"fragment,omitempty"` + LocalParams []*TPipelineInstanceParams `thrift:"local_params,24" frugal:"24,default,list" json:"local_params"` + WorkloadGroups []*TPipelineWorkloadGroup `thrift:"workload_groups,26,optional" frugal:"26,optional,list" json:"workload_groups,omitempty"` + TxnConf *TTxnParams `thrift:"txn_conf,27,optional" frugal:"27,optional,TTxnParams" json:"txn_conf,omitempty"` + TableName *string `thrift:"table_name,28,optional" frugal:"28,optional,string" json:"table_name,omitempty"` + FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,29,optional" frugal:"29,optional,map" json:"file_scan_params,omitempty"` + GroupCommit bool `thrift:"group_commit,30,optional" frugal:"30,optional,bool" json:"group_commit,omitempty"` + LoadStreamPerNode *int32 `thrift:"load_stream_per_node,31,optional" frugal:"31,optional,i32" json:"load_stream_per_node,omitempty"` + TotalLoadStreams *int32 `thrift:"total_load_streams,32,optional" frugal:"32,optional,i32" json:"total_load_streams,omitempty"` + NumLocalSink *int32 `thrift:"num_local_sink,33,optional" frugal:"33,optional,i32" json:"num_local_sink,omitempty"` + NumBuckets *int32 `thrift:"num_buckets,34,optional" frugal:"34,optional,i32" json:"num_buckets,omitempty"` + BucketSeqToInstanceIdx map[int32]int32 `thrift:"bucket_seq_to_instance_idx,35,optional" frugal:"35,optional,map" json:"bucket_seq_to_instance_idx,omitempty"` + PerNodeSharedScans map[types.TPlanNodeId]bool `thrift:"per_node_shared_scans,36,optional" frugal:"36,optional,map" json:"per_node_shared_scans,omitempty"` + ParallelInstances *int32 `thrift:"parallel_instances,37,optional" frugal:"37,optional,i32" json:"parallel_instances,omitempty"` + TotalInstances *int32 `thrift:"total_instances,38,optional" frugal:"38,optional,i32" json:"total_instances,omitempty"` + ShuffleIdxToInstanceIdx map[int32]int32 `thrift:"shuffle_idx_to_instance_idx,39,optional" frugal:"39,optional,map" json:"shuffle_idx_to_instance_idx,omitempty"` + IsNereids bool `thrift:"is_nereids,40,optional" frugal:"40,optional,bool" json:"is_nereids,omitempty"` + WalId *int64 `thrift:"wal_id,41,optional" frugal:"41,optional,i64" json:"wal_id,omitempty"` + ContentLength *int64 `thrift:"content_length,42,optional" frugal:"42,optional,i64" json:"content_length,omitempty"` + CurrentConnectFe *types.TNetworkAddress `thrift:"current_connect_fe,43,optional" frugal:"43,optional,types.TNetworkAddress" json:"current_connect_fe,omitempty"` + TopnFilterSourceNodeIds []int32 
`thrift:"topn_filter_source_node_ids,44,optional" frugal:"44,optional,list" json:"topn_filter_source_node_ids,omitempty"` + IsMowTable *bool `thrift:"is_mow_table,1000,optional" frugal:"1000,optional,bool" json:"is_mow_table,omitempty"` +} + +func NewTPipelineFragmentParams() *TPipelineFragmentParams { + return &TPipelineFragmentParams{ + + NeedWaitExecutionTrigger: false, + IsSimplifiedParam: false, + GroupCommit: false, + IsNereids: true, + } +} + +func (p *TPipelineFragmentParams) InitDefault() { + p.NeedWaitExecutionTrigger = false + p.IsSimplifiedParam = false + p.GroupCommit = false + p.IsNereids = true +} + +func (p *TPipelineFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { + return p.ProtocolVersion +} + +var TPipelineFragmentParams_QueryId_DEFAULT *types.TUniqueId + +func (p *TPipelineFragmentParams) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TPipelineFragmentParams_QueryId_DEFAULT + } + return p.QueryId +} + +var TPipelineFragmentParams_FragmentId_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetFragmentId() (v int32) { + if !p.IsSetFragmentId() { + return TPipelineFragmentParams_FragmentId_DEFAULT + } + return *p.FragmentId +} + +func (p *TPipelineFragmentParams) GetPerExchNumSenders() (v map[types.TPlanNodeId]int32) { + return p.PerExchNumSenders +} + +var TPipelineFragmentParams_DescTbl_DEFAULT *descriptors.TDescriptorTable + +func (p *TPipelineFragmentParams) GetDescTbl() (v *descriptors.TDescriptorTable) { + if !p.IsSetDescTbl() { + return TPipelineFragmentParams_DescTbl_DEFAULT + } + return p.DescTbl +} + +var TPipelineFragmentParams_ResourceInfo_DEFAULT *types.TResourceInfo + +func (p *TPipelineFragmentParams) GetResourceInfo() (v *types.TResourceInfo) { + if !p.IsSetResourceInfo() { + return TPipelineFragmentParams_ResourceInfo_DEFAULT + } + return p.ResourceInfo +} + +func (p *TPipelineFragmentParams) GetDestinations() (v []*datasinks.TPlanFragmentDestination) { + return p.Destinations +} + +var TPipelineFragmentParams_NumSenders_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetNumSenders() (v int32) { + if !p.IsSetNumSenders() { + return TPipelineFragmentParams_NumSenders_DEFAULT + } + return *p.NumSenders +} + +var TPipelineFragmentParams_SendQueryStatisticsWithEveryBatch_DEFAULT bool + +func (p *TPipelineFragmentParams) GetSendQueryStatisticsWithEveryBatch() (v bool) { + if !p.IsSetSendQueryStatisticsWithEveryBatch() { + return TPipelineFragmentParams_SendQueryStatisticsWithEveryBatch_DEFAULT + } + return *p.SendQueryStatisticsWithEveryBatch } -func (p *TCondition) Field1DeepEqual(src string) bool { +var TPipelineFragmentParams_Coord_DEFAULT *types.TNetworkAddress - if strings.Compare(p.ColumnName, src) != 0 { - return false +func (p *TPipelineFragmentParams) GetCoord() (v *types.TNetworkAddress) { + if !p.IsSetCoord() { + return TPipelineFragmentParams_Coord_DEFAULT } - return true + return p.Coord } -func (p *TCondition) Field2DeepEqual(src string) bool { - if strings.Compare(p.ConditionOp, src) != 0 { - return false +var TPipelineFragmentParams_QueryGlobals_DEFAULT *TQueryGlobals + +func (p *TPipelineFragmentParams) GetQueryGlobals() (v *TQueryGlobals) { + if !p.IsSetQueryGlobals() { + return TPipelineFragmentParams_QueryGlobals_DEFAULT } - return true + return p.QueryGlobals } -func (p *TCondition) Field3DeepEqual(src []string) bool { - if len(p.ConditionValues) != len(src) { - return false - } - for i, v := range p.ConditionValues { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } 
+var TPipelineFragmentParams_QueryOptions_DEFAULT *TQueryOptions + +func (p *TPipelineFragmentParams) GetQueryOptions() (v *TQueryOptions) { + if !p.IsSetQueryOptions() { + return TPipelineFragmentParams_QueryOptions_DEFAULT } - return true + return p.QueryOptions } -func (p *TCondition) Field4DeepEqual(src *int32) bool { - if p.ColumnUniqueId == src { - return true - } else if p.ColumnUniqueId == nil || src == nil { - return false - } - if *p.ColumnUniqueId != *src { - return false +var TPipelineFragmentParams_ImportLabel_DEFAULT string + +func (p *TPipelineFragmentParams) GetImportLabel() (v string) { + if !p.IsSetImportLabel() { + return TPipelineFragmentParams_ImportLabel_DEFAULT } - return true + return *p.ImportLabel } -func (p *TCondition) Field5DeepEqual(src bool) bool { - if p.MarkedByRuntimeFilter != src { - return false +var TPipelineFragmentParams_DbName_DEFAULT string + +func (p *TPipelineFragmentParams) GetDbName() (v string) { + if !p.IsSetDbName() { + return TPipelineFragmentParams_DbName_DEFAULT } - return true + return *p.DbName } -type TExportStatusResult_ struct { - Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` - State types.TExportState `thrift:"state,2,required" frugal:"2,required,TExportState" json:"state"` - Files []string `thrift:"files,3,optional" frugal:"3,optional,list" json:"files,omitempty"` -} +var TPipelineFragmentParams_LoadJobId_DEFAULT int64 -func NewTExportStatusResult_() *TExportStatusResult_ { - return &TExportStatusResult_{} +func (p *TPipelineFragmentParams) GetLoadJobId() (v int64) { + if !p.IsSetLoadJobId() { + return TPipelineFragmentParams_LoadJobId_DEFAULT + } + return *p.LoadJobId } -func (p *TExportStatusResult_) InitDefault() { - *p = TExportStatusResult_{} +var TPipelineFragmentParams_LoadErrorHubInfo_DEFAULT *TLoadErrorHubInfo + +func (p *TPipelineFragmentParams) GetLoadErrorHubInfo() (v *TLoadErrorHubInfo) { + if !p.IsSetLoadErrorHubInfo() { + return TPipelineFragmentParams_LoadErrorHubInfo_DEFAULT + } + return p.LoadErrorHubInfo } -var TExportStatusResult__Status_DEFAULT *status.TStatus +var TPipelineFragmentParams_FragmentNumOnHost_DEFAULT int32 -func (p *TExportStatusResult_) GetStatus() (v *status.TStatus) { - if !p.IsSetStatus() { - return TExportStatusResult__Status_DEFAULT +func (p *TPipelineFragmentParams) GetFragmentNumOnHost() (v int32) { + if !p.IsSetFragmentNumOnHost() { + return TPipelineFragmentParams_FragmentNumOnHost_DEFAULT } - return p.Status + return *p.FragmentNumOnHost } -func (p *TExportStatusResult_) GetState() (v types.TExportState) { - return p.State +var TPipelineFragmentParams_BackendId_DEFAULT int64 + +func (p *TPipelineFragmentParams) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TPipelineFragmentParams_BackendId_DEFAULT + } + return *p.BackendId } -var TExportStatusResult__Files_DEFAULT []string +var TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT bool = false -func (p *TExportStatusResult_) GetFiles() (v []string) { - if !p.IsSetFiles() { - return TExportStatusResult__Files_DEFAULT +func (p *TPipelineFragmentParams) GetNeedWaitExecutionTrigger() (v bool) { + if !p.IsSetNeedWaitExecutionTrigger() { + return TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT } - return p.Files -} -func (p *TExportStatusResult_) SetStatus(val *status.TStatus) { - p.Status = val -} -func (p *TExportStatusResult_) SetState(val types.TExportState) { - p.State = val -} -func (p *TExportStatusResult_) SetFiles(val []string) { - p.Files = val + 
return p.NeedWaitExecutionTrigger } -var fieldIDToName_TExportStatusResult_ = map[int16]string{ - 1: "status", - 2: "state", - 3: "files", -} +var TPipelineFragmentParams_InstancesSharingHashTable_DEFAULT []*types.TUniqueId -func (p *TExportStatusResult_) IsSetStatus() bool { - return p.Status != nil +func (p *TPipelineFragmentParams) GetInstancesSharingHashTable() (v []*types.TUniqueId) { + if !p.IsSetInstancesSharingHashTable() { + return TPipelineFragmentParams_InstancesSharingHashTable_DEFAULT + } + return p.InstancesSharingHashTable } -func (p *TExportStatusResult_) IsSetFiles() bool { - return p.Files != nil +var TPipelineFragmentParams_IsSimplifiedParam_DEFAULT bool = false + +func (p *TPipelineFragmentParams) GetIsSimplifiedParam() (v bool) { + if !p.IsSetIsSimplifiedParam() { + return TPipelineFragmentParams_IsSimplifiedParam_DEFAULT + } + return p.IsSimplifiedParam } -func (p *TExportStatusResult_) Read(iprot thrift.TProtocol) (err error) { +var TPipelineFragmentParams_GlobalDict_DEFAULT *TGlobalDict - var fieldTypeId thrift.TType - var fieldId int16 - var issetStatus bool = false - var issetState bool = false +func (p *TPipelineFragmentParams) GetGlobalDict() (v *TGlobalDict) { + if !p.IsSetGlobalDict() { + return TPipelineFragmentParams_GlobalDict_DEFAULT + } + return p.GlobalDict +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +var TPipelineFragmentParams_Fragment_DEFAULT *planner.TPlanFragment + +func (p *TPipelineFragmentParams) GetFragment() (v *planner.TPlanFragment) { + if !p.IsSetFragment() { + return TPipelineFragmentParams_Fragment_DEFAULT } + return p.Fragment +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +func (p *TPipelineFragmentParams) GetLocalParams() (v []*TPipelineInstanceParams) { + return p.LocalParams +} - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetStatus = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetState = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } +var TPipelineFragmentParams_WorkloadGroups_DEFAULT []*TPipelineWorkloadGroup - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError +func (p *TPipelineFragmentParams) GetWorkloadGroups() (v []*TPipelineWorkloadGroup) { + if !p.IsSetWorkloadGroups() { + return TPipelineFragmentParams_WorkloadGroups_DEFAULT } + return p.WorkloadGroups +} - if !issetStatus { - fieldId = 1 - goto RequiredFieldNotSetError +var TPipelineFragmentParams_TxnConf_DEFAULT *TTxnParams + +func (p *TPipelineFragmentParams) GetTxnConf() (v *TTxnParams) { + if !p.IsSetTxnConf() { + return TPipelineFragmentParams_TxnConf_DEFAULT } + return p.TxnConf +} - if !issetState { - fieldId = 2 - goto RequiredFieldNotSetError +var TPipelineFragmentParams_TableName_DEFAULT string + +func (p 
*TPipelineFragmentParams) GetTableName() (v string) { + if !p.IsSetTableName() { + return TPipelineFragmentParams_TableName_DEFAULT } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExportStatusResult_[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + return *p.TableName +} -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExportStatusResult_[fieldId])) +var TPipelineFragmentParams_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams + +func (p *TPipelineFragmentParams) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + if !p.IsSetFileScanParams() { + return TPipelineFragmentParams_FileScanParams_DEFAULT + } + return p.FileScanParams } -func (p *TExportStatusResult_) ReadField1(iprot thrift.TProtocol) error { - p.Status = status.NewTStatus() - if err := p.Status.Read(iprot); err != nil { - return err +var TPipelineFragmentParams_GroupCommit_DEFAULT bool = false + +func (p *TPipelineFragmentParams) GetGroupCommit() (v bool) { + if !p.IsSetGroupCommit() { + return TPipelineFragmentParams_GroupCommit_DEFAULT } - return nil + return p.GroupCommit } -func (p *TExportStatusResult_) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.State = types.TExportState(v) +var TPipelineFragmentParams_LoadStreamPerNode_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetLoadStreamPerNode() (v int32) { + if !p.IsSetLoadStreamPerNode() { + return TPipelineFragmentParams_LoadStreamPerNode_DEFAULT } - return nil + return *p.LoadStreamPerNode } -func (p *TExportStatusResult_) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +var TPipelineFragmentParams_TotalLoadStreams_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetTotalLoadStreams() (v int32) { + if !p.IsSetTotalLoadStreams() { + return TPipelineFragmentParams_TotalLoadStreams_DEFAULT } - p.Files = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + return *p.TotalLoadStreams +} + +var TPipelineFragmentParams_NumLocalSink_DEFAULT int32 - p.Files = append(p.Files, _elem) +func (p *TPipelineFragmentParams) GetNumLocalSink() (v int32) { + if !p.IsSetNumLocalSink() { + return TPipelineFragmentParams_NumLocalSink_DEFAULT } - if err := iprot.ReadListEnd(); err != nil { - return err + return *p.NumLocalSink +} + +var TPipelineFragmentParams_NumBuckets_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetNumBuckets() (v int32) { + if !p.IsSetNumBuckets() { + return TPipelineFragmentParams_NumBuckets_DEFAULT } - return nil + return *p.NumBuckets } -func (p *TExportStatusResult_) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = 
oprot.WriteStructBegin("TExportStatusResult"); err != nil { - goto WriteStructBeginError +var TPipelineFragmentParams_BucketSeqToInstanceIdx_DEFAULT map[int32]int32 + +func (p *TPipelineFragmentParams) GetBucketSeqToInstanceIdx() (v map[int32]int32) { + if !p.IsSetBucketSeqToInstanceIdx() { + return TPipelineFragmentParams_BucketSeqToInstanceIdx_DEFAULT } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } + return p.BucketSeqToInstanceIdx +} + +var TPipelineFragmentParams_PerNodeSharedScans_DEFAULT map[types.TPlanNodeId]bool +func (p *TPipelineFragmentParams) GetPerNodeSharedScans() (v map[types.TPlanNodeId]bool) { + if !p.IsSetPerNodeSharedScans() { + return TPipelineFragmentParams_PerNodeSharedScans_DEFAULT } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + return p.PerNodeSharedScans +} + +var TPipelineFragmentParams_ParallelInstances_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetParallelInstances() (v int32) { + if !p.IsSetParallelInstances() { + return TPipelineFragmentParams_ParallelInstances_DEFAULT } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + return *p.ParallelInstances +} + +var TPipelineFragmentParams_TotalInstances_DEFAULT int32 + +func (p *TPipelineFragmentParams) GetTotalInstances() (v int32) { + if !p.IsSetTotalInstances() { + return TPipelineFragmentParams_TotalInstances_DEFAULT } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) + return *p.TotalInstances } -func (p *TExportStatusResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError +var TPipelineFragmentParams_ShuffleIdxToInstanceIdx_DEFAULT map[int32]int32 + +func (p *TPipelineFragmentParams) GetShuffleIdxToInstanceIdx() (v map[int32]int32) { + if !p.IsSetShuffleIdxToInstanceIdx() { + return TPipelineFragmentParams_ShuffleIdxToInstanceIdx_DEFAULT } - if err := p.Status.Write(oprot); err != nil { - return err + return p.ShuffleIdxToInstanceIdx +} + +var TPipelineFragmentParams_IsNereids_DEFAULT bool = true + +func (p *TPipelineFragmentParams) GetIsNereids() (v bool) { + if !p.IsSetIsNereids() { + return TPipelineFragmentParams_IsNereids_DEFAULT } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + return p.IsNereids +} + +var TPipelineFragmentParams_WalId_DEFAULT int64 + +func (p *TPipelineFragmentParams) GetWalId() (v int64) { + if !p.IsSetWalId() { + return TPipelineFragmentParams_WalId_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return *p.WalId } -func (p *TExportStatusResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("state", thrift.I32, 2); err != nil { - goto WriteFieldBeginError +var 
TPipelineFragmentParams_ContentLength_DEFAULT int64 + +func (p *TPipelineFragmentParams) GetContentLength() (v int64) { + if !p.IsSetContentLength() { + return TPipelineFragmentParams_ContentLength_DEFAULT } - if err := oprot.WriteI32(int32(p.State)); err != nil { - return err + return *p.ContentLength +} + +var TPipelineFragmentParams_CurrentConnectFe_DEFAULT *types.TNetworkAddress + +func (p *TPipelineFragmentParams) GetCurrentConnectFe() (v *types.TNetworkAddress) { + if !p.IsSetCurrentConnectFe() { + return TPipelineFragmentParams_CurrentConnectFe_DEFAULT } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + return p.CurrentConnectFe +} + +var TPipelineFragmentParams_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPipelineFragmentParams) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPipelineFragmentParams_TopnFilterSourceNodeIds_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return p.TopnFilterSourceNodeIds } -func (p *TExportStatusResult_) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetFiles() { - if err = oprot.WriteFieldBegin("files", thrift.LIST, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Files)); err != nil { - return err - } - for _, v := range p.Files { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +var TPipelineFragmentParams_IsMowTable_DEFAULT bool + +func (p *TPipelineFragmentParams) GetIsMowTable() (v bool) { + if !p.IsSetIsMowTable() { + return TPipelineFragmentParams_IsMowTable_DEFAULT } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return *p.IsMowTable +} +func (p *TPipelineFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { + p.ProtocolVersion = val +} +func (p *TPipelineFragmentParams) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TPipelineFragmentParams) SetFragmentId(val *int32) { + p.FragmentId = val +} +func (p *TPipelineFragmentParams) SetPerExchNumSenders(val map[types.TPlanNodeId]int32) { + p.PerExchNumSenders = val +} +func (p *TPipelineFragmentParams) SetDescTbl(val *descriptors.TDescriptorTable) { + p.DescTbl = val +} +func (p *TPipelineFragmentParams) SetResourceInfo(val *types.TResourceInfo) { + p.ResourceInfo = val +} +func (p *TPipelineFragmentParams) SetDestinations(val []*datasinks.TPlanFragmentDestination) { + p.Destinations = val +} +func (p *TPipelineFragmentParams) SetNumSenders(val *int32) { + p.NumSenders = val +} +func (p *TPipelineFragmentParams) SetSendQueryStatisticsWithEveryBatch(val *bool) { + p.SendQueryStatisticsWithEveryBatch = val +} +func (p *TPipelineFragmentParams) SetCoord(val *types.TNetworkAddress) { + p.Coord = val +} +func (p *TPipelineFragmentParams) SetQueryGlobals(val *TQueryGlobals) { + p.QueryGlobals = val +} +func (p *TPipelineFragmentParams) SetQueryOptions(val *TQueryOptions) { + p.QueryOptions = val +} +func (p *TPipelineFragmentParams) SetImportLabel(val *string) { + p.ImportLabel = val +} +func (p 
*TPipelineFragmentParams) SetDbName(val *string) { + p.DbName = val +} +func (p *TPipelineFragmentParams) SetLoadJobId(val *int64) { + p.LoadJobId = val +} +func (p *TPipelineFragmentParams) SetLoadErrorHubInfo(val *TLoadErrorHubInfo) { + p.LoadErrorHubInfo = val +} +func (p *TPipelineFragmentParams) SetFragmentNumOnHost(val *int32) { + p.FragmentNumOnHost = val +} +func (p *TPipelineFragmentParams) SetBackendId(val *int64) { + p.BackendId = val +} +func (p *TPipelineFragmentParams) SetNeedWaitExecutionTrigger(val bool) { + p.NeedWaitExecutionTrigger = val +} +func (p *TPipelineFragmentParams) SetInstancesSharingHashTable(val []*types.TUniqueId) { + p.InstancesSharingHashTable = val +} +func (p *TPipelineFragmentParams) SetIsSimplifiedParam(val bool) { + p.IsSimplifiedParam = val +} +func (p *TPipelineFragmentParams) SetGlobalDict(val *TGlobalDict) { + p.GlobalDict = val +} +func (p *TPipelineFragmentParams) SetFragment(val *planner.TPlanFragment) { + p.Fragment = val +} +func (p *TPipelineFragmentParams) SetLocalParams(val []*TPipelineInstanceParams) { + p.LocalParams = val +} +func (p *TPipelineFragmentParams) SetWorkloadGroups(val []*TPipelineWorkloadGroup) { + p.WorkloadGroups = val +} +func (p *TPipelineFragmentParams) SetTxnConf(val *TTxnParams) { + p.TxnConf = val +} +func (p *TPipelineFragmentParams) SetTableName(val *string) { + p.TableName = val +} +func (p *TPipelineFragmentParams) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + p.FileScanParams = val +} +func (p *TPipelineFragmentParams) SetGroupCommit(val bool) { + p.GroupCommit = val +} +func (p *TPipelineFragmentParams) SetLoadStreamPerNode(val *int32) { + p.LoadStreamPerNode = val +} +func (p *TPipelineFragmentParams) SetTotalLoadStreams(val *int32) { + p.TotalLoadStreams = val +} +func (p *TPipelineFragmentParams) SetNumLocalSink(val *int32) { + p.NumLocalSink = val +} +func (p *TPipelineFragmentParams) SetNumBuckets(val *int32) { + p.NumBuckets = val +} +func (p *TPipelineFragmentParams) SetBucketSeqToInstanceIdx(val map[int32]int32) { + p.BucketSeqToInstanceIdx = val +} +func (p *TPipelineFragmentParams) SetPerNodeSharedScans(val map[types.TPlanNodeId]bool) { + p.PerNodeSharedScans = val +} +func (p *TPipelineFragmentParams) SetParallelInstances(val *int32) { + p.ParallelInstances = val +} +func (p *TPipelineFragmentParams) SetTotalInstances(val *int32) { + p.TotalInstances = val +} +func (p *TPipelineFragmentParams) SetShuffleIdxToInstanceIdx(val map[int32]int32) { + p.ShuffleIdxToInstanceIdx = val +} +func (p *TPipelineFragmentParams) SetIsNereids(val bool) { + p.IsNereids = val +} +func (p *TPipelineFragmentParams) SetWalId(val *int64) { + p.WalId = val +} +func (p *TPipelineFragmentParams) SetContentLength(val *int64) { + p.ContentLength = val +} +func (p *TPipelineFragmentParams) SetCurrentConnectFe(val *types.TNetworkAddress) { + p.CurrentConnectFe = val +} +func (p *TPipelineFragmentParams) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} +func (p *TPipelineFragmentParams) SetIsMowTable(val *bool) { + p.IsMowTable = val +} + +var fieldIDToName_TPipelineFragmentParams = map[int16]string{ + 1: "protocol_version", + 2: "query_id", + 3: "fragment_id", + 4: "per_exch_num_senders", + 5: "desc_tbl", + 6: "resource_info", + 7: "destinations", + 8: "num_senders", + 9: "send_query_statistics_with_every_batch", + 10: "coord", + 11: "query_globals", + 12: "query_options", + 13: "import_label", + 14: "db_name", + 15: "load_job_id", + 16: "load_error_hub_info", + 
17: "fragment_num_on_host", + 18: "backend_id", + 19: "need_wait_execution_trigger", + 20: "instances_sharing_hash_table", + 21: "is_simplified_param", + 22: "global_dict", + 23: "fragment", + 24: "local_params", + 26: "workload_groups", + 27: "txn_conf", + 28: "table_name", + 29: "file_scan_params", + 30: "group_commit", + 31: "load_stream_per_node", + 32: "total_load_streams", + 33: "num_local_sink", + 34: "num_buckets", + 35: "bucket_seq_to_instance_idx", + 36: "per_node_shared_scans", + 37: "parallel_instances", + 38: "total_instances", + 39: "shuffle_idx_to_instance_idx", + 40: "is_nereids", + 41: "wal_id", + 42: "content_length", + 43: "current_connect_fe", + 44: "topn_filter_source_node_ids", + 1000: "is_mow_table", +} + +func (p *TPipelineFragmentParams) IsSetQueryId() bool { + return p.QueryId != nil } -func (p *TExportStatusResult_) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TExportStatusResult_(%+v)", *p) +func (p *TPipelineFragmentParams) IsSetFragmentId() bool { + return p.FragmentId != nil } -func (p *TExportStatusResult_) DeepEqual(ano *TExportStatusResult_) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Status) { - return false - } - if !p.Field2DeepEqual(ano.State) { - return false - } - if !p.Field3DeepEqual(ano.Files) { - return false - } - return true +func (p *TPipelineFragmentParams) IsSetDescTbl() bool { + return p.DescTbl != nil } -func (p *TExportStatusResult_) Field1DeepEqual(src *status.TStatus) bool { +func (p *TPipelineFragmentParams) IsSetResourceInfo() bool { + return p.ResourceInfo != nil +} - if !p.Status.DeepEqual(src) { - return false - } - return true +func (p *TPipelineFragmentParams) IsSetNumSenders() bool { + return p.NumSenders != nil } -func (p *TExportStatusResult_) Field2DeepEqual(src types.TExportState) bool { - if p.State != src { - return false - } - return true +func (p *TPipelineFragmentParams) IsSetSendQueryStatisticsWithEveryBatch() bool { + return p.SendQueryStatisticsWithEveryBatch != nil } -func (p *TExportStatusResult_) Field3DeepEqual(src []string) bool { - if len(p.Files) != len(src) { - return false - } - for i, v := range p.Files { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true +func (p *TPipelineFragmentParams) IsSetCoord() bool { + return p.Coord != nil } -type TPipelineInstanceParams struct { - FragmentInstanceId *types.TUniqueId `thrift:"fragment_instance_id,1,required" frugal:"1,required,types.TUniqueId" json:"fragment_instance_id"` - BuildHashTableForBroadcastJoin bool `thrift:"build_hash_table_for_broadcast_join,2,optional" frugal:"2,optional,bool" json:"build_hash_table_for_broadcast_join,omitempty"` - PerNodeScanRanges map[types.TPlanNodeId][]*TScanRangeParams `thrift:"per_node_scan_ranges,3,required" frugal:"3,required,map>" json:"per_node_scan_ranges"` - SenderId *int32 `thrift:"sender_id,4,optional" frugal:"4,optional,i32" json:"sender_id,omitempty"` - RuntimeFilterParams *TRuntimeFilterParams `thrift:"runtime_filter_params,5,optional" frugal:"5,optional,TRuntimeFilterParams" json:"runtime_filter_params,omitempty"` - BackendNum *int32 `thrift:"backend_num,6,optional" frugal:"6,optional,i32" json:"backend_num,omitempty"` - PerNodeSharedScans map[types.TPlanNodeId]bool `thrift:"per_node_shared_scans,7,optional" frugal:"7,optional,map" json:"per_node_shared_scans,omitempty"` +func (p *TPipelineFragmentParams) IsSetQueryGlobals() bool { + return p.QueryGlobals != nil } -func 
NewTPipelineInstanceParams() *TPipelineInstanceParams { - return &TPipelineInstanceParams{ +func (p *TPipelineFragmentParams) IsSetQueryOptions() bool { + return p.QueryOptions != nil +} - BuildHashTableForBroadcastJoin: false, - } +func (p *TPipelineFragmentParams) IsSetImportLabel() bool { + return p.ImportLabel != nil } -func (p *TPipelineInstanceParams) InitDefault() { - *p = TPipelineInstanceParams{ +func (p *TPipelineFragmentParams) IsSetDbName() bool { + return p.DbName != nil +} - BuildHashTableForBroadcastJoin: false, - } +func (p *TPipelineFragmentParams) IsSetLoadJobId() bool { + return p.LoadJobId != nil } -var TPipelineInstanceParams_FragmentInstanceId_DEFAULT *types.TUniqueId +func (p *TPipelineFragmentParams) IsSetLoadErrorHubInfo() bool { + return p.LoadErrorHubInfo != nil +} -func (p *TPipelineInstanceParams) GetFragmentInstanceId() (v *types.TUniqueId) { - if !p.IsSetFragmentInstanceId() { - return TPipelineInstanceParams_FragmentInstanceId_DEFAULT - } - return p.FragmentInstanceId +func (p *TPipelineFragmentParams) IsSetFragmentNumOnHost() bool { + return p.FragmentNumOnHost != nil } -var TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT bool = false +func (p *TPipelineFragmentParams) IsSetBackendId() bool { + return p.BackendId != nil +} -func (p *TPipelineInstanceParams) GetBuildHashTableForBroadcastJoin() (v bool) { - if !p.IsSetBuildHashTableForBroadcastJoin() { - return TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT - } - return p.BuildHashTableForBroadcastJoin +func (p *TPipelineFragmentParams) IsSetNeedWaitExecutionTrigger() bool { + return p.NeedWaitExecutionTrigger != TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT } -func (p *TPipelineInstanceParams) GetPerNodeScanRanges() (v map[types.TPlanNodeId][]*TScanRangeParams) { - return p.PerNodeScanRanges +func (p *TPipelineFragmentParams) IsSetInstancesSharingHashTable() bool { + return p.InstancesSharingHashTable != nil } -var TPipelineInstanceParams_SenderId_DEFAULT int32 +func (p *TPipelineFragmentParams) IsSetIsSimplifiedParam() bool { + return p.IsSimplifiedParam != TPipelineFragmentParams_IsSimplifiedParam_DEFAULT +} -func (p *TPipelineInstanceParams) GetSenderId() (v int32) { - if !p.IsSetSenderId() { - return TPipelineInstanceParams_SenderId_DEFAULT - } - return *p.SenderId +func (p *TPipelineFragmentParams) IsSetGlobalDict() bool { + return p.GlobalDict != nil } -var TPipelineInstanceParams_RuntimeFilterParams_DEFAULT *TRuntimeFilterParams +func (p *TPipelineFragmentParams) IsSetFragment() bool { + return p.Fragment != nil +} -func (p *TPipelineInstanceParams) GetRuntimeFilterParams() (v *TRuntimeFilterParams) { - if !p.IsSetRuntimeFilterParams() { - return TPipelineInstanceParams_RuntimeFilterParams_DEFAULT - } - return p.RuntimeFilterParams +func (p *TPipelineFragmentParams) IsSetWorkloadGroups() bool { + return p.WorkloadGroups != nil } -var TPipelineInstanceParams_BackendNum_DEFAULT int32 +func (p *TPipelineFragmentParams) IsSetTxnConf() bool { + return p.TxnConf != nil +} -func (p *TPipelineInstanceParams) GetBackendNum() (v int32) { - if !p.IsSetBackendNum() { - return TPipelineInstanceParams_BackendNum_DEFAULT - } - return *p.BackendNum +func (p *TPipelineFragmentParams) IsSetTableName() bool { + return p.TableName != nil } -var TPipelineInstanceParams_PerNodeSharedScans_DEFAULT map[types.TPlanNodeId]bool +func (p *TPipelineFragmentParams) IsSetFileScanParams() bool { + return p.FileScanParams != nil +} -func (p *TPipelineInstanceParams) GetPerNodeSharedScans() (v 
map[types.TPlanNodeId]bool) { - if !p.IsSetPerNodeSharedScans() { - return TPipelineInstanceParams_PerNodeSharedScans_DEFAULT - } - return p.PerNodeSharedScans +func (p *TPipelineFragmentParams) IsSetGroupCommit() bool { + return p.GroupCommit != TPipelineFragmentParams_GroupCommit_DEFAULT } -func (p *TPipelineInstanceParams) SetFragmentInstanceId(val *types.TUniqueId) { - p.FragmentInstanceId = val + +func (p *TPipelineFragmentParams) IsSetLoadStreamPerNode() bool { + return p.LoadStreamPerNode != nil } -func (p *TPipelineInstanceParams) SetBuildHashTableForBroadcastJoin(val bool) { - p.BuildHashTableForBroadcastJoin = val + +func (p *TPipelineFragmentParams) IsSetTotalLoadStreams() bool { + return p.TotalLoadStreams != nil } -func (p *TPipelineInstanceParams) SetPerNodeScanRanges(val map[types.TPlanNodeId][]*TScanRangeParams) { - p.PerNodeScanRanges = val + +func (p *TPipelineFragmentParams) IsSetNumLocalSink() bool { + return p.NumLocalSink != nil } -func (p *TPipelineInstanceParams) SetSenderId(val *int32) { - p.SenderId = val + +func (p *TPipelineFragmentParams) IsSetNumBuckets() bool { + return p.NumBuckets != nil } -func (p *TPipelineInstanceParams) SetRuntimeFilterParams(val *TRuntimeFilterParams) { - p.RuntimeFilterParams = val + +func (p *TPipelineFragmentParams) IsSetBucketSeqToInstanceIdx() bool { + return p.BucketSeqToInstanceIdx != nil } -func (p *TPipelineInstanceParams) SetBackendNum(val *int32) { - p.BackendNum = val + +func (p *TPipelineFragmentParams) IsSetPerNodeSharedScans() bool { + return p.PerNodeSharedScans != nil } -func (p *TPipelineInstanceParams) SetPerNodeSharedScans(val map[types.TPlanNodeId]bool) { - p.PerNodeSharedScans = val + +func (p *TPipelineFragmentParams) IsSetParallelInstances() bool { + return p.ParallelInstances != nil } -var fieldIDToName_TPipelineInstanceParams = map[int16]string{ - 1: "fragment_instance_id", - 2: "build_hash_table_for_broadcast_join", - 3: "per_node_scan_ranges", - 4: "sender_id", - 5: "runtime_filter_params", - 6: "backend_num", - 7: "per_node_shared_scans", +func (p *TPipelineFragmentParams) IsSetTotalInstances() bool { + return p.TotalInstances != nil } -func (p *TPipelineInstanceParams) IsSetFragmentInstanceId() bool { - return p.FragmentInstanceId != nil +func (p *TPipelineFragmentParams) IsSetShuffleIdxToInstanceIdx() bool { + return p.ShuffleIdxToInstanceIdx != nil } -func (p *TPipelineInstanceParams) IsSetBuildHashTableForBroadcastJoin() bool { - return p.BuildHashTableForBroadcastJoin != TPipelineInstanceParams_BuildHashTableForBroadcastJoin_DEFAULT +func (p *TPipelineFragmentParams) IsSetIsNereids() bool { + return p.IsNereids != TPipelineFragmentParams_IsNereids_DEFAULT } -func (p *TPipelineInstanceParams) IsSetSenderId() bool { - return p.SenderId != nil +func (p *TPipelineFragmentParams) IsSetWalId() bool { + return p.WalId != nil } -func (p *TPipelineInstanceParams) IsSetRuntimeFilterParams() bool { - return p.RuntimeFilterParams != nil +func (p *TPipelineFragmentParams) IsSetContentLength() bool { + return p.ContentLength != nil } -func (p *TPipelineInstanceParams) IsSetBackendNum() bool { - return p.BackendNum != nil +func (p *TPipelineFragmentParams) IsSetCurrentConnectFe() bool { + return p.CurrentConnectFe != nil } -func (p *TPipelineInstanceParams) IsSetPerNodeSharedScans() bool { - return p.PerNodeSharedScans != nil +func (p *TPipelineFragmentParams) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil } -func (p *TPipelineInstanceParams) Read(iprot thrift.TProtocol) (err error) { 
+func (p *TPipelineFragmentParams) IsSetIsMowTable() bool { + return p.IsMowTable != nil +} + +func (p *TPipelineFragmentParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 - var issetFragmentInstanceId bool = false - var issetPerNodeScanRanges bool = false + var issetProtocolVersion bool = false + var issetQueryId bool = false + var issetPerExchNumSenders bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -20352,83 +27227,365 @@ func (p *TPipelineInstanceParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetProtocolVersion = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetQueryId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetPerExchNumSenders = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I32 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: if fieldTypeId == thrift.STRUCT { - if err = p.ReadField1(iprot); err != nil { + if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - issetFragmentInstanceId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 2: + case 12: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRING { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRING { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } 
+ } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I64 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.I32 { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.I64 { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: if fieldTypeId == thrift.BOOL { - if err = p.ReadField2(iprot); err != nil { + if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.LIST { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 3: + case 21: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: + if fieldTypeId == thrift.LIST { + if err = p.ReadField24(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 26: + if fieldTypeId == thrift.LIST { + if err = p.ReadField26(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 27: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField27(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 28: + if fieldTypeId == thrift.STRING { + if err = p.ReadField28(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 29: if fieldTypeId == thrift.MAP { - if err = p.ReadField3(iprot); err != nil { + if err = p.ReadField29(iprot); err != nil { goto ReadFieldError } - issetPerNodeScanRanges = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 30: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField30(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 4: + case 31: if fieldTypeId == thrift.I32 { - if err = p.ReadField4(iprot); err != nil { + if err = p.ReadField31(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 32: + if fieldTypeId == thrift.I32 { + if err = p.ReadField32(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { + case 33: + if fieldTypeId == thrift.I32 { + if err = p.ReadField33(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 34: + if fieldTypeId == thrift.I32 { + if err = p.ReadField34(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 6: + case 35: + if fieldTypeId == thrift.MAP { + if err = p.ReadField35(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 36: + if fieldTypeId == thrift.MAP { + if err = p.ReadField36(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 37: if fieldTypeId == thrift.I32 { - if err = p.ReadField6(iprot); err != nil { + if err = p.ReadField37(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 38: + if fieldTypeId == thrift.I32 { + if err = p.ReadField38(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 7: + case 39: if fieldTypeId == thrift.MAP { - if err = p.ReadField7(iprot); err != nil { + if err = p.ReadField39(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 40: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField40(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 41: + if fieldTypeId == thrift.I64 { + if err = p.ReadField41(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 42: + if fieldTypeId == thrift.I64 { + if err = p.ReadField42(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 43: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField43(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 44: + if fieldTypeId == thrift.LIST { + if err = p.ReadField44(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1000: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1000(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20437,13 +27594,18 @@ func (p *TPipelineInstanceParams) 
Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } - if !issetFragmentInstanceId { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } - if !issetPerNodeScanRanges { - fieldId = 3 + if !issetQueryId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPerExchNumSenders { + fieldId = 4 goto RequiredFieldNotSetError } return nil @@ -20452,7 +27614,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineInstanceParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20461,32 +27623,45 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineInstanceParams[fieldId])) + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineFragmentParams[fieldId])) } -func (p *TPipelineInstanceParams) ReadField1(iprot thrift.TProtocol) error { - p.FragmentInstanceId = types.NewTUniqueId() - if err := p.FragmentInstanceId.Read(iprot); err != nil { +func (p *TPipelineFragmentParams) ReadField1(iprot thrift.TProtocol) error { + + var _field PaloInternalServiceVersion + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = PaloInternalServiceVersion(v) + } + p.ProtocolVersion = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.QueryId = _field return nil } +func (p *TPipelineFragmentParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BuildHashTableForBroadcastJoin = v + _field = &v } + p.FragmentId = _field return nil } - -func (p *TPipelineInstanceParams) ReadField3(iprot thrift.TProtocol) error { +func (p *TPipelineFragmentParams) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.PerNodeScanRanges = make(map[types.TPlanNodeId][]*TScanRangeParams, size) + _field := make(map[types.TPlanNodeId]int32, size) for i := 0; i < size; i++ { var _key types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { @@ -20495,695 +27670,851 @@ func (p *TPipelineInstanceParams) ReadField3(iprot thrift.TProtocol) error { _key = v } - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - _val := make([]*TScanRangeParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTScanRangeParams() - if err := _elem.Read(iprot); err != nil { - return err - } - - _val = append(_val, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _val int32 + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _val = v } - p.PerNodeScanRanges[_key] = _val + _field[_key] = _val } if err := 
iprot.ReadMapEnd(); err != nil { return err } + p.PerExchNumSenders = _field return nil } - -func (p *TPipelineInstanceParams) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *TPipelineFragmentParams) ReadField5(iprot thrift.TProtocol) error { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { + return err + } + p.DescTbl = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField6(iprot thrift.TProtocol) error { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { return err - } else { - p.SenderId = &v } + p.ResourceInfo = _field return nil } +func (p *TPipelineFragmentParams) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*datasinks.TPlanFragmentDestination, 0, size) + values := make([]datasinks.TPlanFragmentDestination, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TPipelineInstanceParams) ReadField5(iprot thrift.TProtocol) error { - p.RuntimeFilterParams = NewTRuntimeFilterParams() - if err := p.RuntimeFilterParams.Read(iprot); err != nil { + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { return err } + p.Destinations = _field return nil } +func (p *TPipelineFragmentParams) ReadField8(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) ReadField6(iprot thrift.TProtocol) error { + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BackendNum = &v + _field = &v } + p.NumSenders = _field return nil } +func (p *TPipelineFragmentParams) ReadField9(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) ReadField7(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } - p.PerNodeSharedScans = make(map[types.TPlanNodeId]bool, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - - var _val bool - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - _val = v - } - - p.PerNodeSharedScans[_key] = _val + p.SendQueryStatisticsWithEveryBatch = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField10(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } - if err := iprot.ReadMapEnd(); err != nil { + p.Coord = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField11(iprot thrift.TProtocol) error { + _field := NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { return err } + p.QueryGlobals = _field return nil } - -func (p *TPipelineInstanceParams) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPipelineInstanceParams"); err != nil { - goto WriteStructBeginError +func (p *TPipelineFragmentParams) ReadField12(iprot thrift.TProtocol) error { + _field := NewTQueryOptions() + if err := _field.Read(iprot); err != nil { + return err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - 
goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } + p.QueryOptions = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField13(iprot thrift.TProtocol) error { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + p.ImportLabel = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField14(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } + p.DbName = _field return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } +func (p *TPipelineFragmentParams) ReadField15(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("fragment_instance_id", thrift.STRUCT, 1); err != nil { - goto WriteFieldBeginError + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - if err := p.FragmentInstanceId.Write(oprot); err != nil { + p.LoadJobId = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField16(iprot thrift.TProtocol) error { + _field := NewTLoadErrorHubInfo() + if err := _field.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + p.LoadErrorHubInfo = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField17(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } + p.FragmentNumOnHost = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } +func (p *TPipelineFragmentParams) ReadField18(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetBuildHashTableForBroadcastJoin() { - if err = oprot.WriteFieldBegin("build_hash_table_for_broadcast_join", thrift.BOOL, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.BuildHashTableForBroadcastJoin); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.BackendId = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } +func (p 
*TPipelineFragmentParams) ReadField19(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) writeField3(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("per_node_scan_ranges", thrift.MAP, 3); err != nil { - goto WriteFieldBeginError + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } - if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)); err != nil { + p.NeedWaitExecutionTrigger = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField20(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } - for k, v := range p.PerNodeScanRanges { + _field := make([]*types.TUniqueId, 0, size) + values := make([]types.TUniqueId, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() - if err := oprot.WriteI32(k); err != nil { + if err := _elem.Read(iprot); err != nil { return err } - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return err - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } + _field = append(_field, _elem) } - if err := oprot.WriteMapEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.InstancesSharingHashTable = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TPipelineFragmentParams) ReadField21(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetSenderId() { - if err = oprot.WriteFieldBegin("sender_id", thrift.I32, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.SenderId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } + p.IsSimplifiedParam = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } - -func (p *TPipelineInstanceParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetRuntimeFilterParams() { - if err = oprot.WriteFieldBegin("runtime_filter_params", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.RuntimeFilterParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParams) ReadField22(iprot thrift.TProtocol) error { + _field := NewTGlobalDict() + if err := _field.Read(iprot); err != nil { + return err + } + p.GlobalDict = _field + return nil +} +func (p *TPipelineFragmentParams) ReadField23(iprot thrift.TProtocol) error { + _field := planner.NewTPlanFragment() + if err := _field.Read(iprot); err != nil { + return err } + p.Fragment = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p 
*TPipelineFragmentParams) ReadField24(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineInstanceParams, 0, size) + values := make([]TPipelineInstanceParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TPipelineInstanceParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendNum() { - if err = oprot.WriteFieldBegin("backend_num", thrift.I32, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.BackendNum); err != nil { + if err := _elem.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err } + p.LocalParams = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } +func (p *TPipelineFragmentParams) ReadField26(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineWorkloadGroup, 0, size) + values := make([]TPipelineWorkloadGroup, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TPipelineInstanceParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetPerNodeSharedScans() { - if err = oprot.WriteFieldBegin("per_node_shared_scans", thrift.MAP, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)); err != nil { + if err := _elem.Read(iprot); err != nil { return err } - for k, v := range p.PerNodeSharedScans { - - if err := oprot.WriteI32(k); err != nil { - return err - } - if err := oprot.WriteBool(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + _field = append(_field, _elem) } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.WorkloadGroups = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } - -func (p *TPipelineInstanceParams) String() string { - if p == nil { - return "" +func (p *TPipelineFragmentParams) ReadField27(iprot thrift.TProtocol) error { + _field := NewTTxnParams() + if err := _field.Read(iprot); err != nil { + return err } - return fmt.Sprintf("TPipelineInstanceParams(%+v)", *p) + p.TxnConf = _field + return nil } +func (p *TPipelineFragmentParams) ReadField28(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) DeepEqual(ano *TPipelineInstanceParams) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.FragmentInstanceId) { - return false - } - if !p.Field2DeepEqual(ano.BuildHashTableForBroadcastJoin) { - return false - } - if !p.Field3DeepEqual(ano.PerNodeScanRanges) { - return false - } - if !p.Field4DeepEqual(ano.SenderId) { - return false + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - if !p.Field5DeepEqual(ano.RuntimeFilterParams) { - return false + p.TableName = _field + return nil +} +func (p 
*TPipelineFragmentParams) ReadField29(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err } - if !p.Field6DeepEqual(ano.BackendNum) { - return false + _field := make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + values := make([]plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val } - if !p.Field7DeepEqual(ano.PerNodeSharedScans) { - return false + if err := iprot.ReadMapEnd(); err != nil { + return err } - return true + p.FileScanParams = _field + return nil } +func (p *TPipelineFragmentParams) ReadField30(iprot thrift.TProtocol) error { -func (p *TPipelineInstanceParams) Field1DeepEqual(src *types.TUniqueId) bool { - - if !p.FragmentInstanceId.DeepEqual(src) { - return false + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } - return true + p.GroupCommit = _field + return nil } -func (p *TPipelineInstanceParams) Field2DeepEqual(src bool) bool { +func (p *TPipelineFragmentParams) ReadField31(iprot thrift.TProtocol) error { - if p.BuildHashTableForBroadcastJoin != src { - return false + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return true + p.LoadStreamPerNode = _field + return nil } -func (p *TPipelineInstanceParams) Field3DeepEqual(src map[types.TPlanNodeId][]*TScanRangeParams) bool { +func (p *TPipelineFragmentParams) ReadField32(iprot thrift.TProtocol) error { - if len(p.PerNodeScanRanges) != len(src) { - return false - } - for k, v := range p.PerNodeScanRanges { - _src := src[k] - if len(v) != len(_src) { - return false - } - for i, v := range v { - _src1 := _src[i] - if !v.DeepEqual(_src1) { - return false - } - } + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return true + p.TotalLoadStreams = _field + return nil } -func (p *TPipelineInstanceParams) Field4DeepEqual(src *int32) bool { +func (p *TPipelineFragmentParams) ReadField33(iprot thrift.TProtocol) error { - if p.SenderId == src { - return true - } else if p.SenderId == nil || src == nil { - return false - } - if *p.SenderId != *src { - return false + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return true + p.NumLocalSink = _field + return nil } -func (p *TPipelineInstanceParams) Field5DeepEqual(src *TRuntimeFilterParams) bool { +func (p *TPipelineFragmentParams) ReadField34(iprot thrift.TProtocol) error { - if !p.RuntimeFilterParams.DeepEqual(src) { - return false + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return true + p.NumBuckets = _field + return nil } -func (p *TPipelineInstanceParams) Field6DeepEqual(src *int32) bool { +func (p *TPipelineFragmentParams) ReadField35(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } - if p.BackendNum == src { - return true - } else if p.BackendNum == nil || src == nil { - return false + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + 
return err + } else { + _val = v + } + + _field[_key] = _val } - if *p.BackendNum != *src { - return false + if err := iprot.ReadMapEnd(); err != nil { + return err } - return true + p.BucketSeqToInstanceIdx = _field + return nil } -func (p *TPipelineInstanceParams) Field7DeepEqual(src map[types.TPlanNodeId]bool) bool { - - if len(p.PerNodeSharedScans) != len(src) { - return false +func (p *TPipelineFragmentParams) ReadField36(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err } - for k, v := range p.PerNodeSharedScans { - _src := src[k] - if v != _src { - return false + _field := make(map[types.TPlanNodeId]bool, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v } - } - return true -} -type TPipelineWorkloadGroup struct { - Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"` - Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"` - Properties map[string]string `thrift:"properties,3,optional" frugal:"3,optional,map" json:"properties,omitempty"` - Version *int64 `thrift:"version,4,optional" frugal:"4,optional,i64" json:"version,omitempty"` -} + var _val bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _val = v + } -func NewTPipelineWorkloadGroup() *TPipelineWorkloadGroup { - return &TPipelineWorkloadGroup{} + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.PerNodeSharedScans = _field + return nil } +func (p *TPipelineFragmentParams) ReadField37(iprot thrift.TProtocol) error { -func (p *TPipelineWorkloadGroup) InitDefault() { - *p = TPipelineWorkloadGroup{} + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ParallelInstances = _field + return nil } +func (p *TPipelineFragmentParams) ReadField38(iprot thrift.TProtocol) error { -var TPipelineWorkloadGroup_Id_DEFAULT int64 - -func (p *TPipelineWorkloadGroup) GetId() (v int64) { - if !p.IsSetId() { - return TPipelineWorkloadGroup_Id_DEFAULT + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } - return *p.Id + p.TotalInstances = _field + return nil } +func (p *TPipelineFragmentParams) ReadField39(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -var TPipelineWorkloadGroup_Name_DEFAULT string + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } -func (p *TPipelineWorkloadGroup) GetName() (v string) { - if !p.IsSetName() { - return TPipelineWorkloadGroup_Name_DEFAULT + _field[_key] = _val } - return *p.Name + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.ShuffleIdxToInstanceIdx = _field + return nil } +func (p *TPipelineFragmentParams) ReadField40(iprot thrift.TProtocol) error { -var TPipelineWorkloadGroup_Properties_DEFAULT map[string]string - -func (p *TPipelineWorkloadGroup) GetProperties() (v map[string]string) { - if !p.IsSetProperties() { - return TPipelineWorkloadGroup_Properties_DEFAULT + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } - return p.Properties + p.IsNereids = _field + return nil } +func (p 
*TPipelineFragmentParams) ReadField41(iprot thrift.TProtocol) error { -var TPipelineWorkloadGroup_Version_DEFAULT int64 - -func (p *TPipelineWorkloadGroup) GetVersion() (v int64) { - if !p.IsSetVersion() { - return TPipelineWorkloadGroup_Version_DEFAULT + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - return *p.Version -} -func (p *TPipelineWorkloadGroup) SetId(val *int64) { - p.Id = val -} -func (p *TPipelineWorkloadGroup) SetName(val *string) { - p.Name = val -} -func (p *TPipelineWorkloadGroup) SetProperties(val map[string]string) { - p.Properties = val -} -func (p *TPipelineWorkloadGroup) SetVersion(val *int64) { - p.Version = val + p.WalId = _field + return nil } +func (p *TPipelineFragmentParams) ReadField42(iprot thrift.TProtocol) error { -var fieldIDToName_TPipelineWorkloadGroup = map[int16]string{ - 1: "id", - 2: "name", - 3: "properties", - 4: "version", + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ContentLength = _field + return nil } - -func (p *TPipelineWorkloadGroup) IsSetId() bool { - return p.Id != nil +func (p *TPipelineFragmentParams) ReadField43(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.CurrentConnectFe = _field + return nil } +func (p *TPipelineFragmentParams) ReadField44(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { -func (p *TPipelineWorkloadGroup) IsSetName() bool { - return p.Name != nil -} + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } -func (p *TPipelineWorkloadGroup) IsSetProperties() bool { - return p.Properties != nil + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field + return nil } +func (p *TPipelineFragmentParams) ReadField1000(iprot thrift.TProtocol) error { -func (p *TPipelineWorkloadGroup) IsSetVersion() bool { - return p.Version != nil + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsMowTable = _field + return nil } -func (p *TPipelineWorkloadGroup) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType +func (p *TPipelineFragmentParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + if err = oprot.WriteStructBegin("TPipelineFragmentParams"); err != nil { + goto WriteStructBeginError } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + 
if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } + if err = p.writeField26(oprot); err != nil { + fieldId = 26 + goto WriteFieldError + } + if err = p.writeField27(oprot); err != nil { + fieldId = 27 + goto WriteFieldError + } + if err = p.writeField28(oprot); err != nil { + fieldId = 28 + goto WriteFieldError + } + if err = p.writeField29(oprot); err != nil { + fieldId = 29 + goto WriteFieldError + } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } + if err = p.writeField31(oprot); err != nil { + fieldId = 31 + goto WriteFieldError + } + if err = p.writeField32(oprot); err != nil { + fieldId = 32 + goto WriteFieldError + } + if err = p.writeField33(oprot); err != nil { + fieldId = 33 + goto WriteFieldError + } + if err = p.writeField34(oprot); err != nil { + fieldId = 34 + goto WriteFieldError } - if fieldTypeId == thrift.STOP { - break + if err = p.writeField35(oprot); err != nil { + fieldId = 35 + goto WriteFieldError } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.MAP { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = p.writeField36(oprot); err != nil { + fieldId = 36 + goto WriteFieldError } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if err = p.writeField37(oprot); err != nil { + fieldId = 37 + goto WriteFieldError + } + if err = 
p.writeField38(oprot); err != nil { + fieldId = 38 + goto WriteFieldError + } + if err = p.writeField39(oprot); err != nil { + fieldId = 39 + goto WriteFieldError + } + if err = p.writeField40(oprot); err != nil { + fieldId = 40 + goto WriteFieldError + } + if err = p.writeField41(oprot); err != nil { + fieldId = 41 + goto WriteFieldError + } + if err = p.writeField42(oprot); err != nil { + fieldId = 42 + goto WriteFieldError + } + if err = p.writeField43(oprot); err != nil { + fieldId = 43 + goto WriteFieldError + } + if err = p.writeField44(oprot); err != nil { + fieldId = 44 + goto WriteFieldError + } + if err = p.writeField1000(oprot); err != nil { + fieldId = 1000 + goto WriteFieldError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineWorkloadGroup[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPipelineWorkloadGroup) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TPipelineFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { return err - } else { - p.Id = &v + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TPipelineFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryId.Write(oprot); err != nil { return err - } else { - p.Name = &v + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) ReadField3(iprot 
thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.Properties = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v +func (p *TPipelineFragmentParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentId() { + if err = oprot.WriteFieldBegin("fragment_id", thrift.I32, 3); err != nil { + goto WriteFieldBeginError } - - var _val string - if v, err := iprot.ReadString(); err != nil { + if err := oprot.WriteI32(*p.FragmentId); err != nil { return err - } else { - _val = v } - - p.Properties[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Version = &v +func (p *TPipelineFragmentParams) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("per_exch_num_senders", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError } - return nil -} - -func (p *TPipelineWorkloadGroup) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPipelineWorkloadGroup"); err != nil { - goto WriteStructBeginError + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.PerExchNumSenders)); err != nil { + return err } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError + for k, v := range p.PerExchNumSenders { + if err := oprot.WriteI32(k); err != nil { + return err } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError + if err := oprot.WriteI32(v); err != nil { + return err } - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError + if err := oprot.WriteMapEnd(); err != nil { + return err } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetId() { - if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { +func (p *TPipelineFragmentParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDescTbl() { + if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 5); err != nil { 
goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.Id); err != nil { + if err := p.DescTbl.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21192,17 +28523,17 @@ func (p *TPipelineWorkloadGroup) writeField1(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetName() { - if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { +func (p *TPipelineFragmentParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceInfo() { + if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Name); err != nil { + if err := p.ResourceInfo.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21211,30 +28542,42 @@ func (p *TPipelineWorkloadGroup) writeField2(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPipelineFragmentParams) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("destinations", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Destinations)); err != nil { + return err + } + for _, v := range p.Destinations { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetProperties() { - if err = oprot.WriteFieldBegin("properties", thrift.MAP, 3); err != nil { +func (p *TPipelineFragmentParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetNumSenders() { + if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 8); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { - return err - } - for k, v := range p.Properties { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteI32(*p.NumSenders); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21243,17 +28586,17 @@ func (p *TPipelineWorkloadGroup) writeField3(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 
begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TPipelineWorkloadGroup) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVersion() { - if err = oprot.WriteFieldBegin("version", thrift.I64, 4); err != nil { +func (p *TPipelineFragmentParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetSendQueryStatisticsWithEveryBatch() { + if err = oprot.WriteFieldBegin("send_query_statistics_with_every_batch", thrift.BOOL, 9); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.Version); err != nil { + if err := oprot.WriteBool(*p.SendQueryStatisticsWithEveryBatch); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -21262,1925 +28605,2023 @@ func (p *TPipelineWorkloadGroup) writeField4(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TPipelineWorkloadGroup) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TPipelineWorkloadGroup(%+v)", *p) -} - -func (p *TPipelineWorkloadGroup) DeepEqual(ano *TPipelineWorkloadGroup) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.Id) { - return false - } - if !p.Field2DeepEqual(ano.Name) { - return false - } - if !p.Field3DeepEqual(ano.Properties) { - return false - } - if !p.Field4DeepEqual(ano.Version) { - return false - } - return true -} - -func (p *TPipelineWorkloadGroup) Field1DeepEqual(src *int64) bool { - - if p.Id == src { - return true - } else if p.Id == nil || src == nil { - return false - } - if *p.Id != *src { - return false - } - return true -} -func (p *TPipelineWorkloadGroup) Field2DeepEqual(src *string) bool { - - if p.Name == src { - return true - } else if p.Name == nil || src == nil { - return false - } - if strings.Compare(*p.Name, *src) != 0 { - return false - } - return true -} -func (p *TPipelineWorkloadGroup) Field3DeepEqual(src map[string]string) bool { - - if len(p.Properties) != len(src) { - return false - } - for k, v := range p.Properties { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true -} -func (p *TPipelineWorkloadGroup) Field4DeepEqual(src *int64) bool { - - if p.Version == src { - return true - } else if p.Version == nil || src == nil { - return false - } - if *p.Version != *src { - return false - } - return true -} - -type TPipelineFragmentParams struct { - ProtocolVersion PaloInternalServiceVersion `thrift:"protocol_version,1,required" frugal:"1,required,PaloInternalServiceVersion" json:"protocol_version"` - QueryId *types.TUniqueId `thrift:"query_id,2,required" frugal:"2,required,types.TUniqueId" json:"query_id"` - FragmentId *int32 `thrift:"fragment_id,3,optional" frugal:"3,optional,i32" json:"fragment_id,omitempty"` - PerExchNumSenders map[types.TPlanNodeId]int32 `thrift:"per_exch_num_senders,4,required" frugal:"4,required,map" json:"per_exch_num_senders"` - DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,5,optional" 
frugal:"5,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` - ResourceInfo *types.TResourceInfo `thrift:"resource_info,6,optional" frugal:"6,optional,types.TResourceInfo" json:"resource_info,omitempty"` - Destinations []*datasinks.TPlanFragmentDestination `thrift:"destinations,7" frugal:"7,default,list" json:"destinations"` - NumSenders *int32 `thrift:"num_senders,8,optional" frugal:"8,optional,i32" json:"num_senders,omitempty"` - SendQueryStatisticsWithEveryBatch *bool `thrift:"send_query_statistics_with_every_batch,9,optional" frugal:"9,optional,bool" json:"send_query_statistics_with_every_batch,omitempty"` - Coord *types.TNetworkAddress `thrift:"coord,10,optional" frugal:"10,optional,types.TNetworkAddress" json:"coord,omitempty"` - QueryGlobals *TQueryGlobals `thrift:"query_globals,11,optional" frugal:"11,optional,TQueryGlobals" json:"query_globals,omitempty"` - QueryOptions *TQueryOptions `thrift:"query_options,12,optional" frugal:"12,optional,TQueryOptions" json:"query_options,omitempty"` - ImportLabel *string `thrift:"import_label,13,optional" frugal:"13,optional,string" json:"import_label,omitempty"` - DbName *string `thrift:"db_name,14,optional" frugal:"14,optional,string" json:"db_name,omitempty"` - LoadJobId *int64 `thrift:"load_job_id,15,optional" frugal:"15,optional,i64" json:"load_job_id,omitempty"` - LoadErrorHubInfo *TLoadErrorHubInfo `thrift:"load_error_hub_info,16,optional" frugal:"16,optional,TLoadErrorHubInfo" json:"load_error_hub_info,omitempty"` - FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,17,optional" frugal:"17,optional,i32" json:"fragment_num_on_host,omitempty"` - BackendId *int64 `thrift:"backend_id,18,optional" frugal:"18,optional,i64" json:"backend_id,omitempty"` - NeedWaitExecutionTrigger bool `thrift:"need_wait_execution_trigger,19,optional" frugal:"19,optional,bool" json:"need_wait_execution_trigger,omitempty"` - InstancesSharingHashTable []*types.TUniqueId `thrift:"instances_sharing_hash_table,20,optional" frugal:"20,optional,list" json:"instances_sharing_hash_table,omitempty"` - IsSimplifiedParam bool `thrift:"is_simplified_param,21,optional" frugal:"21,optional,bool" json:"is_simplified_param,omitempty"` - GlobalDict *TGlobalDict `thrift:"global_dict,22,optional" frugal:"22,optional,TGlobalDict" json:"global_dict,omitempty"` - Fragment *planner.TPlanFragment `thrift:"fragment,23,optional" frugal:"23,optional,planner.TPlanFragment" json:"fragment,omitempty"` - LocalParams []*TPipelineInstanceParams `thrift:"local_params,24" frugal:"24,default,list" json:"local_params"` - WorkloadGroups []*TPipelineWorkloadGroup `thrift:"workload_groups,26,optional" frugal:"26,optional,list" json:"workload_groups,omitempty"` - TxnConf *TTxnParams `thrift:"txn_conf,27,optional" frugal:"27,optional,TTxnParams" json:"txn_conf,omitempty"` - TableName *string `thrift:"table_name,28,optional" frugal:"28,optional,string" json:"table_name,omitempty"` - FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,29,optional" frugal:"29,optional,map" json:"file_scan_params,omitempty"` - GroupCommit bool `thrift:"group_commit,30,optional" frugal:"30,optional,bool" json:"group_commit,omitempty"` -} - -func NewTPipelineFragmentParams() *TPipelineFragmentParams { - return &TPipelineFragmentParams{ - - NeedWaitExecutionTrigger: false, - IsSimplifiedParam: false, - GroupCommit: false, - } -} - -func (p *TPipelineFragmentParams) InitDefault() { - *p = TPipelineFragmentParams{ - - NeedWaitExecutionTrigger: false, - 
IsSimplifiedParam: false, - GroupCommit: false, - } -} - -func (p *TPipelineFragmentParams) GetProtocolVersion() (v PaloInternalServiceVersion) { - return p.ProtocolVersion -} - -var TPipelineFragmentParams_QueryId_DEFAULT *types.TUniqueId - -func (p *TPipelineFragmentParams) GetQueryId() (v *types.TUniqueId) { - if !p.IsSetQueryId() { - return TPipelineFragmentParams_QueryId_DEFAULT - } - return p.QueryId -} - -var TPipelineFragmentParams_FragmentId_DEFAULT int32 - -func (p *TPipelineFragmentParams) GetFragmentId() (v int32) { - if !p.IsSetFragmentId() { - return TPipelineFragmentParams_FragmentId_DEFAULT - } - return *p.FragmentId -} - -func (p *TPipelineFragmentParams) GetPerExchNumSenders() (v map[types.TPlanNodeId]int32) { - return p.PerExchNumSenders -} - -var TPipelineFragmentParams_DescTbl_DEFAULT *descriptors.TDescriptorTable - -func (p *TPipelineFragmentParams) GetDescTbl() (v *descriptors.TDescriptorTable) { - if !p.IsSetDescTbl() { - return TPipelineFragmentParams_DescTbl_DEFAULT - } - return p.DescTbl -} - -var TPipelineFragmentParams_ResourceInfo_DEFAULT *types.TResourceInfo - -func (p *TPipelineFragmentParams) GetResourceInfo() (v *types.TResourceInfo) { - if !p.IsSetResourceInfo() { - return TPipelineFragmentParams_ResourceInfo_DEFAULT - } - return p.ResourceInfo -} - -func (p *TPipelineFragmentParams) GetDestinations() (v []*datasinks.TPlanFragmentDestination) { - return p.Destinations -} - -var TPipelineFragmentParams_NumSenders_DEFAULT int32 - -func (p *TPipelineFragmentParams) GetNumSenders() (v int32) { - if !p.IsSetNumSenders() { - return TPipelineFragmentParams_NumSenders_DEFAULT - } - return *p.NumSenders -} - -var TPipelineFragmentParams_SendQueryStatisticsWithEveryBatch_DEFAULT bool - -func (p *TPipelineFragmentParams) GetSendQueryStatisticsWithEveryBatch() (v bool) { - if !p.IsSetSendQueryStatisticsWithEveryBatch() { - return TPipelineFragmentParams_SendQueryStatisticsWithEveryBatch_DEFAULT - } - return *p.SendQueryStatisticsWithEveryBatch -} - -var TPipelineFragmentParams_Coord_DEFAULT *types.TNetworkAddress - -func (p *TPipelineFragmentParams) GetCoord() (v *types.TNetworkAddress) { - if !p.IsSetCoord() { - return TPipelineFragmentParams_Coord_DEFAULT - } - return p.Coord + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -var TPipelineFragmentParams_QueryGlobals_DEFAULT *TQueryGlobals - -func (p *TPipelineFragmentParams) GetQueryGlobals() (v *TQueryGlobals) { - if !p.IsSetQueryGlobals() { - return TPipelineFragmentParams_QueryGlobals_DEFAULT +func (p *TPipelineFragmentParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetCoord() { + if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.Coord.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.QueryGlobals + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -var TPipelineFragmentParams_QueryOptions_DEFAULT *TQueryOptions - -func (p *TPipelineFragmentParams) GetQueryOptions() (v *TQueryOptions) { - if !p.IsSetQueryOptions() { - return TPipelineFragmentParams_QueryOptions_DEFAULT +func (p *TPipelineFragmentParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryGlobals() { + if err = oprot.WriteFieldBegin("query_globals", 
thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryGlobals.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.QueryOptions + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -var TPipelineFragmentParams_ImportLabel_DEFAULT string - -func (p *TPipelineFragmentParams) GetImportLabel() (v string) { - if !p.IsSetImportLabel() { - return TPipelineFragmentParams_ImportLabel_DEFAULT +func (p *TPipelineFragmentParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 12); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryOptions.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.ImportLabel + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -var TPipelineFragmentParams_DbName_DEFAULT string - -func (p *TPipelineFragmentParams) GetDbName() (v string) { - if !p.IsSetDbName() { - return TPipelineFragmentParams_DbName_DEFAULT +func (p *TPipelineFragmentParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetImportLabel() { + if err = oprot.WriteFieldBegin("import_label", thrift.STRING, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ImportLabel); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.DbName + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -var TPipelineFragmentParams_LoadJobId_DEFAULT int64 - -func (p *TPipelineFragmentParams) GetLoadJobId() (v int64) { - if !p.IsSetLoadJobId() { - return TPipelineFragmentParams_LoadJobId_DEFAULT +func (p *TPipelineFragmentParams) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DbName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.LoadJobId + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } -var TPipelineFragmentParams_LoadErrorHubInfo_DEFAULT *TLoadErrorHubInfo - -func (p *TPipelineFragmentParams) GetLoadErrorHubInfo() (v *TLoadErrorHubInfo) { - if !p.IsSetLoadErrorHubInfo() { - return TPipelineFragmentParams_LoadErrorHubInfo_DEFAULT +func (p *TPipelineFragmentParams) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadJobId() { + if err = oprot.WriteFieldBegin("load_job_id", thrift.I64, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LoadJobId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - 
return p.LoadErrorHubInfo + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } -var TPipelineFragmentParams_FragmentNumOnHost_DEFAULT int32 - -func (p *TPipelineFragmentParams) GetFragmentNumOnHost() (v int32) { - if !p.IsSetFragmentNumOnHost() { - return TPipelineFragmentParams_FragmentNumOnHost_DEFAULT +func (p *TPipelineFragmentParams) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadErrorHubInfo() { + if err = oprot.WriteFieldBegin("load_error_hub_info", thrift.STRUCT, 16); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadErrorHubInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.FragmentNumOnHost + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } -var TPipelineFragmentParams_BackendId_DEFAULT int64 - -func (p *TPipelineFragmentParams) GetBackendId() (v int64) { - if !p.IsSetBackendId() { - return TPipelineFragmentParams_BackendId_DEFAULT +func (p *TPipelineFragmentParams) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentNumOnHost() { + if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 17); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.BackendId + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } -var TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT bool = false - -func (p *TPipelineFragmentParams) GetNeedWaitExecutionTrigger() (v bool) { - if !p.IsSetNeedWaitExecutionTrigger() { - return TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT +func (p *TPipelineFragmentParams) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.NeedWaitExecutionTrigger + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } -var TPipelineFragmentParams_InstancesSharingHashTable_DEFAULT []*types.TUniqueId - -func (p *TPipelineFragmentParams) GetInstancesSharingHashTable() (v []*types.TUniqueId) { - if !p.IsSetInstancesSharingHashTable() { - return TPipelineFragmentParams_InstancesSharingHashTable_DEFAULT +func (p *TPipelineFragmentParams) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetNeedWaitExecutionTrigger() { + if err = oprot.WriteFieldBegin("need_wait_execution_trigger", thrift.BOOL, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.NeedWaitExecutionTrigger); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError + } } - return p.InstancesSharingHashTable + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) } -var TPipelineFragmentParams_IsSimplifiedParam_DEFAULT bool = false - -func (p *TPipelineFragmentParams) GetIsSimplifiedParam() (v bool) { - if !p.IsSetIsSimplifiedParam() { - return TPipelineFragmentParams_IsSimplifiedParam_DEFAULT +func (p *TPipelineFragmentParams) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetInstancesSharingHashTable() { + if err = oprot.WriteFieldBegin("instances_sharing_hash_table", thrift.LIST, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.InstancesSharingHashTable)); err != nil { + return err + } + for _, v := range p.InstancesSharingHashTable { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.IsSimplifiedParam + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } -var TPipelineFragmentParams_GlobalDict_DEFAULT *TGlobalDict - -func (p *TPipelineFragmentParams) GetGlobalDict() (v *TGlobalDict) { - if !p.IsSetGlobalDict() { - return TPipelineFragmentParams_GlobalDict_DEFAULT +func (p *TPipelineFragmentParams) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetIsSimplifiedParam() { + if err = oprot.WriteFieldBegin("is_simplified_param", thrift.BOOL, 21); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsSimplifiedParam); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.GlobalDict + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) } -var TPipelineFragmentParams_Fragment_DEFAULT *planner.TPlanFragment - -func (p *TPipelineFragmentParams) GetFragment() (v *planner.TPlanFragment) { - if !p.IsSetFragment() { - return TPipelineFragmentParams_Fragment_DEFAULT +func (p *TPipelineFragmentParams) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetGlobalDict() { + if err = oprot.WriteFieldBegin("global_dict", thrift.STRUCT, 22); err != nil { + goto WriteFieldBeginError + } + if err := p.GlobalDict.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.Fragment -} - -func (p *TPipelineFragmentParams) GetLocalParams() (v []*TPipelineInstanceParams) { - return p.LocalParams + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) } -var TPipelineFragmentParams_WorkloadGroups_DEFAULT []*TPipelineWorkloadGroup - -func (p *TPipelineFragmentParams) GetWorkloadGroups() (v []*TPipelineWorkloadGroup) { - if !p.IsSetWorkloadGroups() { - return TPipelineFragmentParams_WorkloadGroups_DEFAULT +func (p *TPipelineFragmentParams) writeField23(oprot thrift.TProtocol) (err 
error) { + if p.IsSetFragment() { + if err = oprot.WriteFieldBegin("fragment", thrift.STRUCT, 23); err != nil { + goto WriteFieldBeginError + } + if err := p.Fragment.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.WorkloadGroups + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) } -var TPipelineFragmentParams_TxnConf_DEFAULT *TTxnParams - -func (p *TPipelineFragmentParams) GetTxnConf() (v *TTxnParams) { - if !p.IsSetTxnConf() { - return TPipelineFragmentParams_TxnConf_DEFAULT +func (p *TPipelineFragmentParams) writeField24(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("local_params", thrift.LIST, 24); err != nil { + goto WriteFieldBeginError } - return p.TxnConf + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.LocalParams)); err != nil { + return err + } + for _, v := range p.LocalParams { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) } -var TPipelineFragmentParams_TableName_DEFAULT string - -func (p *TPipelineFragmentParams) GetTableName() (v string) { - if !p.IsSetTableName() { - return TPipelineFragmentParams_TableName_DEFAULT +func (p *TPipelineFragmentParams) writeField26(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroups() { + if err = oprot.WriteFieldBegin("workload_groups", thrift.LIST, 26); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.WorkloadGroups)); err != nil { + return err + } + for _, v := range p.WorkloadGroups { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.TableName + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) } -var TPipelineFragmentParams_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams - -func (p *TPipelineFragmentParams) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { - if !p.IsSetFileScanParams() { - return TPipelineFragmentParams_FileScanParams_DEFAULT +func (p *TPipelineFragmentParams) writeField27(oprot thrift.TProtocol) (err error) { + if p.IsSetTxnConf() { + if err = oprot.WriteFieldBegin("txn_conf", thrift.STRUCT, 27); err != nil { + goto WriteFieldBeginError + } + if err := p.TxnConf.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.FileScanParams + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) } -var TPipelineFragmentParams_GroupCommit_DEFAULT bool = false - -func (p 
*TPipelineFragmentParams) GetGroupCommit() (v bool) { - if !p.IsSetGroupCommit() { - return TPipelineFragmentParams_GroupCommit_DEFAULT +func (p *TPipelineFragmentParams) writeField28(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 28); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.GroupCommit -} -func (p *TPipelineFragmentParams) SetProtocolVersion(val PaloInternalServiceVersion) { - p.ProtocolVersion = val -} -func (p *TPipelineFragmentParams) SetQueryId(val *types.TUniqueId) { - p.QueryId = val -} -func (p *TPipelineFragmentParams) SetFragmentId(val *int32) { - p.FragmentId = val -} -func (p *TPipelineFragmentParams) SetPerExchNumSenders(val map[types.TPlanNodeId]int32) { - p.PerExchNumSenders = val -} -func (p *TPipelineFragmentParams) SetDescTbl(val *descriptors.TDescriptorTable) { - p.DescTbl = val -} -func (p *TPipelineFragmentParams) SetResourceInfo(val *types.TResourceInfo) { - p.ResourceInfo = val -} -func (p *TPipelineFragmentParams) SetDestinations(val []*datasinks.TPlanFragmentDestination) { - p.Destinations = val -} -func (p *TPipelineFragmentParams) SetNumSenders(val *int32) { - p.NumSenders = val -} -func (p *TPipelineFragmentParams) SetSendQueryStatisticsWithEveryBatch(val *bool) { - p.SendQueryStatisticsWithEveryBatch = val -} -func (p *TPipelineFragmentParams) SetCoord(val *types.TNetworkAddress) { - p.Coord = val -} -func (p *TPipelineFragmentParams) SetQueryGlobals(val *TQueryGlobals) { - p.QueryGlobals = val -} -func (p *TPipelineFragmentParams) SetQueryOptions(val *TQueryOptions) { - p.QueryOptions = val -} -func (p *TPipelineFragmentParams) SetImportLabel(val *string) { - p.ImportLabel = val -} -func (p *TPipelineFragmentParams) SetDbName(val *string) { - p.DbName = val -} -func (p *TPipelineFragmentParams) SetLoadJobId(val *int64) { - p.LoadJobId = val -} -func (p *TPipelineFragmentParams) SetLoadErrorHubInfo(val *TLoadErrorHubInfo) { - p.LoadErrorHubInfo = val -} -func (p *TPipelineFragmentParams) SetFragmentNumOnHost(val *int32) { - p.FragmentNumOnHost = val -} -func (p *TPipelineFragmentParams) SetBackendId(val *int64) { - p.BackendId = val -} -func (p *TPipelineFragmentParams) SetNeedWaitExecutionTrigger(val bool) { - p.NeedWaitExecutionTrigger = val -} -func (p *TPipelineFragmentParams) SetInstancesSharingHashTable(val []*types.TUniqueId) { - p.InstancesSharingHashTable = val -} -func (p *TPipelineFragmentParams) SetIsSimplifiedParam(val bool) { - p.IsSimplifiedParam = val -} -func (p *TPipelineFragmentParams) SetGlobalDict(val *TGlobalDict) { - p.GlobalDict = val -} -func (p *TPipelineFragmentParams) SetFragment(val *planner.TPlanFragment) { - p.Fragment = val -} -func (p *TPipelineFragmentParams) SetLocalParams(val []*TPipelineInstanceParams) { - p.LocalParams = val -} -func (p *TPipelineFragmentParams) SetWorkloadGroups(val []*TPipelineWorkloadGroup) { - p.WorkloadGroups = val -} -func (p *TPipelineFragmentParams) SetTxnConf(val *TTxnParams) { - p.TxnConf = val + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) } -func (p *TPipelineFragmentParams) SetTableName(val *string) { - p.TableName = val + +func (p *TPipelineFragmentParams) writeField29(oprot 
thrift.TProtocol) (err error) { + if p.IsSetFileScanParams() { + if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 29); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.FileScanParams)); err != nil { + return err + } + for k, v := range p.FileScanParams { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) } -func (p *TPipelineFragmentParams) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { - p.FileScanParams = val + +func (p *TPipelineFragmentParams) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommit() { + if err = oprot.WriteFieldBegin("group_commit", thrift.BOOL, 30); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.GroupCommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) } -func (p *TPipelineFragmentParams) SetGroupCommit(val bool) { - p.GroupCommit = val + +func (p *TPipelineFragmentParams) writeField31(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadStreamPerNode() { + if err = oprot.WriteFieldBegin("load_stream_per_node", thrift.I32, 31); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.LoadStreamPerNode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 31 end error: ", p), err) } -var fieldIDToName_TPipelineFragmentParams = map[int16]string{ - 1: "protocol_version", - 2: "query_id", - 3: "fragment_id", - 4: "per_exch_num_senders", - 5: "desc_tbl", - 6: "resource_info", - 7: "destinations", - 8: "num_senders", - 9: "send_query_statistics_with_every_batch", - 10: "coord", - 11: "query_globals", - 12: "query_options", - 13: "import_label", - 14: "db_name", - 15: "load_job_id", - 16: "load_error_hub_info", - 17: "fragment_num_on_host", - 18: "backend_id", - 19: "need_wait_execution_trigger", - 20: "instances_sharing_hash_table", - 21: "is_simplified_param", - 22: "global_dict", - 23: "fragment", - 24: "local_params", - 26: "workload_groups", - 27: "txn_conf", - 28: "table_name", - 29: "file_scan_params", - 30: "group_commit", +func (p *TPipelineFragmentParams) writeField32(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalLoadStreams() { + if err = oprot.WriteFieldBegin("total_load_streams", thrift.I32, 32); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TotalLoadStreams); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 begin error: ", p), 
err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 32 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetQueryId() bool { - return p.QueryId != nil +func (p *TPipelineFragmentParams) writeField33(oprot thrift.TProtocol) (err error) { + if p.IsSetNumLocalSink() { + if err = oprot.WriteFieldBegin("num_local_sink", thrift.I32, 33); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumLocalSink); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetFragmentId() bool { - return p.FragmentId != nil +func (p *TPipelineFragmentParams) writeField34(oprot thrift.TProtocol) (err error) { + if p.IsSetNumBuckets() { + if err = oprot.WriteFieldBegin("num_buckets", thrift.I32, 34); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumBuckets); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetDescTbl() bool { - return p.DescTbl != nil +func (p *TPipelineFragmentParams) writeField35(oprot thrift.TProtocol) (err error) { + if p.IsSetBucketSeqToInstanceIdx() { + if err = oprot.WriteFieldBegin("bucket_seq_to_instance_idx", thrift.MAP, 35); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.BucketSeqToInstanceIdx)); err != nil { + return err + } + for k, v := range p.BucketSeqToInstanceIdx { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetResourceInfo() bool { - return p.ResourceInfo != nil +func (p *TPipelineFragmentParams) writeField36(oprot thrift.TProtocol) (err error) { + if p.IsSetPerNodeSharedScans() { + if err = oprot.WriteFieldBegin("per_node_shared_scans", thrift.MAP, 36); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)); err != nil { + return err + } + for k, v := range p.PerNodeSharedScans { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteBool(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetNumSenders() bool { - return 
p.NumSenders != nil +func (p *TPipelineFragmentParams) writeField37(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelInstances() { + if err = oprot.WriteFieldBegin("parallel_instances", thrift.I32, 37); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ParallelInstances); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 37 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 37 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetSendQueryStatisticsWithEveryBatch() bool { - return p.SendQueryStatisticsWithEveryBatch != nil +func (p *TPipelineFragmentParams) writeField38(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalInstances() { + if err = oprot.WriteFieldBegin("total_instances", thrift.I32, 38); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.TotalInstances); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 38 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 38 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetCoord() bool { - return p.Coord != nil +func (p *TPipelineFragmentParams) writeField39(oprot thrift.TProtocol) (err error) { + if p.IsSetShuffleIdxToInstanceIdx() { + if err = oprot.WriteFieldBegin("shuffle_idx_to_instance_idx", thrift.MAP, 39); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.ShuffleIdxToInstanceIdx)); err != nil { + return err + } + for k, v := range p.ShuffleIdxToInstanceIdx { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 39 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 39 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetQueryGlobals() bool { - return p.QueryGlobals != nil +func (p *TPipelineFragmentParams) writeField40(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNereids() { + if err = oprot.WriteFieldBegin("is_nereids", thrift.BOOL, 40); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNereids); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 40 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 40 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetQueryOptions() bool { - return p.QueryOptions != nil +func (p *TPipelineFragmentParams) writeField41(oprot thrift.TProtocol) (err error) { + if p.IsSetWalId() { + if err = oprot.WriteFieldBegin("wal_id", thrift.I64, 41); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.WalId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil 
+WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 41 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 41 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetImportLabel() bool { - return p.ImportLabel != nil +func (p *TPipelineFragmentParams) writeField42(oprot thrift.TProtocol) (err error) { + if p.IsSetContentLength() { + if err = oprot.WriteFieldBegin("content_length", thrift.I64, 42); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ContentLength); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 42 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetDbName() bool { - return p.DbName != nil +func (p *TPipelineFragmentParams) writeField43(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentConnectFe() { + if err = oprot.WriteFieldBegin("current_connect_fe", thrift.STRUCT, 43); err != nil { + goto WriteFieldBeginError + } + if err := p.CurrentConnectFe.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 43 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetLoadJobId() bool { - return p.LoadJobId != nil +func (p *TPipelineFragmentParams) writeField44(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 44); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetLoadErrorHubInfo() bool { - return p.LoadErrorHubInfo != nil +func (p *TPipelineFragmentParams) writeField1000(oprot thrift.TProtocol) (err error) { + if p.IsSetIsMowTable() { + if err = oprot.WriteFieldBegin("is_mow_table", thrift.BOOL, 1000); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsMowTable); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err) } -func (p *TPipelineFragmentParams) IsSetFragmentNumOnHost() bool { - return p.FragmentNumOnHost != nil -} +func (p *TPipelineFragmentParams) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPipelineFragmentParams(%+v)", *p) -func (p *TPipelineFragmentParams) 
IsSetBackendId() bool { - return p.BackendId != nil } -func (p *TPipelineFragmentParams) IsSetNeedWaitExecutionTrigger() bool { - return p.NeedWaitExecutionTrigger != TPipelineFragmentParams_NeedWaitExecutionTrigger_DEFAULT +func (p *TPipelineFragmentParams) DeepEqual(ano *TPipelineFragmentParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ProtocolVersion) { + return false + } + if !p.Field2DeepEqual(ano.QueryId) { + return false + } + if !p.Field3DeepEqual(ano.FragmentId) { + return false + } + if !p.Field4DeepEqual(ano.PerExchNumSenders) { + return false + } + if !p.Field5DeepEqual(ano.DescTbl) { + return false + } + if !p.Field6DeepEqual(ano.ResourceInfo) { + return false + } + if !p.Field7DeepEqual(ano.Destinations) { + return false + } + if !p.Field8DeepEqual(ano.NumSenders) { + return false + } + if !p.Field9DeepEqual(ano.SendQueryStatisticsWithEveryBatch) { + return false + } + if !p.Field10DeepEqual(ano.Coord) { + return false + } + if !p.Field11DeepEqual(ano.QueryGlobals) { + return false + } + if !p.Field12DeepEqual(ano.QueryOptions) { + return false + } + if !p.Field13DeepEqual(ano.ImportLabel) { + return false + } + if !p.Field14DeepEqual(ano.DbName) { + return false + } + if !p.Field15DeepEqual(ano.LoadJobId) { + return false + } + if !p.Field16DeepEqual(ano.LoadErrorHubInfo) { + return false + } + if !p.Field17DeepEqual(ano.FragmentNumOnHost) { + return false + } + if !p.Field18DeepEqual(ano.BackendId) { + return false + } + if !p.Field19DeepEqual(ano.NeedWaitExecutionTrigger) { + return false + } + if !p.Field20DeepEqual(ano.InstancesSharingHashTable) { + return false + } + if !p.Field21DeepEqual(ano.IsSimplifiedParam) { + return false + } + if !p.Field22DeepEqual(ano.GlobalDict) { + return false + } + if !p.Field23DeepEqual(ano.Fragment) { + return false + } + if !p.Field24DeepEqual(ano.LocalParams) { + return false + } + if !p.Field26DeepEqual(ano.WorkloadGroups) { + return false + } + if !p.Field27DeepEqual(ano.TxnConf) { + return false + } + if !p.Field28DeepEqual(ano.TableName) { + return false + } + if !p.Field29DeepEqual(ano.FileScanParams) { + return false + } + if !p.Field30DeepEqual(ano.GroupCommit) { + return false + } + if !p.Field31DeepEqual(ano.LoadStreamPerNode) { + return false + } + if !p.Field32DeepEqual(ano.TotalLoadStreams) { + return false + } + if !p.Field33DeepEqual(ano.NumLocalSink) { + return false + } + if !p.Field34DeepEqual(ano.NumBuckets) { + return false + } + if !p.Field35DeepEqual(ano.BucketSeqToInstanceIdx) { + return false + } + if !p.Field36DeepEqual(ano.PerNodeSharedScans) { + return false + } + if !p.Field37DeepEqual(ano.ParallelInstances) { + return false + } + if !p.Field38DeepEqual(ano.TotalInstances) { + return false + } + if !p.Field39DeepEqual(ano.ShuffleIdxToInstanceIdx) { + return false + } + if !p.Field40DeepEqual(ano.IsNereids) { + return false + } + if !p.Field41DeepEqual(ano.WalId) { + return false + } + if !p.Field42DeepEqual(ano.ContentLength) { + return false + } + if !p.Field43DeepEqual(ano.CurrentConnectFe) { + return false + } + if !p.Field44DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } + if !p.Field1000DeepEqual(ano.IsMowTable) { + return false + } + return true } -func (p *TPipelineFragmentParams) IsSetInstancesSharingHashTable() bool { - return p.InstancesSharingHashTable != nil -} +func (p *TPipelineFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { -func (p *TPipelineFragmentParams) 
IsSetIsSimplifiedParam() bool { - return p.IsSimplifiedParam != TPipelineFragmentParams_IsSimplifiedParam_DEFAULT + if p.ProtocolVersion != src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field2DeepEqual(src *types.TUniqueId) bool { -func (p *TPipelineFragmentParams) IsSetGlobalDict() bool { - return p.GlobalDict != nil + if !p.QueryId.DeepEqual(src) { + return false + } + return true } +func (p *TPipelineFragmentParams) Field3DeepEqual(src *int32) bool { -func (p *TPipelineFragmentParams) IsSetFragment() bool { - return p.Fragment != nil + if p.FragmentId == src { + return true + } else if p.FragmentId == nil || src == nil { + return false + } + if *p.FragmentId != *src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field4DeepEqual(src map[types.TPlanNodeId]int32) bool { -func (p *TPipelineFragmentParams) IsSetWorkloadGroups() bool { - return p.WorkloadGroups != nil + if len(p.PerExchNumSenders) != len(src) { + return false + } + for k, v := range p.PerExchNumSenders { + _src := src[k] + if v != _src { + return false + } + } + return true } +func (p *TPipelineFragmentParams) Field5DeepEqual(src *descriptors.TDescriptorTable) bool { -func (p *TPipelineFragmentParams) IsSetTxnConf() bool { - return p.TxnConf != nil + if !p.DescTbl.DeepEqual(src) { + return false + } + return true } +func (p *TPipelineFragmentParams) Field6DeepEqual(src *types.TResourceInfo) bool { -func (p *TPipelineFragmentParams) IsSetTableName() bool { - return p.TableName != nil + if !p.ResourceInfo.DeepEqual(src) { + return false + } + return true } +func (p *TPipelineFragmentParams) Field7DeepEqual(src []*datasinks.TPlanFragmentDestination) bool { -func (p *TPipelineFragmentParams) IsSetFileScanParams() bool { - return p.FileScanParams != nil + if len(p.Destinations) != len(src) { + return false + } + for i, v := range p.Destinations { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } +func (p *TPipelineFragmentParams) Field8DeepEqual(src *int32) bool { -func (p *TPipelineFragmentParams) IsSetGroupCommit() bool { - return p.GroupCommit != TPipelineFragmentParams_GroupCommit_DEFAULT + if p.NumSenders == src { + return true + } else if p.NumSenders == nil || src == nil { + return false + } + if *p.NumSenders != *src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field9DeepEqual(src *bool) bool { -func (p *TPipelineFragmentParams) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - var issetProtocolVersion bool = false - var issetQueryId bool = false - var issetPerExchNumSenders bool = false - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError + if p.SendQueryStatisticsWithEveryBatch == src { + return true + } else if p.SendQueryStatisticsWithEveryBatch == nil || src == nil { + return false } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - issetProtocolVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - issetQueryId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if 
fieldTypeId == thrift.I32 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.MAP { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - issetPerExchNumSenders = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.LIST { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.I32 { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.STRING { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.STRING { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.I64 { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField16(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.I32 { - if err = p.ReadField17(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.I64 { - if err = p.ReadField18(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField19(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 20: - if 
fieldTypeId == thrift.LIST { - if err = p.ReadField20(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField21(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField22(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 23: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField23(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 24: - if fieldTypeId == thrift.LIST { - if err = p.ReadField24(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 26: - if fieldTypeId == thrift.LIST { - if err = p.ReadField26(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 27: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField27(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 28: - if fieldTypeId == thrift.STRING { - if err = p.ReadField28(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 29: - if fieldTypeId == thrift.MAP { - if err = p.ReadField29(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 30: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField30(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if *p.SendQueryStatisticsWithEveryBatch != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field10DeepEqual(src *types.TNetworkAddress) bool { + + if !p.Coord.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field11DeepEqual(src *TQueryGlobals) bool { + + if !p.QueryGlobals.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field12DeepEqual(src *TQueryOptions) bool { + + if !p.QueryOptions.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field13DeepEqual(src *string) bool { + + if p.ImportLabel == src { + return true + } else if p.ImportLabel == nil || src == nil { + return false + } + if strings.Compare(*p.ImportLabel, *src) != 0 { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field14DeepEqual(src *string) bool { + + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { + return false + } + if strings.Compare(*p.DbName, *src) != 0 { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field15DeepEqual(src *int64) bool { + + if p.LoadJobId == src { + return true + } else if p.LoadJobId == nil || src == nil { + return false + } + if *p.LoadJobId != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field16DeepEqual(src *TLoadErrorHubInfo) bool { + 
+ if !p.LoadErrorHubInfo.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field17DeepEqual(src *int32) bool { + + if p.FragmentNumOnHost == src { + return true + } else if p.FragmentNumOnHost == nil || src == nil { + return false + } + if *p.FragmentNumOnHost != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field18DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field19DeepEqual(src bool) bool { + + if p.NeedWaitExecutionTrigger != src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field20DeepEqual(src []*types.TUniqueId) bool { + + if len(p.InstancesSharingHashTable) != len(src) { + return false + } + for i, v := range p.InstancesSharingHashTable { + _src := src[i] + if !v.DeepEqual(_src) { + return false } + } + return true +} +func (p *TPipelineFragmentParams) Field21DeepEqual(src bool) bool { - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if p.IsSimplifiedParam != src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field22DeepEqual(src *TGlobalDict) bool { + + if !p.GlobalDict.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field23DeepEqual(src *planner.TPlanFragment) bool { + + if !p.Fragment.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field24DeepEqual(src []*TPipelineInstanceParams) bool { + + if len(p.LocalParams) != len(src) { + return false + } + for i, v := range p.LocalParams { + _src := src[i] + if !v.DeepEqual(_src) { + return false } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError + return true +} +func (p *TPipelineFragmentParams) Field26DeepEqual(src []*TPipelineWorkloadGroup) bool { + + if len(p.WorkloadGroups) != len(src) { + return false + } + for i, v := range p.WorkloadGroups { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } } + return true +} +func (p *TPipelineFragmentParams) Field27DeepEqual(src *TTxnParams) bool { - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError + if !p.TxnConf.DeepEqual(src) { + return false } + return true +} +func (p *TPipelineFragmentParams) Field28DeepEqual(src *string) bool { - if !issetQueryId { - fieldId = 2 - goto RequiredFieldNotSetError + if p.TableName == src { + return true + } else if p.TableName == nil || src == nil { + return false + } + if strings.Compare(*p.TableName, *src) != 0 { + return false } + return true +} +func (p *TPipelineFragmentParams) Field29DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { - if !issetPerExchNumSenders { - fieldId = 4 - goto RequiredFieldNotSetError + if len(p.FileScanParams) != len(src) { + return false } - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParams[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + for k, v := range p.FileScanParams { + _src := src[k] + if !v.DeepEqual(_src) { + 
return false + } + } + return true +} +func (p *TPipelineFragmentParams) Field30DeepEqual(src bool) bool { -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineFragmentParams[fieldId])) + if p.GroupCommit != src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field31DeepEqual(src *int32) bool { + + if p.LoadStreamPerNode == src { + return true + } else if p.LoadStreamPerNode == nil || src == nil { + return false + } + if *p.LoadStreamPerNode != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field32DeepEqual(src *int32) bool { + + if p.TotalLoadStreams == src { + return true + } else if p.TotalLoadStreams == nil || src == nil { + return false + } + if *p.TotalLoadStreams != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field33DeepEqual(src *int32) bool { + + if p.NumLocalSink == src { + return true + } else if p.NumLocalSink == nil || src == nil { + return false + } + if *p.NumLocalSink != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field34DeepEqual(src *int32) bool { + + if p.NumBuckets == src { + return true + } else if p.NumBuckets == nil || src == nil { + return false + } + if *p.NumBuckets != *src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field35DeepEqual(src map[int32]int32) bool { + + if len(p.BucketSeqToInstanceIdx) != len(src) { + return false + } + for k, v := range p.BucketSeqToInstanceIdx { + _src := src[k] + if v != _src { + return false + } + } + return true } +func (p *TPipelineFragmentParams) Field36DeepEqual(src map[types.TPlanNodeId]bool) bool { -func (p *TPipelineFragmentParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.ProtocolVersion = PaloInternalServiceVersion(v) + if len(p.PerNodeSharedScans) != len(src) { + return false } - return nil + for k, v := range p.PerNodeSharedScans { + _src := src[k] + if v != _src { + return false + } + } + return true } +func (p *TPipelineFragmentParams) Field37DeepEqual(src *int32) bool { -func (p *TPipelineFragmentParams) ReadField2(iprot thrift.TProtocol) error { - p.QueryId = types.NewTUniqueId() - if err := p.QueryId.Read(iprot); err != nil { - return err + if p.ParallelInstances == src { + return true + } else if p.ParallelInstances == nil || src == nil { + return false } - return nil + if *p.ParallelInstances != *src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field38DeepEqual(src *int32) bool { -func (p *TPipelineFragmentParams) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.FragmentId = &v + if p.TotalInstances == src { + return true + } else if p.TotalInstances == nil || src == nil { + return false } - return nil + if *p.TotalInstances != *src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field39DeepEqual(src map[int32]int32) bool { -func (p *TPipelineFragmentParams) ReadField4(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err + if len(p.ShuffleIdxToInstanceIdx) != len(src) { + return false } - p.PerExchNumSenders = 
make(map[types.TPlanNodeId]int32, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v + for k, v := range p.ShuffleIdxToInstanceIdx { + _src := src[k] + if v != _src { + return false } + } + return true +} +func (p *TPipelineFragmentParams) Field40DeepEqual(src bool) bool { - var _val int32 - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _val = v - } + if p.IsNereids != src { + return false + } + return true +} +func (p *TPipelineFragmentParams) Field41DeepEqual(src *int64) bool { - p.PerExchNumSenders[_key] = _val + if p.WalId == src { + return true + } else if p.WalId == nil || src == nil { + return false } - if err := iprot.ReadMapEnd(); err != nil { - return err + if *p.WalId != *src { + return false } - return nil + return true } +func (p *TPipelineFragmentParams) Field42DeepEqual(src *int64) bool { -func (p *TPipelineFragmentParams) ReadField5(iprot thrift.TProtocol) error { - p.DescTbl = descriptors.NewTDescriptorTable() - if err := p.DescTbl.Read(iprot); err != nil { - return err + if p.ContentLength == src { + return true + } else if p.ContentLength == nil || src == nil { + return false } - return nil + if *p.ContentLength != *src { + return false + } + return true } +func (p *TPipelineFragmentParams) Field43DeepEqual(src *types.TNetworkAddress) bool { -func (p *TPipelineFragmentParams) ReadField6(iprot thrift.TProtocol) error { - p.ResourceInfo = types.NewTResourceInfo() - if err := p.ResourceInfo.Read(iprot); err != nil { - return err + if !p.CurrentConnectFe.DeepEqual(src) { + return false } - return nil + return true } +func (p *TPipelineFragmentParams) Field44DeepEqual(src []int32) bool { -func (p *TPipelineFragmentParams) ReadField7(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false } - p.Destinations = make([]*datasinks.TPlanFragmentDestination, 0, size) - for i := 0; i < size; i++ { - _elem := datasinks.NewTPlanFragmentDestination() - if err := _elem.Read(iprot); err != nil { - return err + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false } - - p.Destinations = append(p.Destinations, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err } - return nil + return true } +func (p *TPipelineFragmentParams) Field1000DeepEqual(src *bool) bool { -func (p *TPipelineFragmentParams) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.NumSenders = &v + if p.IsMowTable == src { + return true + } else if p.IsMowTable == nil || src == nil { + return false } - return nil + if *p.IsMowTable != *src { + return false + } + return true } -func (p *TPipelineFragmentParams) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.SendQueryStatisticsWithEveryBatch = &v - } - return nil +type TPipelineFragmentParamsList struct { + ParamsList []*TPipelineFragmentParams `thrift:"params_list,1,optional" frugal:"1,optional,list" json:"params_list,omitempty"` + DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,2,optional" frugal:"2,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` + FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,3,optional" frugal:"3,optional,map" json:"file_scan_params,omitempty"` + Coord 
*types.TNetworkAddress `thrift:"coord,4,optional" frugal:"4,optional,types.TNetworkAddress" json:"coord,omitempty"` + QueryGlobals *TQueryGlobals `thrift:"query_globals,5,optional" frugal:"5,optional,TQueryGlobals" json:"query_globals,omitempty"` + ResourceInfo *types.TResourceInfo `thrift:"resource_info,6,optional" frugal:"6,optional,types.TResourceInfo" json:"resource_info,omitempty"` + FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,7,optional" frugal:"7,optional,i32" json:"fragment_num_on_host,omitempty"` + QueryOptions *TQueryOptions `thrift:"query_options,8,optional" frugal:"8,optional,TQueryOptions" json:"query_options,omitempty"` + IsNereids bool `thrift:"is_nereids,9,optional" frugal:"9,optional,bool" json:"is_nereids,omitempty"` + WorkloadGroups []*TPipelineWorkloadGroup `thrift:"workload_groups,10,optional" frugal:"10,optional,list" json:"workload_groups,omitempty"` + QueryId *types.TUniqueId `thrift:"query_id,11,optional" frugal:"11,optional,types.TUniqueId" json:"query_id,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,12,optional" frugal:"12,optional,list" json:"topn_filter_source_node_ids,omitempty"` + RuntimeFilterMergeAddr *types.TNetworkAddress `thrift:"runtime_filter_merge_addr,13,optional" frugal:"13,optional,types.TNetworkAddress" json:"runtime_filter_merge_addr,omitempty"` } -func (p *TPipelineFragmentParams) ReadField10(iprot thrift.TProtocol) error { - p.Coord = types.NewTNetworkAddress() - if err := p.Coord.Read(iprot); err != nil { - return err +func NewTPipelineFragmentParamsList() *TPipelineFragmentParamsList { + return &TPipelineFragmentParamsList{ + + IsNereids: true, } - return nil } -func (p *TPipelineFragmentParams) ReadField11(iprot thrift.TProtocol) error { - p.QueryGlobals = NewTQueryGlobals() - if err := p.QueryGlobals.Read(iprot); err != nil { - return err - } - return nil +func (p *TPipelineFragmentParamsList) InitDefault() { + p.IsNereids = true } -func (p *TPipelineFragmentParams) ReadField12(iprot thrift.TProtocol) error { - p.QueryOptions = NewTQueryOptions() - if err := p.QueryOptions.Read(iprot); err != nil { - return err +var TPipelineFragmentParamsList_ParamsList_DEFAULT []*TPipelineFragmentParams + +func (p *TPipelineFragmentParamsList) GetParamsList() (v []*TPipelineFragmentParams) { + if !p.IsSetParamsList() { + return TPipelineFragmentParamsList_ParamsList_DEFAULT } - return nil + return p.ParamsList } -func (p *TPipelineFragmentParams) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.ImportLabel = &v +var TPipelineFragmentParamsList_DescTbl_DEFAULT *descriptors.TDescriptorTable + +func (p *TPipelineFragmentParamsList) GetDescTbl() (v *descriptors.TDescriptorTable) { + if !p.IsSetDescTbl() { + return TPipelineFragmentParamsList_DescTbl_DEFAULT } - return nil + return p.DescTbl } -func (p *TPipelineFragmentParams) ReadField14(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.DbName = &v +var TPipelineFragmentParamsList_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams + +func (p *TPipelineFragmentParamsList) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + if !p.IsSetFileScanParams() { + return TPipelineFragmentParamsList_FileScanParams_DEFAULT } - return nil + return p.FileScanParams } -func (p *TPipelineFragmentParams) ReadField15(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - 
return err - } else { - p.LoadJobId = &v +var TPipelineFragmentParamsList_Coord_DEFAULT *types.TNetworkAddress + +func (p *TPipelineFragmentParamsList) GetCoord() (v *types.TNetworkAddress) { + if !p.IsSetCoord() { + return TPipelineFragmentParamsList_Coord_DEFAULT } - return nil + return p.Coord } -func (p *TPipelineFragmentParams) ReadField16(iprot thrift.TProtocol) error { - p.LoadErrorHubInfo = NewTLoadErrorHubInfo() - if err := p.LoadErrorHubInfo.Read(iprot); err != nil { - return err +var TPipelineFragmentParamsList_QueryGlobals_DEFAULT *TQueryGlobals + +func (p *TPipelineFragmentParamsList) GetQueryGlobals() (v *TQueryGlobals) { + if !p.IsSetQueryGlobals() { + return TPipelineFragmentParamsList_QueryGlobals_DEFAULT } - return nil + return p.QueryGlobals } -func (p *TPipelineFragmentParams) ReadField17(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - p.FragmentNumOnHost = &v +var TPipelineFragmentParamsList_ResourceInfo_DEFAULT *types.TResourceInfo + +func (p *TPipelineFragmentParamsList) GetResourceInfo() (v *types.TResourceInfo) { + if !p.IsSetResourceInfo() { + return TPipelineFragmentParamsList_ResourceInfo_DEFAULT } - return nil + return p.ResourceInfo } -func (p *TPipelineFragmentParams) ReadField18(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.BackendId = &v +var TPipelineFragmentParamsList_FragmentNumOnHost_DEFAULT int32 + +func (p *TPipelineFragmentParamsList) GetFragmentNumOnHost() (v int32) { + if !p.IsSetFragmentNumOnHost() { + return TPipelineFragmentParamsList_FragmentNumOnHost_DEFAULT } - return nil + return *p.FragmentNumOnHost } -func (p *TPipelineFragmentParams) ReadField19(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.NeedWaitExecutionTrigger = v +var TPipelineFragmentParamsList_QueryOptions_DEFAULT *TQueryOptions + +func (p *TPipelineFragmentParamsList) GetQueryOptions() (v *TQueryOptions) { + if !p.IsSetQueryOptions() { + return TPipelineFragmentParamsList_QueryOptions_DEFAULT } - return nil + return p.QueryOptions } -func (p *TPipelineFragmentParams) ReadField20(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.InstancesSharingHashTable = make([]*types.TUniqueId, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTUniqueId() - if err := _elem.Read(iprot); err != nil { - return err - } +var TPipelineFragmentParamsList_IsNereids_DEFAULT bool = true - p.InstancesSharingHashTable = append(p.InstancesSharingHashTable, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err +func (p *TPipelineFragmentParamsList) GetIsNereids() (v bool) { + if !p.IsSetIsNereids() { + return TPipelineFragmentParamsList_IsNereids_DEFAULT } - return nil + return p.IsNereids } -func (p *TPipelineFragmentParams) ReadField21(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.IsSimplifiedParam = v +var TPipelineFragmentParamsList_WorkloadGroups_DEFAULT []*TPipelineWorkloadGroup + +func (p *TPipelineFragmentParamsList) GetWorkloadGroups() (v []*TPipelineWorkloadGroup) { + if !p.IsSetWorkloadGroups() { + return TPipelineFragmentParamsList_WorkloadGroups_DEFAULT } - return nil + return p.WorkloadGroups } -func (p *TPipelineFragmentParams) ReadField22(iprot thrift.TProtocol) error { - p.GlobalDict = NewTGlobalDict() - if err := p.GlobalDict.Read(iprot); err != nil { - return err +var 
TPipelineFragmentParamsList_QueryId_DEFAULT *types.TUniqueId + +func (p *TPipelineFragmentParamsList) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TPipelineFragmentParamsList_QueryId_DEFAULT } - return nil + return p.QueryId } -func (p *TPipelineFragmentParams) ReadField23(iprot thrift.TProtocol) error { - p.Fragment = planner.NewTPlanFragment() - if err := p.Fragment.Read(iprot); err != nil { - return err +var TPipelineFragmentParamsList_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPipelineFragmentParamsList) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPipelineFragmentParamsList_TopnFilterSourceNodeIds_DEFAULT } - return nil + return p.TopnFilterSourceNodeIds } -func (p *TPipelineFragmentParams) ReadField24(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.LocalParams = make([]*TPipelineInstanceParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineInstanceParams() - if err := _elem.Read(iprot); err != nil { - return err - } +var TPipelineFragmentParamsList_RuntimeFilterMergeAddr_DEFAULT *types.TNetworkAddress - p.LocalParams = append(p.LocalParams, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err +func (p *TPipelineFragmentParamsList) GetRuntimeFilterMergeAddr() (v *types.TNetworkAddress) { + if !p.IsSetRuntimeFilterMergeAddr() { + return TPipelineFragmentParamsList_RuntimeFilterMergeAddr_DEFAULT } - return nil + return p.RuntimeFilterMergeAddr +} +func (p *TPipelineFragmentParamsList) SetParamsList(val []*TPipelineFragmentParams) { + p.ParamsList = val +} +func (p *TPipelineFragmentParamsList) SetDescTbl(val *descriptors.TDescriptorTable) { + p.DescTbl = val +} +func (p *TPipelineFragmentParamsList) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + p.FileScanParams = val +} +func (p *TPipelineFragmentParamsList) SetCoord(val *types.TNetworkAddress) { + p.Coord = val +} +func (p *TPipelineFragmentParamsList) SetQueryGlobals(val *TQueryGlobals) { + p.QueryGlobals = val +} +func (p *TPipelineFragmentParamsList) SetResourceInfo(val *types.TResourceInfo) { + p.ResourceInfo = val +} +func (p *TPipelineFragmentParamsList) SetFragmentNumOnHost(val *int32) { + p.FragmentNumOnHost = val +} +func (p *TPipelineFragmentParamsList) SetQueryOptions(val *TQueryOptions) { + p.QueryOptions = val +} +func (p *TPipelineFragmentParamsList) SetIsNereids(val bool) { + p.IsNereids = val +} +func (p *TPipelineFragmentParamsList) SetWorkloadGroups(val []*TPipelineWorkloadGroup) { + p.WorkloadGroups = val +} +func (p *TPipelineFragmentParamsList) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TPipelineFragmentParamsList) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} +func (p *TPipelineFragmentParamsList) SetRuntimeFilterMergeAddr(val *types.TNetworkAddress) { + p.RuntimeFilterMergeAddr = val } -func (p *TPipelineFragmentParams) ReadField26(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineWorkloadGroup() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.WorkloadGroups = append(p.WorkloadGroups, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil +var fieldIDToName_TPipelineFragmentParamsList = map[int16]string{ + 1: "params_list", + 2: 
"desc_tbl", + 3: "file_scan_params", + 4: "coord", + 5: "query_globals", + 6: "resource_info", + 7: "fragment_num_on_host", + 8: "query_options", + 9: "is_nereids", + 10: "workload_groups", + 11: "query_id", + 12: "topn_filter_source_node_ids", + 13: "runtime_filter_merge_addr", } -func (p *TPipelineFragmentParams) ReadField27(iprot thrift.TProtocol) error { - p.TxnConf = NewTTxnParams() - if err := p.TxnConf.Read(iprot); err != nil { - return err - } - return nil +func (p *TPipelineFragmentParamsList) IsSetParamsList() bool { + return p.ParamsList != nil } -func (p *TPipelineFragmentParams) ReadField28(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.TableName = &v - } - return nil +func (p *TPipelineFragmentParamsList) IsSetDescTbl() bool { + return p.DescTbl != nil } -func (p *TPipelineFragmentParams) ReadField29(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.FileScanParams = make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := plannodes.NewTFileScanRangeParams() - if err := _val.Read(iprot); err != nil { - return err - } +func (p *TPipelineFragmentParamsList) IsSetFileScanParams() bool { + return p.FileScanParams != nil +} - p.FileScanParams[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil +func (p *TPipelineFragmentParamsList) IsSetCoord() bool { + return p.Coord != nil } -func (p *TPipelineFragmentParams) ReadField30(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.GroupCommit = v - } - return nil +func (p *TPipelineFragmentParamsList) IsSetQueryGlobals() bool { + return p.QueryGlobals != nil } -func (p *TPipelineFragmentParams) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPipelineFragmentParams"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError - } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - if err 
= p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError - } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError - } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError - } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError - } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError - } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError - } - if err = p.writeField23(oprot); err != nil { - fieldId = 23 - goto WriteFieldError - } - if err = p.writeField24(oprot); err != nil { - fieldId = 24 - goto WriteFieldError - } - if err = p.writeField26(oprot); err != nil { - fieldId = 26 - goto WriteFieldError - } - if err = p.writeField27(oprot); err != nil { - fieldId = 27 - goto WriteFieldError - } - if err = p.writeField28(oprot); err != nil { - fieldId = 28 - goto WriteFieldError - } - if err = p.writeField29(oprot); err != nil { - fieldId = 29 - goto WriteFieldError - } - if err = p.writeField30(oprot); err != nil { - fieldId = 30 - goto WriteFieldError - } +func (p *TPipelineFragmentParamsList) IsSetResourceInfo() bool { + return p.ResourceInfo != nil +} - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError - } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +func (p *TPipelineFragmentParamsList) IsSetFragmentNumOnHost() bool { + return p.FragmentNumOnHost != nil } -func (p *TPipelineFragmentParams) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("protocol_version", thrift.I32, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(p.ProtocolVersion)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +func (p *TPipelineFragmentParamsList) IsSetQueryOptions() bool { + return p.QueryOptions != nil } -func (p *TPipelineFragmentParams) writeField2(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 2); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +func (p *TPipelineFragmentParamsList) IsSetIsNereids() bool { + return p.IsNereids != TPipelineFragmentParamsList_IsNereids_DEFAULT } -func (p *TPipelineFragmentParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentId() { - if err = oprot.WriteFieldBegin("fragment_id", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if 
err := oprot.WriteI32(*p.FragmentId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +func (p *TPipelineFragmentParamsList) IsSetWorkloadGroups() bool { + return p.WorkloadGroups != nil +} + +func (p *TPipelineFragmentParamsList) IsSetQueryId() bool { + return p.QueryId != nil } -func (p *TPipelineFragmentParams) writeField4(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("per_exch_num_senders", thrift.MAP, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.PerExchNumSenders)); err != nil { - return err - } - for k, v := range p.PerExchNumSenders { +func (p *TPipelineFragmentParamsList) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} - if err := oprot.WriteI32(k); err != nil { - return err - } +func (p *TPipelineFragmentParamsList) IsSetRuntimeFilterMergeAddr() bool { + return p.RuntimeFilterMergeAddr != nil +} - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError +func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} -func (p *TPipelineFragmentParams) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetDescTbl() { - if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.DescTbl.Write(oprot); err != nil { - return err + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if fieldTypeId == thrift.STOP { + break } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} -func (p *TPipelineFragmentParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetResourceInfo() { - if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 6); err != nil { - goto WriteFieldBeginError - } - if err := p.ResourceInfo.Write(oprot); err != nil { - return err + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == 
thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.LIST { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.LIST { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField7(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("destinations", thrift.LIST, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Destinations)); err != nil { +func (p *TPipelineFragmentParamsList) ReadField1(iprot 
thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { return err } - for _, v := range p.Destinations { - if err := v.Write(oprot); err != nil { + _field := make([]*TPipelineFragmentParams, 0, size) + values := make([]TPipelineFragmentParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { return err } + + _field = append(_field, _elem) } - if err := oprot.WriteListEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + p.ParamsList = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetNumSenders() { - if err = oprot.WriteFieldBegin("num_senders", thrift.I32, 8); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.NumSenders); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField2(iprot thrift.TProtocol) error { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { + return err } + p.DescTbl = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetSendQueryStatisticsWithEveryBatch() { - if err = oprot.WriteFieldBegin("send_query_statistics_with_every_batch", thrift.BOOL, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.SendQueryStatisticsWithEveryBatch); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetCoord() { - if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 10); err != nil { - goto WriteFieldBeginError - } - if err := p.Coord.Write(oprot); err != nil { + _field := make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + values := make([]plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _key = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err } + p.FileScanParams = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: 
", p), err) } - -func (p *TPipelineFragmentParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryGlobals() { - if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 11); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryGlobals.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField4(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } + p.Coord = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetQueryOptions() { - if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 12); err != nil { - goto WriteFieldBeginError - } - if err := p.QueryOptions.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField5(iprot thrift.TProtocol) error { + _field := NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { + return err } + p.QueryGlobals = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetImportLabel() { - if err = oprot.WriteFieldBegin("import_label", thrift.STRING, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.ImportLabel); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField6(iprot thrift.TProtocol) error { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { + return err } + p.ResourceInfo = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } +func (p *TPipelineFragmentParamsList) ReadField7(iprot thrift.TProtocol) error { -func (p *TPipelineFragmentParams) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetDbName() { - if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 14); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.DbName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } + p.FragmentNumOnHost = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadJobId() { - if err = oprot.WriteFieldBegin("load_job_id", thrift.I64, 15); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.LoadJobId); err != nil { - return err - } - if err = 
oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField8(iprot thrift.TProtocol) error { + _field := NewTQueryOptions() + if err := _field.Read(iprot); err != nil { + return err } + p.QueryOptions = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } +func (p *TPipelineFragmentParamsList) ReadField9(iprot thrift.TProtocol) error { -func (p *TPipelineFragmentParams) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadErrorHubInfo() { - if err = oprot.WriteFieldBegin("load_error_hub_info", thrift.STRUCT, 16); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadErrorHubInfo.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v } + p.IsNereids = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } +func (p *TPipelineFragmentParamsList) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineWorkloadGroup, 0, size) + values := make([]TPipelineWorkloadGroup, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() -func (p *TPipelineFragmentParams) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetFragmentNumOnHost() { - if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 17); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { + if err := _elem.Read(iprot); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err } + p.WorkloadGroups = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetBackendId() { - if err = oprot.WriteFieldBegin("backend_id", thrift.I64, 18); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.BackendId); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField11(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err } + p.QueryId = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) } +func (p *TPipelineFragmentParamsList) ReadField12(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { -func (p *TPipelineFragmentParams) writeField19(oprot thrift.TProtocol) (err error) { - if 
p.IsSetNeedWaitExecutionTrigger() { - if err = oprot.WriteFieldBegin("need_wait_execution_trigger", thrift.BOOL, 19); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(p.NeedWaitExecutionTrigger); err != nil { + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { return err + } else { + _elem = v } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } + + _field = append(_field, _elem) } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) } - -func (p *TPipelineFragmentParams) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetInstancesSharingHashTable() { - if err = oprot.WriteFieldBegin("instances_sharing_hash_table", thrift.LIST, 20); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.InstancesSharingHashTable)); err != nil { - return err - } - for _, v := range p.InstancesSharingHashTable { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TPipelineFragmentParamsList) ReadField13(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err } + p.RuntimeFilterMergeAddr = _field return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetIsSimplifiedParam() { - if err = oprot.WriteFieldBegin("is_simplified_param", thrift.BOOL, 21); err != nil { - goto WriteFieldBeginError +func (p *TPipelineFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPipelineFragmentParamsList"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError } - if err := oprot.WriteBool(p.IsSimplifiedParam); err != nil { - return err + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetGlobalDict() { - if err = oprot.WriteFieldBegin("global_dict", thrift.STRUCT, 22); err != nil { - goto WriteFieldBeginError + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError } - if err := p.GlobalDict.Write(oprot); err != nil { - return err + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto 
WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) writeField23(oprot thrift.TProtocol) (err error) { - if p.IsSetFragment() { - if err = oprot.WriteFieldBegin("fragment", thrift.STRUCT, 23); err != nil { - goto WriteFieldBeginError + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError } - if err := p.Fragment.Write(oprot); err != nil { - return err + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) writeField24(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("local_params", thrift.LIST, 24); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.LocalParams)); err != nil { - return err - } - for _, v := range p.LocalParams { - if err := v.Write(oprot); err != nil { - return err + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError } } - if err := oprot.WriteListEnd(); err != nil { - return err + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField26(oprot thrift.TProtocol) (err error) { - if p.IsSetWorkloadGroups() { - if err = oprot.WriteFieldBegin("workload_groups", thrift.LIST, 26); err != nil { +func (p *TPipelineFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetParamsList() { + if err = oprot.WriteFieldBegin("params_list", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.WorkloadGroups)); err != nil { + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { return err } - for _, v := range p.WorkloadGroups { + for _, v := range p.ParamsList { if err := v.Write(oprot); err != nil { return err } @@ -23194,36 +30635,17 @@ func (p *TPipelineFragmentParams) 
writeField26(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) writeField27(oprot thrift.TProtocol) (err error) { - if p.IsSetTxnConf() { - if err = oprot.WriteFieldBegin("txn_conf", thrift.STRUCT, 27); err != nil { - goto WriteFieldBeginError - } - if err := p.TxnConf.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField28(oprot thrift.TProtocol) (err error) { - if p.IsSetTableName() { - if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 28); err != nil { +func (p *TPipelineFragmentParamsList) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDescTbl() { + if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.TableName); err != nil { + if err := p.DescTbl.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -23232,25 +30654,23 @@ func (p *TPipelineFragmentParams) writeField28(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField29(oprot thrift.TProtocol) (err error) { +func (p *TPipelineFragmentParamsList) writeField3(oprot thrift.TProtocol) (err error) { if p.IsSetFileScanParams() { - if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 29); err != nil { + if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 3); err != nil { goto WriteFieldBeginError } if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.FileScanParams)); err != nil { return err } for k, v := range p.FileScanParams { - if err := oprot.WriteI32(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -23264,17 +30684,17 @@ func (p *TPipelineFragmentParams) writeField29(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TPipelineFragmentParams) writeField30(oprot thrift.TProtocol) (err error) { - if p.IsSetGroupCommit() { - if err = oprot.WriteFieldBegin("group_commit", thrift.BOOL, 30); err != nil { +func (p *TPipelineFragmentParamsList) writeField4(oprot thrift.TProtocol) (err error) { + if 
p.IsSetCoord() { + if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteBool(p.GroupCommit); err != nil { + if err := p.Coord.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -23283,305 +30703,260 @@ func (p *TPipelineFragmentParams) writeField30(oprot thrift.TProtocol) (err erro } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) -} - -func (p *TPipelineFragmentParams) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TPipelineFragmentParams(%+v)", *p) -} - -func (p *TPipelineFragmentParams) DeepEqual(ano *TPipelineFragmentParams) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.ProtocolVersion) { - return false - } - if !p.Field2DeepEqual(ano.QueryId) { - return false - } - if !p.Field3DeepEqual(ano.FragmentId) { - return false - } - if !p.Field4DeepEqual(ano.PerExchNumSenders) { - return false - } - if !p.Field5DeepEqual(ano.DescTbl) { - return false - } - if !p.Field6DeepEqual(ano.ResourceInfo) { - return false - } - if !p.Field7DeepEqual(ano.Destinations) { - return false - } - if !p.Field8DeepEqual(ano.NumSenders) { - return false - } - if !p.Field9DeepEqual(ano.SendQueryStatisticsWithEveryBatch) { - return false - } - if !p.Field10DeepEqual(ano.Coord) { - return false - } - if !p.Field11DeepEqual(ano.QueryGlobals) { - return false - } - if !p.Field12DeepEqual(ano.QueryOptions) { - return false - } - if !p.Field13DeepEqual(ano.ImportLabel) { - return false - } - if !p.Field14DeepEqual(ano.DbName) { - return false - } - if !p.Field15DeepEqual(ano.LoadJobId) { - return false - } - if !p.Field16DeepEqual(ano.LoadErrorHubInfo) { - return false - } - if !p.Field17DeepEqual(ano.FragmentNumOnHost) { - return false - } - if !p.Field18DeepEqual(ano.BackendId) { - return false - } - if !p.Field19DeepEqual(ano.NeedWaitExecutionTrigger) { - return false - } - if !p.Field20DeepEqual(ano.InstancesSharingHashTable) { - return false - } - if !p.Field21DeepEqual(ano.IsSimplifiedParam) { - return false - } - if !p.Field22DeepEqual(ano.GlobalDict) { - return false - } - if !p.Field23DeepEqual(ano.Fragment) { - return false - } - if !p.Field24DeepEqual(ano.LocalParams) { - return false - } - if !p.Field26DeepEqual(ano.WorkloadGroups) { - return false - } - if !p.Field27DeepEqual(ano.TxnConf) { - return false - } - if !p.Field28DeepEqual(ano.TableName) { - return false - } - if !p.Field29DeepEqual(ano.FileScanParams) { - return false - } - if !p.Field30DeepEqual(ano.GroupCommit) { - return false - } - return true + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field1DeepEqual(src PaloInternalServiceVersion) bool { - - if p.ProtocolVersion != src { - return false +func (p *TPipelineFragmentParamsList) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryGlobals() { + if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryGlobals.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field2DeepEqual(src *types.TUniqueId) bool { - if !p.QueryId.DeepEqual(src) { - return false +func (p *TPipelineFragmentParamsList) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceInfo() { + if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.ResourceInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field3DeepEqual(src *int32) bool { - if p.FragmentId == src { - return true - } else if p.FragmentId == nil || src == nil { - return false - } - if *p.FragmentId != *src { - return false +func (p *TPipelineFragmentParamsList) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentNumOnHost() { + if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field4DeepEqual(src map[types.TPlanNodeId]int32) bool { - if len(p.PerExchNumSenders) != len(src) { - return false - } - for k, v := range p.PerExchNumSenders { - _src := src[k] - if v != _src { - return false +func (p *TPipelineFragmentParamsList) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryOptions.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field5DeepEqual(src *descriptors.TDescriptorTable) bool { - if !p.DescTbl.DeepEqual(src) { - return false +func (p *TPipelineFragmentParamsList) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNereids() { + if err = oprot.WriteFieldBegin("is_nereids", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNereids); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field6DeepEqual(src *types.TResourceInfo) bool { - if !p.ResourceInfo.DeepEqual(src) { - return false 
+func (p *TPipelineFragmentParamsList) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroups() { + if err = oprot.WriteFieldBegin("workload_groups", thrift.LIST, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.WorkloadGroups)); err != nil { + return err + } + for _, v := range p.WorkloadGroups { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field7DeepEqual(src []*datasinks.TPlanFragmentDestination) bool { - if len(p.Destinations) != len(src) { - return false - } - for i, v := range p.Destinations { - _src := src[i] - if !v.DeepEqual(_src) { - return false +func (p *TPipelineFragmentParamsList) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field8DeepEqual(src *int32) bool { - if p.NumSenders == src { - return true - } else if p.NumSenders == nil || src == nil { - return false - } - if *p.NumSenders != *src { - return false +func (p *TPipelineFragmentParamsList) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field9DeepEqual(src *bool) bool { - if p.SendQueryStatisticsWithEveryBatch == src { - return true - } else if p.SendQueryStatisticsWithEveryBatch == nil || src == nil { - return false - } - if *p.SendQueryStatisticsWithEveryBatch != *src { - return false +func (p *TPipelineFragmentParamsList) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterMergeAddr() { + if err = oprot.WriteFieldBegin("runtime_filter_merge_addr", thrift.STRUCT, 13); err != nil { + goto WriteFieldBeginError + } + if err := p.RuntimeFilterMergeAddr.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -func (p *TPipelineFragmentParams) Field10DeepEqual(src *types.TNetworkAddress) bool { - if !p.Coord.DeepEqual(src) { - return false +func (p *TPipelineFragmentParamsList) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TPipelineFragmentParamsList(%+v)", *p) + } -func (p *TPipelineFragmentParams) Field11DeepEqual(src *TQueryGlobals) bool { - if !p.QueryGlobals.DeepEqual(src) { +func (p *TPipelineFragmentParamsList) DeepEqual(ano *TPipelineFragmentParamsList) bool { + if p == ano { + return true + } else if p == nil || ano == nil { return false } - return true -} -func (p *TPipelineFragmentParams) Field12DeepEqual(src *TQueryOptions) bool { - - if !p.QueryOptions.DeepEqual(src) { + if !p.Field1DeepEqual(ano.ParamsList) { return false } - return true -} -func (p *TPipelineFragmentParams) Field13DeepEqual(src *string) bool { - - if p.ImportLabel == src { - return true - } else if p.ImportLabel == nil || src == nil { + if !p.Field2DeepEqual(ano.DescTbl) { return false } - if strings.Compare(*p.ImportLabel, *src) != 0 { + if !p.Field3DeepEqual(ano.FileScanParams) { return false } - return true -} -func (p *TPipelineFragmentParams) Field14DeepEqual(src *string) bool { - - if p.DbName == src { - return true - } else if p.DbName == nil || src == nil { + if !p.Field4DeepEqual(ano.Coord) { return false } - if strings.Compare(*p.DbName, *src) != 0 { + if !p.Field5DeepEqual(ano.QueryGlobals) { return false } - return true -} -func (p *TPipelineFragmentParams) Field15DeepEqual(src *int64) bool { - - if p.LoadJobId == src { - return true - } else if p.LoadJobId == nil || src == nil { + if !p.Field6DeepEqual(ano.ResourceInfo) { return false } - if *p.LoadJobId != *src { + if !p.Field7DeepEqual(ano.FragmentNumOnHost) { return false } - return true -} -func (p *TPipelineFragmentParams) Field16DeepEqual(src *TLoadErrorHubInfo) bool { - - if !p.LoadErrorHubInfo.DeepEqual(src) { + if !p.Field8DeepEqual(ano.QueryOptions) { return false } - return true -} -func (p *TPipelineFragmentParams) Field17DeepEqual(src *int32) bool { - - if p.FragmentNumOnHost == src { - return true - } else if p.FragmentNumOnHost == nil || src == nil { + if !p.Field9DeepEqual(ano.IsNereids) { return false } - if *p.FragmentNumOnHost != *src { + if !p.Field10DeepEqual(ano.WorkloadGroups) { return false } - return true -} -func (p *TPipelineFragmentParams) Field18DeepEqual(src *int64) bool { - - if p.BackendId == src { - return true - } else if p.BackendId == nil || src == nil { + if !p.Field11DeepEqual(ano.QueryId) { return false } - if *p.BackendId != *src { + if !p.Field12DeepEqual(ano.TopnFilterSourceNodeIds) { return false } - return true -} -func (p *TPipelineFragmentParams) Field19DeepEqual(src bool) bool { - - if p.NeedWaitExecutionTrigger != src { + if !p.Field13DeepEqual(ano.RuntimeFilterMergeAddr) { return false } return true } -func (p *TPipelineFragmentParams) Field20DeepEqual(src []*types.TUniqueId) bool { - if len(p.InstancesSharingHashTable) != len(src) { +func (p *TPipelineFragmentParamsList) Field1DeepEqual(src []*TPipelineFragmentParams) bool { + + if len(p.ParamsList) != len(src) { return false } - for i, v := range p.InstancesSharingHashTable { + for i, v := range p.ParamsList { _src := src[i] if !v.DeepEqual(_src) { return false @@ -23589,289 +30964,110 @@ func (p *TPipelineFragmentParams) 
Field20DeepEqual(src []*types.TUniqueId) bool } return true } -func (p *TPipelineFragmentParams) Field21DeepEqual(src bool) bool { +func (p *TPipelineFragmentParamsList) Field2DeepEqual(src *descriptors.TDescriptorTable) bool { - if p.IsSimplifiedParam != src { + if !p.DescTbl.DeepEqual(src) { return false } return true } -func (p *TPipelineFragmentParams) Field22DeepEqual(src *TGlobalDict) bool { +func (p *TPipelineFragmentParamsList) Field3DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { - if !p.GlobalDict.DeepEqual(src) { + if len(p.FileScanParams) != len(src) { return false } - return true -} -func (p *TPipelineFragmentParams) Field23DeepEqual(src *planner.TPlanFragment) bool { - - if !p.Fragment.DeepEqual(src) { - return false + for k, v := range p.FileScanParams { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } } return true } -func (p *TPipelineFragmentParams) Field24DeepEqual(src []*TPipelineInstanceParams) bool { +func (p *TPipelineFragmentParamsList) Field4DeepEqual(src *types.TNetworkAddress) bool { - if len(p.LocalParams) != len(src) { + if !p.Coord.DeepEqual(src) { return false } - for i, v := range p.LocalParams { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TPipelineFragmentParams) Field26DeepEqual(src []*TPipelineWorkloadGroup) bool { +func (p *TPipelineFragmentParamsList) Field5DeepEqual(src *TQueryGlobals) bool { - if len(p.WorkloadGroups) != len(src) { + if !p.QueryGlobals.DeepEqual(src) { return false } - for i, v := range p.WorkloadGroups { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TPipelineFragmentParams) Field27DeepEqual(src *TTxnParams) bool { +func (p *TPipelineFragmentParamsList) Field6DeepEqual(src *types.TResourceInfo) bool { - if !p.TxnConf.DeepEqual(src) { + if !p.ResourceInfo.DeepEqual(src) { return false } return true } -func (p *TPipelineFragmentParams) Field28DeepEqual(src *string) bool { +func (p *TPipelineFragmentParamsList) Field7DeepEqual(src *int32) bool { - if p.TableName == src { + if p.FragmentNumOnHost == src { return true - } else if p.TableName == nil || src == nil { + } else if p.FragmentNumOnHost == nil || src == nil { return false } - if strings.Compare(*p.TableName, *src) != 0 { + if *p.FragmentNumOnHost != *src { return false } return true } -func (p *TPipelineFragmentParams) Field29DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { +func (p *TPipelineFragmentParamsList) Field8DeepEqual(src *TQueryOptions) bool { - if len(p.FileScanParams) != len(src) { + if !p.QueryOptions.DeepEqual(src) { return false } - for k, v := range p.FileScanParams { - _src := src[k] - if !v.DeepEqual(_src) { - return false - } - } return true } -func (p *TPipelineFragmentParams) Field30DeepEqual(src bool) bool { +func (p *TPipelineFragmentParamsList) Field9DeepEqual(src bool) bool { - if p.GroupCommit != src { + if p.IsNereids != src { return false } return true } +func (p *TPipelineFragmentParamsList) Field10DeepEqual(src []*TPipelineWorkloadGroup) bool { -type TPipelineFragmentParamsList struct { - ParamsList []*TPipelineFragmentParams `thrift:"params_list,1,optional" frugal:"1,optional,list" json:"params_list,omitempty"` -} - -func NewTPipelineFragmentParamsList() *TPipelineFragmentParamsList { - return &TPipelineFragmentParamsList{} -} - -func (p *TPipelineFragmentParamsList) InitDefault() { - *p = TPipelineFragmentParamsList{} -} - -var TPipelineFragmentParamsList_ParamsList_DEFAULT 
[]*TPipelineFragmentParams - -func (p *TPipelineFragmentParamsList) GetParamsList() (v []*TPipelineFragmentParams) { - if !p.IsSetParamsList() { - return TPipelineFragmentParamsList_ParamsList_DEFAULT - } - return p.ParamsList -} -func (p *TPipelineFragmentParamsList) SetParamsList(val []*TPipelineFragmentParams) { - p.ParamsList = val -} - -var fieldIDToName_TPipelineFragmentParamsList = map[int16]string{ - 1: "params_list", -} - -func (p *TPipelineFragmentParamsList) IsSetParamsList() bool { - return p.ParamsList != nil -} - -func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError - } - } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TPipelineFragmentParamsList) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ParamsList = make([]*TPipelineFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineFragmentParams() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.ParamsList = append(p.ParamsList, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TPipelineFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPipelineFragmentParamsList"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError + if len(p.WorkloadGroups) != len(src) { + return false } - return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", 
p), err) -} - -func (p *TPipelineFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetParamsList() { - if err = oprot.WriteFieldBegin("params_list", thrift.LIST, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { - return err - } - for _, v := range p.ParamsList { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + for i, v := range p.WorkloadGroups { + _src := src[i] + if !v.DeepEqual(_src) { + return false } } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TPipelineFragmentParamsList) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TPipelineFragmentParamsList(%+v)", *p) + return true } +func (p *TPipelineFragmentParamsList) Field11DeepEqual(src *types.TUniqueId) bool { -func (p *TPipelineFragmentParamsList) DeepEqual(ano *TPipelineFragmentParamsList) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.ParamsList) { + if !p.QueryId.DeepEqual(src) { return false } return true } +func (p *TPipelineFragmentParamsList) Field12DeepEqual(src []int32) bool { -func (p *TPipelineFragmentParamsList) Field1DeepEqual(src []*TPipelineFragmentParams) bool { - - if len(p.ParamsList) != len(src) { + if len(p.TopnFilterSourceNodeIds) != len(src) { return false } - for i, v := range p.ParamsList { + for i, v := range p.TopnFilterSourceNodeIds { _src := src[i] - if !v.DeepEqual(_src) { + if v != _src { return false } } return true } +func (p *TPipelineFragmentParamsList) Field13DeepEqual(src *types.TNetworkAddress) bool { + + if !p.RuntimeFilterMergeAddr.DeepEqual(src) { + return false + } + return true +} diff --git a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go index d98de333..92ad27e1 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package palointernalservice @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/data" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/datasinks" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" @@ -2214,2974 +2215,5885 @@ func (p *TQueryOptions) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + case 87: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField87(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryOptions[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TQueryOptions) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.AbortOnError = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MaxErrors = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.DisableCodegen = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField4(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.BatchSize = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.NumNodes = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MaxScanRangeLength = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset 
+= l - - p.NumScannerThreads = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField8(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MaxIoBuffers = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField9(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.AllowUnsupportedFormats = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.DefaultOrderByLimit = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField12(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MemLimit = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField13(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.AbortOnDefaultLimitExceeded = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField14(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.QueryTimeout = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField15(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.IsReportSuccess = v - - } - return offset, nil -} - -func (p *TQueryOptions) FastReadField16(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.CodegenLevel = v - - } - return offset, nil -} - + case 88: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField88(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 89: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField89(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 90: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField90(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 91: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField91(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 92: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField92(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 93: + if fieldTypeId == 
thrift.I32 { + l, err = p.FastReadField93(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 94: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField94(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 95: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField95(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 96: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField96(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 97: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField97(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 98: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField98(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 99: + if fieldTypeId == thrift.DOUBLE { + l, err = p.FastReadField99(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 100: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField100(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 101: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField101(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 102: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField102(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 103: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField103(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 104: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField104(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 105: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField105(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 106: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField106(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 107: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField107(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 108: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField108(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 109: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField109(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 110: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField110(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 111: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField111(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 112: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField112(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 113: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField113(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 114: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField114(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 115: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField115(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 116: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField116(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 117: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField117(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 118: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField118(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 119: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField119(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 120: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField120(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 121: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField121(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 122: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField122(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 123: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField123(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 124: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField124(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 125: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField125(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 126: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField126(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 127: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField127(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 128: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField128(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 129: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField129(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 130: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField130(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 131: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField131(buf[offset:]) + offset += 
l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 132: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField132(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 133: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField133(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 134: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField134(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 135: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField135(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 136: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField136(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 137: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField137(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 138: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField138(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 139: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField139(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 140: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField140(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 141: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField141(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + 
offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryOptions[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryOptions) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AbortOnError = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxErrors = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DisableCodegen = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BatchSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.NumNodes = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxScanRangeLength = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.NumScannerThreads = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxIoBuffers = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AllowUnsupportedFormats = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DefaultOrderByLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err 
+ } else { + offset += l + + p.MemLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AbortOnDefaultLimitExceeded = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.QueryTimeout = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsReportSuccess = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.CodegenLevel = v + + } + return offset, nil +} + func (p *TQueryOptions) FastReadField17(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.KuduLatestObservedTs = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.QueryType = TQueryType(v) + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MinReservation = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxReservation = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InitialReservationTotalClaims = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BufferPoolLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField23(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DefaultSpillableBufferSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField24(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MinSpillableBufferSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField25(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxRowSize = v + + } + return 
offset, nil +} + +func (p *TQueryOptions) FastReadField26(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DisableStreamPreaggregations = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField27(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MtDop = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField28(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.LoadMemLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField29(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxScanKeyNum = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField30(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MaxPushdownConditionsPerColumn = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField31(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableSpilling = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField32(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableEnableExchangeNodeParallelMerge = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField33(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeFilterWaitTimeMs = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField34(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeFilterMaxInNum = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField42(buf []byte) (int, error) { + offset := 0 + + tmp := NewTResourceLimit() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.ResourceLimit = tmp + return offset, nil +} + +func (p *TQueryOptions) FastReadField43(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ReturnObjectDataAsBinary = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField44(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TrimTailingSpacesForExternalTableQuery = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField45(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableFunctionPushdown = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField46(buf []byte) (int, error) { + offset := 0 + + if v, l, 
err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentTransmissionCompressionCodec = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField48(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableLocalExchange = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField49(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SkipStorageEngineMerge = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField50(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SkipDeletePredicate = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField51(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableNewShuffleHashMethod = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField52(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BeExecVersion = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField53(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionedHashJoinRowsThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField54(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EnableShareHashTableForBroadcastJoin = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField55(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.CheckOverflowForDecimal = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField56(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SkipDeleteBitmap = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField57(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnablePipelineEngine = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField58(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RepeatMaxNum = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField59(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ExternalSortBytesThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField60(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += 
l + + p.PartitionedHashAggRowsThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField61(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableFileCache = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField62(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InsertTimeout = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField63(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ExecutionTimeout = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField64(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DryRunQuery = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField65(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableCommonExprPushdown = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField66(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParallelInstance = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField67(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MysqlRowBinaryFormat = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField68(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ExternalAggBytesThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField69(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ExternalAggPartitionBits = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField70(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FileCacheBasePath = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField71(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableParquetLazyMat = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField72(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableOrcLazyMat = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField73(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ScanQueueMemLimit = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField74(buf []byte) (int, error) { + offset := 0 + + if v, 
l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableScanNodeRunSerial = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField75(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableInsertStrict = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField76(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableInvertedIndexQuery = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField77(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.TruncateCharOrVarcharColumns = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField78(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableHashJoinEarlyStartProbe = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField79(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnablePipelineXEngine = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField80(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableMemtableOnSinkNode = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField81(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableDeleteSubPredicateV2 = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField82(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FeProcessUuid = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField83(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexConjunctionOptThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField84(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableProfile = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField85(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnablePageCache = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField86(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AnalyzeTimeout = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField87(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + 
p.FasterFloatConvert = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField88(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableDecimal256 = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField89(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableLocalShuffle = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField90(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SkipMissingVersion = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField91(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeFilterWaitInfinitely = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField92(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.WaitFullBlockScheduleTimes = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField93(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexMaxExpansions = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField94(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.InvertedIndexSkipThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField95(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableParallelScan = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField96(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParallelScanMaxScannersCount = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField97(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParallelScanMinRowsPerScanner = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField98(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SkipBadTablet = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField99(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadDouble(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ScannerScaleUpRatio = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField100(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableDistinctStreamingAggregation = v + + } + return offset, nil +} + +func (p *TQueryOptions) 
FastReadField101(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableJoinSpill = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField102(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableSortSpill = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField103(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableAggSpill = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField104(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MinRevocableMem = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField105(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SpillStreamingAggMemLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField106(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DataQueueMaxBlocks = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField107(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableCommonExprPushdownForInvertedIndex = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField108(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LocalExchangeFreeBlocksLimit = &v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField109(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableForceSpill = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField110(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableParquetFilterByMinMax = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField111(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableOrcFilterByMinMax = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField112(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MaxColumnReaderNum = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField113(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableLocalMergeSort = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField114(buf []byte) (int, error) { + offset := 0 + + if v, l, err := 
bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableParallelResultSink = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField115(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableShortCircuitQueryAccessColumnStore = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField116(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableNoNeedReadDataOpt = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField117(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ReadCsvEmptyLineAsNull = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField118(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.SerdeDialect = TSerdeDialect(v) + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField119(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableMatchWithoutInvertedIndex = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField120(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableFallbackOnMissingInvertedIndex = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField121(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.KeepCarriageReturn = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField122(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeBloomFilterMinSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField123(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.HiveParquetUseColumnNames = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField124(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.HiveOrcUseColumnNames = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField125(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableSegmentCache = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField126(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeBloomFilterMaxSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField127(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { 
+ return offset, err + } else { + offset += l + + p.InListValueCountThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField128(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableVerboseProfile = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField129(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RpcVerboseProfileMaxInstanceCount = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField130(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableAdaptivePipelineTaskSerialReadOnLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField131(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.AdaptivePipelineTaskSerialReadOnLimit = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField132(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParallelPrepareThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField133(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionTopnMaxPartitions = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField134(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionTopnPrePartitionRows = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField135(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableParallelOutfile = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField136(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnablePhraseQuerySequentialOpt = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField137(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableAutoCreateWhenOverwrite = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField138(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.OrcTinyStripeThresholdBytes = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField139(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.OrcOnceMaxReadBytes = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField140(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } 
else { + offset += l + + p.OrcMaxMergeDistanceBytes = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField141(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IgnoreRuntimeFilterError = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.DisableFileCache = v + + } + return offset, nil +} + +// for compatibility +func (p *TQueryOptions) FastWrite(buf []byte) int { + return 0 +} + +func (p *TQueryOptions) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryOptions") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) + offset += p.fastWriteField25(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField31(buf[offset:], binaryWriter) + offset += p.fastWriteField32(buf[offset:], binaryWriter) + offset += p.fastWriteField33(buf[offset:], binaryWriter) + offset += p.fastWriteField34(buf[offset:], binaryWriter) + offset += p.fastWriteField43(buf[offset:], binaryWriter) + offset += p.fastWriteField44(buf[offset:], binaryWriter) + offset += p.fastWriteField45(buf[offset:], binaryWriter) + offset += p.fastWriteField48(buf[offset:], binaryWriter) + offset += p.fastWriteField49(buf[offset:], binaryWriter) + offset += p.fastWriteField50(buf[offset:], binaryWriter) + offset += p.fastWriteField51(buf[offset:], binaryWriter) + offset += p.fastWriteField52(buf[offset:], binaryWriter) + offset += p.fastWriteField53(buf[offset:], binaryWriter) + offset += p.fastWriteField54(buf[offset:], binaryWriter) + offset += p.fastWriteField55(buf[offset:], binaryWriter) + offset += p.fastWriteField56(buf[offset:], binaryWriter) + offset += p.fastWriteField57(buf[offset:], binaryWriter) + 
offset += p.fastWriteField58(buf[offset:], binaryWriter) + offset += p.fastWriteField59(buf[offset:], binaryWriter) + offset += p.fastWriteField60(buf[offset:], binaryWriter) + offset += p.fastWriteField61(buf[offset:], binaryWriter) + offset += p.fastWriteField62(buf[offset:], binaryWriter) + offset += p.fastWriteField63(buf[offset:], binaryWriter) + offset += p.fastWriteField64(buf[offset:], binaryWriter) + offset += p.fastWriteField65(buf[offset:], binaryWriter) + offset += p.fastWriteField66(buf[offset:], binaryWriter) + offset += p.fastWriteField67(buf[offset:], binaryWriter) + offset += p.fastWriteField68(buf[offset:], binaryWriter) + offset += p.fastWriteField69(buf[offset:], binaryWriter) + offset += p.fastWriteField71(buf[offset:], binaryWriter) + offset += p.fastWriteField72(buf[offset:], binaryWriter) + offset += p.fastWriteField73(buf[offset:], binaryWriter) + offset += p.fastWriteField74(buf[offset:], binaryWriter) + offset += p.fastWriteField75(buf[offset:], binaryWriter) + offset += p.fastWriteField76(buf[offset:], binaryWriter) + offset += p.fastWriteField77(buf[offset:], binaryWriter) + offset += p.fastWriteField78(buf[offset:], binaryWriter) + offset += p.fastWriteField79(buf[offset:], binaryWriter) + offset += p.fastWriteField80(buf[offset:], binaryWriter) + offset += p.fastWriteField81(buf[offset:], binaryWriter) + offset += p.fastWriteField82(buf[offset:], binaryWriter) + offset += p.fastWriteField83(buf[offset:], binaryWriter) + offset += p.fastWriteField84(buf[offset:], binaryWriter) + offset += p.fastWriteField85(buf[offset:], binaryWriter) + offset += p.fastWriteField86(buf[offset:], binaryWriter) + offset += p.fastWriteField87(buf[offset:], binaryWriter) + offset += p.fastWriteField88(buf[offset:], binaryWriter) + offset += p.fastWriteField89(buf[offset:], binaryWriter) + offset += p.fastWriteField90(buf[offset:], binaryWriter) + offset += p.fastWriteField91(buf[offset:], binaryWriter) + offset += p.fastWriteField92(buf[offset:], binaryWriter) + offset += p.fastWriteField93(buf[offset:], binaryWriter) + offset += p.fastWriteField94(buf[offset:], binaryWriter) + offset += p.fastWriteField95(buf[offset:], binaryWriter) + offset += p.fastWriteField96(buf[offset:], binaryWriter) + offset += p.fastWriteField97(buf[offset:], binaryWriter) + offset += p.fastWriteField98(buf[offset:], binaryWriter) + offset += p.fastWriteField99(buf[offset:], binaryWriter) + offset += p.fastWriteField100(buf[offset:], binaryWriter) + offset += p.fastWriteField101(buf[offset:], binaryWriter) + offset += p.fastWriteField102(buf[offset:], binaryWriter) + offset += p.fastWriteField103(buf[offset:], binaryWriter) + offset += p.fastWriteField104(buf[offset:], binaryWriter) + offset += p.fastWriteField105(buf[offset:], binaryWriter) + offset += p.fastWriteField106(buf[offset:], binaryWriter) + offset += p.fastWriteField107(buf[offset:], binaryWriter) + offset += p.fastWriteField108(buf[offset:], binaryWriter) + offset += p.fastWriteField109(buf[offset:], binaryWriter) + offset += p.fastWriteField110(buf[offset:], binaryWriter) + offset += p.fastWriteField111(buf[offset:], binaryWriter) + offset += p.fastWriteField112(buf[offset:], binaryWriter) + offset += p.fastWriteField113(buf[offset:], binaryWriter) + offset += p.fastWriteField114(buf[offset:], binaryWriter) + offset += p.fastWriteField115(buf[offset:], binaryWriter) + offset += p.fastWriteField116(buf[offset:], binaryWriter) + offset += p.fastWriteField117(buf[offset:], binaryWriter) + offset += p.fastWriteField119(buf[offset:], 
binaryWriter) + offset += p.fastWriteField120(buf[offset:], binaryWriter) + offset += p.fastWriteField121(buf[offset:], binaryWriter) + offset += p.fastWriteField122(buf[offset:], binaryWriter) + offset += p.fastWriteField123(buf[offset:], binaryWriter) + offset += p.fastWriteField124(buf[offset:], binaryWriter) + offset += p.fastWriteField125(buf[offset:], binaryWriter) + offset += p.fastWriteField126(buf[offset:], binaryWriter) + offset += p.fastWriteField127(buf[offset:], binaryWriter) + offset += p.fastWriteField128(buf[offset:], binaryWriter) + offset += p.fastWriteField129(buf[offset:], binaryWriter) + offset += p.fastWriteField130(buf[offset:], binaryWriter) + offset += p.fastWriteField131(buf[offset:], binaryWriter) + offset += p.fastWriteField132(buf[offset:], binaryWriter) + offset += p.fastWriteField133(buf[offset:], binaryWriter) + offset += p.fastWriteField134(buf[offset:], binaryWriter) + offset += p.fastWriteField135(buf[offset:], binaryWriter) + offset += p.fastWriteField136(buf[offset:], binaryWriter) + offset += p.fastWriteField137(buf[offset:], binaryWriter) + offset += p.fastWriteField138(buf[offset:], binaryWriter) + offset += p.fastWriteField139(buf[offset:], binaryWriter) + offset += p.fastWriteField140(buf[offset:], binaryWriter) + offset += p.fastWriteField141(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField42(buf[offset:], binaryWriter) + offset += p.fastWriteField46(buf[offset:], binaryWriter) + offset += p.fastWriteField70(buf[offset:], binaryWriter) + offset += p.fastWriteField118(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TQueryOptions) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryOptions") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() + l += p.field25Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field31Length() + l += p.field32Length() + l += p.field33Length() + l += p.field34Length() + l += p.field42Length() + l += p.field43Length() + l += p.field44Length() + l += p.field45Length() + l += p.field46Length() + l += p.field48Length() + l += p.field49Length() + l += p.field50Length() + l += p.field51Length() + l += p.field52Length() + l += p.field53Length() + l += p.field54Length() + l += p.field55Length() + l += p.field56Length() + l += p.field57Length() + l += p.field58Length() + l += p.field59Length() + l += p.field60Length() + l += p.field61Length() + l += p.field62Length() + l += p.field63Length() + l += p.field64Length() + l += p.field65Length() + l += p.field66Length() + l += p.field67Length() + l += p.field68Length() + l += p.field69Length() + l += p.field70Length() + l += p.field71Length() + l += p.field72Length() + l += p.field73Length() + l += 
p.field74Length() + l += p.field75Length() + l += p.field76Length() + l += p.field77Length() + l += p.field78Length() + l += p.field79Length() + l += p.field80Length() + l += p.field81Length() + l += p.field82Length() + l += p.field83Length() + l += p.field84Length() + l += p.field85Length() + l += p.field86Length() + l += p.field87Length() + l += p.field88Length() + l += p.field89Length() + l += p.field90Length() + l += p.field91Length() + l += p.field92Length() + l += p.field93Length() + l += p.field94Length() + l += p.field95Length() + l += p.field96Length() + l += p.field97Length() + l += p.field98Length() + l += p.field99Length() + l += p.field100Length() + l += p.field101Length() + l += p.field102Length() + l += p.field103Length() + l += p.field104Length() + l += p.field105Length() + l += p.field106Length() + l += p.field107Length() + l += p.field108Length() + l += p.field109Length() + l += p.field110Length() + l += p.field111Length() + l += p.field112Length() + l += p.field113Length() + l += p.field114Length() + l += p.field115Length() + l += p.field116Length() + l += p.field117Length() + l += p.field118Length() + l += p.field119Length() + l += p.field120Length() + l += p.field121Length() + l += p.field122Length() + l += p.field123Length() + l += p.field124Length() + l += p.field125Length() + l += p.field126Length() + l += p.field127Length() + l += p.field128Length() + l += p.field129Length() + l += p.field130Length() + l += p.field131Length() + l += p.field132Length() + l += p.field133Length() + l += p.field134Length() + l += p.field135Length() + l += p.field136Length() + l += p.field137Length() + l += p.field138Length() + l += p.field139Length() + l += p.field140Length() + l += p.field141Length() + l += p.field1000Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TQueryOptions) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAbortOnError() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "abort_on_error", thrift.BOOL, 1) + offset += bthrift.Binary.WriteBool(buf[offset:], p.AbortOnError) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.KuduLatestObservedTs = v +func (p *TQueryOptions) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxErrors() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_errors", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], p.MaxErrors) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDisableCodegen() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_codegen", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], p.DisableCodegen) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBatchSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_size", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], p.BatchSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumNodes() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "num_nodes", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], p.NumNodes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxScanRangeLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_scan_range_length", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxScanRangeLength) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumScannerThreads() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_scanner_threads", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], p.NumScannerThreads) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxIoBuffers() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_io_buffers", thrift.I32, 8) + offset += bthrift.Binary.WriteI32(buf[offset:], p.MaxIoBuffers) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAllowUnsupportedFormats() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "allow_unsupported_formats", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], p.AllowUnsupportedFormats) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDefaultOrderByLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "default_order_by_limit", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], p.DefaultOrderByLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mem_limit", thrift.I64, 12) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MemLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAbortOnDefaultLimitExceeded() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "abort_on_default_limit_exceeded", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], p.AbortOnDefaultLimitExceeded) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_timeout", thrift.I32, 14) + offset += bthrift.Binary.WriteI32(buf[offset:], p.QueryTimeout) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsReportSuccess() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"is_report_success", thrift.BOOL, 15) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsReportSuccess) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCodegenLevel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "codegen_level", thrift.I32, 16) + offset += bthrift.Binary.WriteI32(buf[offset:], p.CodegenLevel) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetKuduLatestObservedTs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "kudu_latest_observed_ts", thrift.I64, 17) + offset += bthrift.Binary.WriteI64(buf[offset:], p.KuduLatestObservedTs) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField18(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetQueryType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_type", thrift.I32, 18) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.QueryType)) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.QueryType = TQueryType(v) - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField19(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMinReservation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_reservation", thrift.I64, 19) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MinReservation) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MinReservation = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField20(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMaxReservation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_reservation", thrift.I64, 20) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxReservation) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MaxReservation = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField21(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInitialReservationTotalClaims() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "initial_reservation_total_claims", thrift.I64, 21) + offset += bthrift.Binary.WriteI64(buf[offset:], p.InitialReservationTotalClaims) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.InitialReservationTotalClaims = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField22(buf []byte) (int, error) { +func (p *TQueryOptions) 
fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetBufferPoolLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "buffer_pool_limit", thrift.I64, 22) + offset += bthrift.Binary.WriteI64(buf[offset:], p.BufferPoolLimit) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.BufferPoolLimit = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField23(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetDefaultSpillableBufferSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "default_spillable_buffer_size", thrift.I64, 23) + offset += bthrift.Binary.WriteI64(buf[offset:], p.DefaultSpillableBufferSize) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.DefaultSpillableBufferSize = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField24(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMinSpillableBufferSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_spillable_buffer_size", thrift.I64, 24) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MinSpillableBufferSize) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MinSpillableBufferSize = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField25(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMaxRowSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_row_size", thrift.I64, 25) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxRowSize) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.MaxRowSize = v +func (p *TQueryOptions) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDisableStreamPreaggregations() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_stream_preaggregations", thrift.BOOL, 26) + offset += bthrift.Binary.WriteBool(buf[offset:], p.DisableStreamPreaggregations) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField26(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMtDop() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mt_dop", thrift.I32, 27) + offset += bthrift.Binary.WriteI32(buf[offset:], p.MtDop) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.DisableStreamPreaggregations = v +func (p *TQueryOptions) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadMemLimit() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "load_mem_limit", thrift.I64, 28) + offset += bthrift.Binary.WriteI64(buf[offset:], p.LoadMemLimit) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField27(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMaxScanKeyNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_scan_key_num", thrift.I32, 29) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxScanKeyNum) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.MtDop = v +func (p *TQueryOptions) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxPushdownConditionsPerColumn() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_pushdown_conditions_per_column", thrift.I32, 30) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxPushdownConditionsPerColumn) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField28(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableSpilling() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_spilling", thrift.BOOL, 31) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableSpilling) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.LoadMemLimit = v +func (p *TQueryOptions) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableEnableExchangeNodeParallelMerge() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableEnableExchangeNodeParallelMerge) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField29(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetRuntimeFilterWaitTimeMs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_wait_time_ms", thrift.I32, 33) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeFilterWaitTimeMs) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.MaxScanKeyNum = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField30(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetRuntimeFilterMaxInNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_max_in_num", thrift.I32, 34) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeFilterMaxInNum) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.MaxPushdownConditionsPerColumn = &v + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + 
return offset +} +func (p *TQueryOptions) fastWriteField42(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetResourceLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_limit", thrift.STRUCT, 42) + offset += p.ResourceLimit.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField31(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField43(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetReturnObjectDataAsBinary() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "return_object_data_as_binary", thrift.BOOL, 43) + offset += bthrift.Binary.WriteBool(buf[offset:], p.ReturnObjectDataAsBinary) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.EnableSpilling = v +func (p *TQueryOptions) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrimTailingSpacesForExternalTableQuery() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44) + offset += bthrift.Binary.WriteBool(buf[offset:], p.TrimTailingSpacesForExternalTableQuery) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField32(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField45(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableFunctionPushdown() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_function_pushdown", thrift.BOOL, 45) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableFunctionPushdown) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableEnableExchangeNodeParallelMerge = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField33(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField46(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetFragmentTransmissionCompressionCodec() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_transmission_compression_codec", thrift.STRING, 46) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FragmentTransmissionCompressionCodec) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.RuntimeFilterWaitTimeMs = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField34(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField48(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableLocalExchange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_local_exchange", thrift.BOOL, 48) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableLocalExchange) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.RuntimeFilterMaxInNum = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField42(buf 
[]byte) (int, error) { +func (p *TQueryOptions) fastWriteField49(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSkipStorageEngineMerge() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_storage_engine_merge", thrift.BOOL, 49) + offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipStorageEngineMerge) - tmp := NewTResourceLimit() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.ResourceLimit = tmp - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField43(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField50(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSkipDeletePredicate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_delete_predicate", thrift.BOOL, 50) + offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipDeletePredicate) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ReturnObjectDataAsBinary = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField44(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField51(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableNewShuffleHashMethod() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_new_shuffle_hash_method", thrift.BOOL, 51) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableNewShuffleHashMethod) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.TrimTailingSpacesForExternalTableQuery = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField45(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField52(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetBeExecVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_exec_version", thrift.I32, 52) + offset += bthrift.Binary.WriteI32(buf[offset:], p.BeExecVersion) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.EnableFunctionPushdown = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField46(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField53(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetPartitionedHashJoinRowsThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitioned_hash_join_rows_threshold", thrift.I32, 53) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionedHashJoinRowsThreshold) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FragmentTransmissionCompressionCodec = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField48(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField54(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableShareHashTableForBroadcastJoin() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54) + 
offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableShareHashTableForBroadcastJoin) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.EnableLocalExchange = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField49(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField55(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetCheckOverflowForDecimal() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "check_overflow_for_decimal", thrift.BOOL, 55) + offset += bthrift.Binary.WriteBool(buf[offset:], p.CheckOverflowForDecimal) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.SkipStorageEngineMerge = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField50(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField56(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSkipDeleteBitmap() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_delete_bitmap", thrift.BOOL, 56) + offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipDeleteBitmap) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.SkipDeletePredicate = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField51(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField57(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnablePipelineEngine() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_pipeline_engine", thrift.BOOL, 57) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePipelineEngine) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.EnableNewShuffleHashMethod = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField52(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField58(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetRepeatMaxNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "repeat_max_num", thrift.I32, 58) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RepeatMaxNum) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.BeExecVersion = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField53(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField59(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetExternalSortBytesThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_sort_bytes_threshold", thrift.I64, 59) + offset += bthrift.Binary.WriteI64(buf[offset:], p.ExternalSortBytesThreshold) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.PartitionedHashJoinRowsThreshold = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField54(buf []byte) (int, 
error) { +func (p *TQueryOptions) fastWriteField60(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetPartitionedHashAggRowsThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitioned_hash_agg_rows_threshold", thrift.I32, 60) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionedHashAggRowsThreshold) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.EnableShareHashTableForBroadcastJoin = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField55(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField61(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableFileCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_file_cache", thrift.BOOL, 61) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableFileCache) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.CheckOverflowForDecimal = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField56(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField62(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInsertTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "insert_timeout", thrift.I32, 62) + offset += bthrift.Binary.WriteI32(buf[offset:], p.InsertTimeout) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.SkipDeleteBitmap = v +func (p *TQueryOptions) fastWriteField63(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExecutionTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "execution_timeout", thrift.I32, 63) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ExecutionTimeout) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField57(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField64(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetDryRunQuery() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dry_run_query", thrift.BOOL, 64) + offset += bthrift.Binary.WriteBool(buf[offset:], p.DryRunQuery) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnablePipelineEngine = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField58(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField65(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableCommonExprPushdown() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_common_expr_pushdown", thrift.BOOL, 65) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableCommonExprPushdown) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.RepeatMaxNum = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField59(buf []byte) (int, error) { +func (p 
*TQueryOptions) fastWriteField66(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetParallelInstance() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_instance", thrift.I32, 66) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ParallelInstance) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ExternalSortBytesThreshold = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField60(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField67(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetMysqlRowBinaryFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mysql_row_binary_format", thrift.BOOL, 67) + offset += bthrift.Binary.WriteBool(buf[offset:], p.MysqlRowBinaryFormat) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.PartitionedHashAggRowsThreshold = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField61(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField68(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetExternalAggBytesThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_agg_bytes_threshold", thrift.I64, 68) + offset += bthrift.Binary.WriteI64(buf[offset:], p.ExternalAggBytesThreshold) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableFileCache = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField62(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField69(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetExternalAggPartitionBits() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_agg_partition_bits", thrift.I32, 69) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ExternalAggPartitionBits) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.InsertTimeout = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField63(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField70(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetFileCacheBasePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_cache_base_path", thrift.STRING, 70) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FileCacheBasePath) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ExecutionTimeout = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField64(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField71(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableParquetLazyMat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parquet_lazy_mat", thrift.BOOL, 71) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParquetLazyMat) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); 
err != nil { - return offset, err - } else { - offset += l - - p.DryRunQuery = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField65(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField72(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableOrcLazyMat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_orc_lazy_mat", thrift.BOOL, 72) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableOrcLazyMat) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableCommonExprPushdown = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField66(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField73(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetScanQueueMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_queue_mem_limit", thrift.I64, 73) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanQueueMemLimit) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ParallelInstance = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField67(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField74(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableScanNodeRunSerial() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_scan_node_run_serial", thrift.BOOL, 74) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableScanNodeRunSerial) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.MysqlRowBinaryFormat = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField68(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField75(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableInsertStrict() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_insert_strict", thrift.BOOL, 75) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableInsertStrict) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ExternalAggBytesThreshold = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField69(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField76(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableInvertedIndexQuery() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_inverted_index_query", thrift.BOOL, 76) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableInvertedIndexQuery) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ExternalAggPartitionBits = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField70(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField77(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if 
p.IsSetTruncateCharOrVarcharColumns() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "truncate_char_or_varchar_columns", thrift.BOOL, 77) + offset += bthrift.Binary.WriteBool(buf[offset:], p.TruncateCharOrVarcharColumns) - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FileCacheBasePath = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField71(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField78(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableHashJoinEarlyStartProbe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_hash_join_early_start_probe", thrift.BOOL, 78) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableHashJoinEarlyStartProbe) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableParquetLazyMat = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField72(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField79(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnablePipelineXEngine() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_pipeline_x_engine", thrift.BOOL, 79) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePipelineXEngine) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - p.EnableOrcLazyMat = v +func (p *TQueryOptions) fastWriteField80(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableMemtableOnSinkNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_memtable_on_sink_node", thrift.BOOL, 80) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableMemtableOnSinkNode) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField73(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField81(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableDeleteSubPredicateV2() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_delete_sub_predicate_v2", thrift.BOOL, 81) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableDeleteSubPredicateV2) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ScanQueueMemLimit = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField74(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField82(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetFeProcessUuid() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_process_uuid", thrift.I64, 82) + offset += bthrift.Binary.WriteI64(buf[offset:], p.FeProcessUuid) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableScanNodeRunSerial = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField75(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField83(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInvertedIndexConjunctionOptThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_conjunction_opt_threshold", thrift.I32, 83) + offset += bthrift.Binary.WriteI32(buf[offset:], p.InvertedIndexConjunctionOptThreshold) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableInsertStrict = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField76(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField84(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_profile", thrift.BOOL, 84) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableProfile) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableInvertedIndexQuery = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField77(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField85(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnablePageCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_page_cache", thrift.BOOL, 85) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePageCache) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.TruncateCharOrVarcharColumns = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField78(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField86(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetAnalyzeTimeout() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "analyze_timeout", thrift.I32, 86) + offset += bthrift.Binary.WriteI32(buf[offset:], p.AnalyzeTimeout) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableHashJoinEarlyStartProbe = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField79(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField87(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetFasterFloatConvert() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "faster_float_convert", thrift.BOOL, 87) + offset += bthrift.Binary.WriteBool(buf[offset:], p.FasterFloatConvert) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnablePipelineXEngine = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField80(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField88(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableDecimal256() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_decimal256", thrift.BOOL, 88) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableDecimal256) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - 
p.EnableMemtableOnSinkNode = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField81(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField89(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetEnableLocalShuffle() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_local_shuffle", thrift.BOOL, 89) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableLocalShuffle) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableDeleteSubPredicateV2 = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField82(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField90(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSkipMissingVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_missing_version", thrift.BOOL, 90) + offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipMissingVersion) - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.FeProcessUuid = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField83(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField91(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetRuntimeFilterWaitInfinitely() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_wait_infinitely", thrift.BOOL, 91) + offset += bthrift.Binary.WriteBool(buf[offset:], p.RuntimeFilterWaitInfinitely) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.InvertedIndexConjunctionOptThreshold = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField84(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField92(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetWaitFullBlockScheduleTimes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wait_full_block_schedule_times", thrift.I32, 92) + offset += bthrift.Binary.WriteI32(buf[offset:], p.WaitFullBlockScheduleTimes) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnableProfile = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField85(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField93(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInvertedIndexMaxExpansions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_max_expansions", thrift.I32, 93) + offset += bthrift.Binary.WriteI32(buf[offset:], p.InvertedIndexMaxExpansions) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.EnablePageCache = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TQueryOptions) FastReadField86(buf []byte) (int, error) { +func (p *TQueryOptions) fastWriteField94(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInvertedIndexSkipThreshold() { + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_skip_threshold", thrift.I32, 94) + offset += bthrift.Binary.WriteI32(buf[offset:], p.InvertedIndexSkipThreshold) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.AnalyzeTimeout = v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil -} - -// for compatibility -func (p *TQueryOptions) FastWrite(buf []byte) int { - return 0 + return offset } -func (p *TQueryOptions) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField95(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryOptions") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField20(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) - offset += p.fastWriteField23(buf[offset:], binaryWriter) - offset += p.fastWriteField24(buf[offset:], binaryWriter) - offset += p.fastWriteField25(buf[offset:], binaryWriter) - offset += p.fastWriteField26(buf[offset:], binaryWriter) - offset += p.fastWriteField27(buf[offset:], binaryWriter) - offset += p.fastWriteField28(buf[offset:], binaryWriter) - offset += p.fastWriteField29(buf[offset:], binaryWriter) - offset += p.fastWriteField30(buf[offset:], binaryWriter) - offset += p.fastWriteField31(buf[offset:], binaryWriter) - offset += p.fastWriteField32(buf[offset:], binaryWriter) - offset += p.fastWriteField33(buf[offset:], binaryWriter) - offset += p.fastWriteField34(buf[offset:], binaryWriter) - offset += p.fastWriteField43(buf[offset:], binaryWriter) - offset += p.fastWriteField44(buf[offset:], binaryWriter) - offset += p.fastWriteField45(buf[offset:], binaryWriter) - offset += p.fastWriteField48(buf[offset:], binaryWriter) - offset += p.fastWriteField49(buf[offset:], binaryWriter) - offset += p.fastWriteField50(buf[offset:], binaryWriter) - offset += p.fastWriteField51(buf[offset:], binaryWriter) - offset += p.fastWriteField52(buf[offset:], binaryWriter) - offset += p.fastWriteField53(buf[offset:], binaryWriter) - offset += p.fastWriteField54(buf[offset:], binaryWriter) - offset += p.fastWriteField55(buf[offset:], binaryWriter) - offset += p.fastWriteField56(buf[offset:], binaryWriter) - offset += p.fastWriteField57(buf[offset:], binaryWriter) - offset += p.fastWriteField58(buf[offset:], binaryWriter) - offset += p.fastWriteField59(buf[offset:], 
binaryWriter) - offset += p.fastWriteField60(buf[offset:], binaryWriter) - offset += p.fastWriteField61(buf[offset:], binaryWriter) - offset += p.fastWriteField62(buf[offset:], binaryWriter) - offset += p.fastWriteField63(buf[offset:], binaryWriter) - offset += p.fastWriteField64(buf[offset:], binaryWriter) - offset += p.fastWriteField65(buf[offset:], binaryWriter) - offset += p.fastWriteField66(buf[offset:], binaryWriter) - offset += p.fastWriteField67(buf[offset:], binaryWriter) - offset += p.fastWriteField68(buf[offset:], binaryWriter) - offset += p.fastWriteField69(buf[offset:], binaryWriter) - offset += p.fastWriteField71(buf[offset:], binaryWriter) - offset += p.fastWriteField72(buf[offset:], binaryWriter) - offset += p.fastWriteField73(buf[offset:], binaryWriter) - offset += p.fastWriteField74(buf[offset:], binaryWriter) - offset += p.fastWriteField75(buf[offset:], binaryWriter) - offset += p.fastWriteField76(buf[offset:], binaryWriter) - offset += p.fastWriteField77(buf[offset:], binaryWriter) - offset += p.fastWriteField78(buf[offset:], binaryWriter) - offset += p.fastWriteField79(buf[offset:], binaryWriter) - offset += p.fastWriteField80(buf[offset:], binaryWriter) - offset += p.fastWriteField81(buf[offset:], binaryWriter) - offset += p.fastWriteField82(buf[offset:], binaryWriter) - offset += p.fastWriteField83(buf[offset:], binaryWriter) - offset += p.fastWriteField84(buf[offset:], binaryWriter) - offset += p.fastWriteField85(buf[offset:], binaryWriter) - offset += p.fastWriteField86(buf[offset:], binaryWriter) - offset += p.fastWriteField18(buf[offset:], binaryWriter) - offset += p.fastWriteField42(buf[offset:], binaryWriter) - offset += p.fastWriteField46(buf[offset:], binaryWriter) - offset += p.fastWriteField70(buf[offset:], binaryWriter) + if p.IsSetEnableParallelScan() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parallel_scan", thrift.BOOL, 95) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParallelScan) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField96(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParallelScanMaxScannersCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_scan_max_scanners_count", thrift.I32, 96) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ParallelScanMaxScannersCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TQueryOptions) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TQueryOptions") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field12Length() - l += p.field13Length() - l += p.field14Length() - l += p.field15Length() - l += p.field16Length() - l += p.field17Length() - l += p.field18Length() - l += p.field19Length() - l += p.field20Length() - l += p.field21Length() - l += p.field22Length() - l += p.field23Length() - l += p.field24Length() - l += p.field25Length() - l += p.field26Length() - l += p.field27Length() - l += p.field28Length() - l += p.field29Length() - l += p.field30Length() - l += p.field31Length() - l += p.field32Length() - l += p.field33Length() - l += p.field34Length() - l += 
p.field42Length() - l += p.field43Length() - l += p.field44Length() - l += p.field45Length() - l += p.field46Length() - l += p.field48Length() - l += p.field49Length() - l += p.field50Length() - l += p.field51Length() - l += p.field52Length() - l += p.field53Length() - l += p.field54Length() - l += p.field55Length() - l += p.field56Length() - l += p.field57Length() - l += p.field58Length() - l += p.field59Length() - l += p.field60Length() - l += p.field61Length() - l += p.field62Length() - l += p.field63Length() - l += p.field64Length() - l += p.field65Length() - l += p.field66Length() - l += p.field67Length() - l += p.field68Length() - l += p.field69Length() - l += p.field70Length() - l += p.field71Length() - l += p.field72Length() - l += p.field73Length() - l += p.field74Length() - l += p.field75Length() - l += p.field76Length() - l += p.field77Length() - l += p.field78Length() - l += p.field79Length() - l += p.field80Length() - l += p.field81Length() - l += p.field82Length() - l += p.field83Length() - l += p.field84Length() - l += p.field85Length() - l += p.field86Length() +func (p *TQueryOptions) fastWriteField97(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParallelScanMinRowsPerScanner() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_scan_min_rows_per_scanner", thrift.I64, 97) + offset += bthrift.Binary.WriteI64(buf[offset:], p.ParallelScanMinRowsPerScanner) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l + return offset } -func (p *TQueryOptions) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField98(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAbortOnError() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "abort_on_error", thrift.BOOL, 1) - offset += bthrift.Binary.WriteBool(buf[offset:], p.AbortOnError) + if p.IsSetSkipBadTablet() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_bad_tablet", thrift.BOOL, 98) + offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipBadTablet) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField99(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxErrors() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_errors", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], p.MaxErrors) + if p.IsSetScannerScaleUpRatio() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scanner_scale_up_ratio", thrift.DOUBLE, 99) + offset += bthrift.Binary.WriteDouble(buf[offset:], p.ScannerScaleUpRatio) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField100(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDisableCodegen() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_codegen", thrift.BOOL, 3) - offset += bthrift.Binary.WriteBool(buf[offset:], p.DisableCodegen) + if p.IsSetEnableDistinctStreamingAggregation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_distinct_streaming_aggregation", thrift.BOOL, 100) + offset += bthrift.Binary.WriteBool(buf[offset:], 
p.EnableDistinctStreamingAggregation) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField101(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetBatchSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_size", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], p.BatchSize) + if p.IsSetEnableJoinSpill() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_join_spill", thrift.BOOL, 101) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableJoinSpill) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField102(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNumNodes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_nodes", thrift.I32, 5) - offset += bthrift.Binary.WriteI32(buf[offset:], p.NumNodes) + if p.IsSetEnableSortSpill() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_sort_spill", thrift.BOOL, 102) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableSortSpill) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField103(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxScanRangeLength() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_scan_range_length", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxScanRangeLength) + if p.IsSetEnableAggSpill() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_agg_spill", thrift.BOOL, 103) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableAggSpill) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField104(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNumScannerThreads() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_scanner_threads", thrift.I32, 7) - offset += bthrift.Binary.WriteI32(buf[offset:], p.NumScannerThreads) + if p.IsSetMinRevocableMem() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_revocable_mem", thrift.I64, 104) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MinRevocableMem) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField105(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxIoBuffers() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_io_buffers", thrift.I32, 8) - offset += bthrift.Binary.WriteI32(buf[offset:], p.MaxIoBuffers) + if p.IsSetSpillStreamingAggMemLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "spill_streaming_agg_mem_limit", thrift.I64, 105) + offset += bthrift.Binary.WriteI64(buf[offset:], p.SpillStreamingAggMemLimit) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) 
fastWriteField106(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAllowUnsupportedFormats() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "allow_unsupported_formats", thrift.BOOL, 9) - offset += bthrift.Binary.WriteBool(buf[offset:], p.AllowUnsupportedFormats) + if p.IsSetDataQueueMaxBlocks() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_queue_max_blocks", thrift.I64, 106) + offset += bthrift.Binary.WriteI64(buf[offset:], p.DataQueueMaxBlocks) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField107(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultOrderByLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "default_order_by_limit", thrift.I64, 10) - offset += bthrift.Binary.WriteI64(buf[offset:], p.DefaultOrderByLimit) + if p.IsSetEnableCommonExprPushdownForInvertedIndex() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_common_expr_pushdown_for_inverted_index", thrift.BOOL, 107) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableCommonExprPushdownForInvertedIndex) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField108(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mem_limit", thrift.I64, 12) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MemLimit) + if p.IsSetLocalExchangeFreeBlocksLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_exchange_free_blocks_limit", thrift.I64, 108) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LocalExchangeFreeBlocksLimit) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField109(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAbortOnDefaultLimitExceeded() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "abort_on_default_limit_exceeded", thrift.BOOL, 13) - offset += bthrift.Binary.WriteBool(buf[offset:], p.AbortOnDefaultLimitExceeded) + if p.IsSetEnableForceSpill() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_force_spill", thrift.BOOL, 109) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableForceSpill) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField110(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_timeout", thrift.I32, 14) - offset += bthrift.Binary.WriteI32(buf[offset:], p.QueryTimeout) + if p.IsSetEnableParquetFilterByMinMax() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parquet_filter_by_min_max", thrift.BOOL, 110) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParquetFilterByMinMax) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) 
fastWriteField111(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIsReportSuccess() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_report_success", thrift.BOOL, 15) - offset += bthrift.Binary.WriteBool(buf[offset:], p.IsReportSuccess) + if p.IsSetEnableOrcFilterByMinMax() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_orc_filter_by_min_max", thrift.BOOL, 111) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableOrcFilterByMinMax) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField112(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetCodegenLevel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "codegen_level", thrift.I32, 16) - offset += bthrift.Binary.WriteI32(buf[offset:], p.CodegenLevel) + if p.IsSetMaxColumnReaderNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_column_reader_num", thrift.I32, 112) + offset += bthrift.Binary.WriteI32(buf[offset:], p.MaxColumnReaderNum) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField113(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetKuduLatestObservedTs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "kudu_latest_observed_ts", thrift.I64, 17) - offset += bthrift.Binary.WriteI64(buf[offset:], p.KuduLatestObservedTs) + if p.IsSetEnableLocalMergeSort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_local_merge_sort", thrift.BOOL, 113) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableLocalMergeSort) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField114(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetQueryType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_type", thrift.I32, 18) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.QueryType)) + if p.IsSetEnableParallelResultSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parallel_result_sink", thrift.BOOL, 114) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParallelResultSink) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField115(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMinReservation() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_reservation", thrift.I64, 19) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MinReservation) + if p.IsSetEnableShortCircuitQueryAccessColumnStore() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_short_circuit_query_access_column_store", thrift.BOOL, 115) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableShortCircuitQueryAccessColumnStore) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField116(buf []byte, binaryWriter bthrift.BinaryWriter) int 
{ offset := 0 - if p.IsSetMaxReservation() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_reservation", thrift.I64, 20) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxReservation) + if p.IsSetEnableNoNeedReadDataOpt() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_no_need_read_data_opt", thrift.BOOL, 116) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableNoNeedReadDataOpt) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField117(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetInitialReservationTotalClaims() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "initial_reservation_total_claims", thrift.I64, 21) - offset += bthrift.Binary.WriteI64(buf[offset:], p.InitialReservationTotalClaims) + if p.IsSetReadCsvEmptyLineAsNull() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_csv_empty_line_as_null", thrift.BOOL, 117) + offset += bthrift.Binary.WriteBool(buf[offset:], p.ReadCsvEmptyLineAsNull) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField118(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetBufferPoolLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "buffer_pool_limit", thrift.I64, 22) - offset += bthrift.Binary.WriteI64(buf[offset:], p.BufferPoolLimit) + if p.IsSetSerdeDialect() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "serde_dialect", thrift.I32, 118) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.SerdeDialect)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField119(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultSpillableBufferSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "default_spillable_buffer_size", thrift.I64, 23) - offset += bthrift.Binary.WriteI64(buf[offset:], p.DefaultSpillableBufferSize) + if p.IsSetEnableMatchWithoutInvertedIndex() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_match_without_inverted_index", thrift.BOOL, 119) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableMatchWithoutInvertedIndex) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField120(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMinSpillableBufferSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_spillable_buffer_size", thrift.I64, 24) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MinSpillableBufferSize) + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_fallback_on_missing_inverted_index", thrift.BOOL, 120) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableFallbackOnMissingInvertedIndex) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField25(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField121(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxRowSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_row_size", thrift.I64, 25) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxRowSize) + if p.IsSetKeepCarriageReturn() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "keep_carriage_return", thrift.BOOL, 121) + offset += bthrift.Binary.WriteBool(buf[offset:], p.KeepCarriageReturn) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField122(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDisableStreamPreaggregations() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_stream_preaggregations", thrift.BOOL, 26) - offset += bthrift.Binary.WriteBool(buf[offset:], p.DisableStreamPreaggregations) + if p.IsSetRuntimeBloomFilterMinSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_bloom_filter_min_size", thrift.I32, 122) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeBloomFilterMinSize) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField123(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMtDop() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mt_dop", thrift.I32, 27) - offset += bthrift.Binary.WriteI32(buf[offset:], p.MtDop) + if p.IsSetHiveParquetUseColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_parquet_use_column_names", thrift.BOOL, 123) + offset += bthrift.Binary.WriteBool(buf[offset:], p.HiveParquetUseColumnNames) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField124(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_mem_limit", thrift.I64, 28) - offset += bthrift.Binary.WriteI64(buf[offset:], p.LoadMemLimit) + if p.IsSetHiveOrcUseColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_orc_use_column_names", thrift.BOOL, 124) + offset += bthrift.Binary.WriteBool(buf[offset:], p.HiveOrcUseColumnNames) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField125(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxScanKeyNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_scan_key_num", thrift.I32, 29) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxScanKeyNum) + if p.IsSetEnableSegmentCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_segment_cache", thrift.BOOL, 125) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableSegmentCache) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField126(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRuntimeBloomFilterMaxSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_bloom_filter_max_size", thrift.I32, 126) + offset += 
bthrift.Binary.WriteI32(buf[offset:], p.RuntimeBloomFilterMaxSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField127(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInListValueCountThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "in_list_value_count_threshold", thrift.I32, 127) + offset += bthrift.Binary.WriteI32(buf[offset:], p.InListValueCountThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField128(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableVerboseProfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_verbose_profile", thrift.BOOL, 128) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableVerboseProfile) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField129(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRpcVerboseProfileMaxInstanceCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rpc_verbose_profile_max_instance_count", thrift.I32, 129) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RpcVerboseProfileMaxInstanceCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField130(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableAdaptivePipelineTaskSerialReadOnLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_adaptive_pipeline_task_serial_read_on_limit", thrift.BOOL, 130) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableAdaptivePipelineTaskSerialReadOnLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField131(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAdaptivePipelineTaskSerialReadOnLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "adaptive_pipeline_task_serial_read_on_limit", thrift.I32, 131) + offset += bthrift.Binary.WriteI32(buf[offset:], p.AdaptivePipelineTaskSerialReadOnLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField132(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParallelPrepareThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_prepare_threshold", thrift.I32, 132) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ParallelPrepareThreshold) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField133(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxPushdownConditionsPerColumn() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_pushdown_conditions_per_column", thrift.I32, 30) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxPushdownConditionsPerColumn) + if p.IsSetPartitionTopnMaxPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_topn_max_partitions", thrift.I32, 133) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionTopnMaxPartitions) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField31(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField134(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnableSpilling() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_spilling", thrift.BOOL, 31) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableSpilling) + if p.IsSetPartitionTopnPrePartitionRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_topn_pre_partition_rows", thrift.I32, 134) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionTopnPrePartitionRows) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField135(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnableEnableExchangeNodeParallelMerge() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableEnableExchangeNodeParallelMerge) + if p.IsSetEnableParallelOutfile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parallel_outfile", thrift.BOOL, 135) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParallelOutfile) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField136(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRuntimeFilterWaitTimeMs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_wait_time_ms", thrift.I32, 33) - offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeFilterWaitTimeMs) + if p.IsSetEnablePhraseQuerySequentialOpt() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_phrase_query_sequential_opt", thrift.BOOL, 136) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePhraseQuerySequentialOpt) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField137(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRuntimeFilterMaxInNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_max_in_num", thrift.I32, 34) - offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeFilterMaxInNum) + if p.IsSetEnableAutoCreateWhenOverwrite() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_auto_create_when_overwrite", thrift.BOOL, 137) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableAutoCreateWhenOverwrite) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField42(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField138(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetResourceLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_limit", thrift.STRUCT, 42) - offset += p.ResourceLimit.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetOrcTinyStripeThresholdBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "orc_tiny_stripe_threshold_bytes", thrift.I64, 138) + offset += bthrift.Binary.WriteI64(buf[offset:], p.OrcTinyStripeThresholdBytes) + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField43(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField139(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetReturnObjectDataAsBinary() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "return_object_data_as_binary", thrift.BOOL, 43) - offset += bthrift.Binary.WriteBool(buf[offset:], p.ReturnObjectDataAsBinary) + if p.IsSetOrcOnceMaxReadBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "orc_once_max_read_bytes", thrift.I64, 139) + offset += bthrift.Binary.WriteI64(buf[offset:], p.OrcOnceMaxReadBytes) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField140(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTrimTailingSpacesForExternalTableQuery() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44) - offset += bthrift.Binary.WriteBool(buf[offset:], p.TrimTailingSpacesForExternalTableQuery) + if p.IsSetOrcMaxMergeDistanceBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "orc_max_merge_distance_bytes", thrift.I64, 140) + offset += bthrift.Binary.WriteI64(buf[offset:], p.OrcMaxMergeDistanceBytes) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField45(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField141(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetEnableFunctionPushdown() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_function_pushdown", thrift.BOOL, 45) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableFunctionPushdown) + if p.IsSetIgnoreRuntimeFilterError() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ignore_runtime_filter_error", thrift.BOOL, 141) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IgnoreRuntimeFilterError) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField46(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueryOptions) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFragmentTransmissionCompressionCodec() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_transmission_compression_codec", thrift.STRING, 46) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FragmentTransmissionCompressionCodec) + if p.IsSetDisableFileCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_file_cache", thrift.BOOL, 1000) + offset += bthrift.Binary.WriteBool(buf[offset:], p.DisableFileCache) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TQueryOptions) fastWriteField48(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableLocalExchange() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_local_exchange", thrift.BOOL, 48) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableLocalExchange) +func (p *TQueryOptions) field1Length() int { + l := 0 + if p.IsSetAbortOnError() { + l += bthrift.Binary.FieldBeginLength("abort_on_error", thrift.BOOL, 1) + l += bthrift.Binary.BoolLength(p.AbortOnError) - 
offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField49(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSkipStorageEngineMerge() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_storage_engine_merge", thrift.BOOL, 49) - offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipStorageEngineMerge) +func (p *TQueryOptions) field2Length() int { + l := 0 + if p.IsSetMaxErrors() { + l += bthrift.Binary.FieldBeginLength("max_errors", thrift.I32, 2) + l += bthrift.Binary.I32Length(p.MaxErrors) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField50(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSkipDeletePredicate() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_delete_predicate", thrift.BOOL, 50) - offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipDeletePredicate) +func (p *TQueryOptions) field3Length() int { + l := 0 + if p.IsSetDisableCodegen() { + l += bthrift.Binary.FieldBeginLength("disable_codegen", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(p.DisableCodegen) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field4Length() int { + l := 0 + if p.IsSetBatchSize() { + l += bthrift.Binary.FieldBeginLength("batch_size", thrift.I32, 4) + l += bthrift.Binary.I32Length(p.BatchSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field5Length() int { + l := 0 + if p.IsSetNumNodes() { + l += bthrift.Binary.FieldBeginLength("num_nodes", thrift.I32, 5) + l += bthrift.Binary.I32Length(p.NumNodes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field6Length() int { + l := 0 + if p.IsSetMaxScanRangeLength() { + l += bthrift.Binary.FieldBeginLength("max_scan_range_length", thrift.I64, 6) + l += bthrift.Binary.I64Length(p.MaxScanRangeLength) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field7Length() int { + l := 0 + if p.IsSetNumScannerThreads() { + l += bthrift.Binary.FieldBeginLength("num_scanner_threads", thrift.I32, 7) + l += bthrift.Binary.I32Length(p.NumScannerThreads) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field8Length() int { + l := 0 + if p.IsSetMaxIoBuffers() { + l += bthrift.Binary.FieldBeginLength("max_io_buffers", thrift.I32, 8) + l += bthrift.Binary.I32Length(p.MaxIoBuffers) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field9Length() int { + l := 0 + if p.IsSetAllowUnsupportedFormats() { + l += bthrift.Binary.FieldBeginLength("allow_unsupported_formats", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(p.AllowUnsupportedFormats) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field10Length() int { + l := 0 + if p.IsSetDefaultOrderByLimit() { + l += bthrift.Binary.FieldBeginLength("default_order_by_limit", thrift.I64, 10) + l += bthrift.Binary.I64Length(p.DefaultOrderByLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field12Length() int { + l := 0 + if p.IsSetMemLimit() { + l += bthrift.Binary.FieldBeginLength("mem_limit", thrift.I64, 12) + l += bthrift.Binary.I64Length(p.MemLimit) + + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field13Length() int { + l := 0 + if p.IsSetAbortOnDefaultLimitExceeded() { + l += bthrift.Binary.FieldBeginLength("abort_on_default_limit_exceeded", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(p.AbortOnDefaultLimitExceeded) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field14Length() int { + l := 0 + if p.IsSetQueryTimeout() { + l += bthrift.Binary.FieldBeginLength("query_timeout", thrift.I32, 14) + l += bthrift.Binary.I32Length(p.QueryTimeout) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field15Length() int { + l := 0 + if p.IsSetIsReportSuccess() { + l += bthrift.Binary.FieldBeginLength("is_report_success", thrift.BOOL, 15) + l += bthrift.Binary.BoolLength(p.IsReportSuccess) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field16Length() int { + l := 0 + if p.IsSetCodegenLevel() { + l += bthrift.Binary.FieldBeginLength("codegen_level", thrift.I32, 16) + l += bthrift.Binary.I32Length(p.CodegenLevel) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field17Length() int { + l := 0 + if p.IsSetKuduLatestObservedTs() { + l += bthrift.Binary.FieldBeginLength("kudu_latest_observed_ts", thrift.I64, 17) + l += bthrift.Binary.I64Length(p.KuduLatestObservedTs) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field18Length() int { + l := 0 + if p.IsSetQueryType() { + l += bthrift.Binary.FieldBeginLength("query_type", thrift.I32, 18) + l += bthrift.Binary.I32Length(int32(p.QueryType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field19Length() int { + l := 0 + if p.IsSetMinReservation() { + l += bthrift.Binary.FieldBeginLength("min_reservation", thrift.I64, 19) + l += bthrift.Binary.I64Length(p.MinReservation) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field20Length() int { + l := 0 + if p.IsSetMaxReservation() { + l += bthrift.Binary.FieldBeginLength("max_reservation", thrift.I64, 20) + l += bthrift.Binary.I64Length(p.MaxReservation) + + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField51(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableNewShuffleHashMethod() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_new_shuffle_hash_method", thrift.BOOL, 51) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableNewShuffleHashMethod) +func (p *TQueryOptions) field21Length() int { + l := 0 + if p.IsSetInitialReservationTotalClaims() { + l += bthrift.Binary.FieldBeginLength("initial_reservation_total_claims", thrift.I64, 21) + l += bthrift.Binary.I64Length(p.InitialReservationTotalClaims) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField52(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBeExecVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_exec_version", thrift.I32, 52) - offset += bthrift.Binary.WriteI32(buf[offset:], p.BeExecVersion) +func (p *TQueryOptions) field22Length() int { + l := 0 + if p.IsSetBufferPoolLimit() { + l += bthrift.Binary.FieldBeginLength("buffer_pool_limit", thrift.I64, 22) + l += bthrift.Binary.I64Length(p.BufferPoolLimit) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField53(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPartitionedHashJoinRowsThreshold() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitioned_hash_join_rows_threshold", thrift.I32, 53) - offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionedHashJoinRowsThreshold) +func (p *TQueryOptions) field23Length() int { + l := 0 + if p.IsSetDefaultSpillableBufferSize() { + l += bthrift.Binary.FieldBeginLength("default_spillable_buffer_size", thrift.I64, 23) + l += bthrift.Binary.I64Length(p.DefaultSpillableBufferSize) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField54(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableShareHashTableForBroadcastJoin() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableShareHashTableForBroadcastJoin) +func (p *TQueryOptions) field24Length() int { + l := 0 + if p.IsSetMinSpillableBufferSize() { + l += bthrift.Binary.FieldBeginLength("min_spillable_buffer_size", thrift.I64, 24) + l += bthrift.Binary.I64Length(p.MinSpillableBufferSize) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField55(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCheckOverflowForDecimal() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "check_overflow_for_decimal", thrift.BOOL, 55) - offset += bthrift.Binary.WriteBool(buf[offset:], p.CheckOverflowForDecimal) +func (p *TQueryOptions) field25Length() int { + l := 0 + if p.IsSetMaxRowSize() { + l += bthrift.Binary.FieldBeginLength("max_row_size", thrift.I64, 25) + l += bthrift.Binary.I64Length(p.MaxRowSize) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField56(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSkipDeleteBitmap() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "skip_delete_bitmap", thrift.BOOL, 56) - offset += bthrift.Binary.WriteBool(buf[offset:], p.SkipDeleteBitmap) +func (p *TQueryOptions) field26Length() int { + l := 0 + if p.IsSetDisableStreamPreaggregations() { + l += bthrift.Binary.FieldBeginLength("disable_stream_preaggregations", thrift.BOOL, 26) + l += bthrift.Binary.BoolLength(p.DisableStreamPreaggregations) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField57(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnablePipelineEngine() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_pipeline_engine", thrift.BOOL, 57) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePipelineEngine) +func (p *TQueryOptions) field27Length() int { + l := 0 + if p.IsSetMtDop() { + l += bthrift.Binary.FieldBeginLength("mt_dop", thrift.I32, 27) + l += bthrift.Binary.I32Length(p.MtDop) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l 
} -func (p *TQueryOptions) fastWriteField58(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRepeatMaxNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "repeat_max_num", thrift.I32, 58) - offset += bthrift.Binary.WriteI32(buf[offset:], p.RepeatMaxNum) +func (p *TQueryOptions) field28Length() int { + l := 0 + if p.IsSetLoadMemLimit() { + l += bthrift.Binary.FieldBeginLength("load_mem_limit", thrift.I64, 28) + l += bthrift.Binary.I64Length(p.LoadMemLimit) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField59(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExternalSortBytesThreshold() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_sort_bytes_threshold", thrift.I64, 59) - offset += bthrift.Binary.WriteI64(buf[offset:], p.ExternalSortBytesThreshold) +func (p *TQueryOptions) field29Length() int { + l := 0 + if p.IsSetMaxScanKeyNum() { + l += bthrift.Binary.FieldBeginLength("max_scan_key_num", thrift.I32, 29) + l += bthrift.Binary.I32Length(*p.MaxScanKeyNum) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField60(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPartitionedHashAggRowsThreshold() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitioned_hash_agg_rows_threshold", thrift.I32, 60) - offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionedHashAggRowsThreshold) +func (p *TQueryOptions) field30Length() int { + l := 0 + if p.IsSetMaxPushdownConditionsPerColumn() { + l += bthrift.Binary.FieldBeginLength("max_pushdown_conditions_per_column", thrift.I32, 30) + l += bthrift.Binary.I32Length(*p.MaxPushdownConditionsPerColumn) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField61(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableFileCache() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_file_cache", thrift.BOOL, 61) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableFileCache) +func (p *TQueryOptions) field31Length() int { + l := 0 + if p.IsSetEnableSpilling() { + l += bthrift.Binary.FieldBeginLength("enable_spilling", thrift.BOOL, 31) + l += bthrift.Binary.BoolLength(p.EnableSpilling) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField62(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetInsertTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "insert_timeout", thrift.I32, 62) - offset += bthrift.Binary.WriteI32(buf[offset:], p.InsertTimeout) +func (p *TQueryOptions) field32Length() int { + l := 0 + if p.IsSetEnableEnableExchangeNodeParallelMerge() { + l += bthrift.Binary.FieldBeginLength("enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32) + l += bthrift.Binary.BoolLength(p.EnableEnableExchangeNodeParallelMerge) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField63(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExecutionTimeout() { - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "execution_timeout", thrift.I32, 63) - offset += bthrift.Binary.WriteI32(buf[offset:], p.ExecutionTimeout) +func (p *TQueryOptions) field33Length() int { + l := 0 + if p.IsSetRuntimeFilterWaitTimeMs() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_wait_time_ms", thrift.I32, 33) + l += bthrift.Binary.I32Length(p.RuntimeFilterWaitTimeMs) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField64(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDryRunQuery() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dry_run_query", thrift.BOOL, 64) - offset += bthrift.Binary.WriteBool(buf[offset:], p.DryRunQuery) +func (p *TQueryOptions) field34Length() int { + l := 0 + if p.IsSetRuntimeFilterMaxInNum() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_max_in_num", thrift.I32, 34) + l += bthrift.Binary.I32Length(p.RuntimeFilterMaxInNum) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField65(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableCommonExprPushdown() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_common_expr_pushdown", thrift.BOOL, 65) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableCommonExprPushdown) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TQueryOptions) field42Length() int { + l := 0 + if p.IsSetResourceLimit() { + l += bthrift.Binary.FieldBeginLength("resource_limit", thrift.STRUCT, 42) + l += p.ResourceLimit.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField66(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetParallelInstance() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_instance", thrift.I32, 66) - offset += bthrift.Binary.WriteI32(buf[offset:], p.ParallelInstance) +func (p *TQueryOptions) field43Length() int { + l := 0 + if p.IsSetReturnObjectDataAsBinary() { + l += bthrift.Binary.FieldBeginLength("return_object_data_as_binary", thrift.BOOL, 43) + l += bthrift.Binary.BoolLength(p.ReturnObjectDataAsBinary) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField67(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetMysqlRowBinaryFormat() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mysql_row_binary_format", thrift.BOOL, 67) - offset += bthrift.Binary.WriteBool(buf[offset:], p.MysqlRowBinaryFormat) +func (p *TQueryOptions) field44Length() int { + l := 0 + if p.IsSetTrimTailingSpacesForExternalTableQuery() { + l += bthrift.Binary.FieldBeginLength("trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44) + l += bthrift.Binary.BoolLength(p.TrimTailingSpacesForExternalTableQuery) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField68(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExternalAggBytesThreshold() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_agg_bytes_threshold", thrift.I64, 68) - offset += bthrift.Binary.WriteI64(buf[offset:], 
p.ExternalAggBytesThreshold) +func (p *TQueryOptions) field45Length() int { + l := 0 + if p.IsSetEnableFunctionPushdown() { + l += bthrift.Binary.FieldBeginLength("enable_function_pushdown", thrift.BOOL, 45) + l += bthrift.Binary.BoolLength(*p.EnableFunctionPushdown) + + l += bthrift.Binary.FieldEndLength() + } + return l +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TQueryOptions) field46Length() int { + l := 0 + if p.IsSetFragmentTransmissionCompressionCodec() { + l += bthrift.Binary.FieldBeginLength("fragment_transmission_compression_codec", thrift.STRING, 46) + l += bthrift.Binary.StringLengthNocopy(*p.FragmentTransmissionCompressionCodec) + + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField69(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetExternalAggPartitionBits() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "external_agg_partition_bits", thrift.I32, 69) - offset += bthrift.Binary.WriteI32(buf[offset:], p.ExternalAggPartitionBits) +func (p *TQueryOptions) field48Length() int { + l := 0 + if p.IsSetEnableLocalExchange() { + l += bthrift.Binary.FieldBeginLength("enable_local_exchange", thrift.BOOL, 48) + l += bthrift.Binary.BoolLength(*p.EnableLocalExchange) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField70(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFileCacheBasePath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_cache_base_path", thrift.STRING, 70) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FileCacheBasePath) +func (p *TQueryOptions) field49Length() int { + l := 0 + if p.IsSetSkipStorageEngineMerge() { + l += bthrift.Binary.FieldBeginLength("skip_storage_engine_merge", thrift.BOOL, 49) + l += bthrift.Binary.BoolLength(p.SkipStorageEngineMerge) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField71(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableParquetLazyMat() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_parquet_lazy_mat", thrift.BOOL, 71) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableParquetLazyMat) +func (p *TQueryOptions) field50Length() int { + l := 0 + if p.IsSetSkipDeletePredicate() { + l += bthrift.Binary.FieldBeginLength("skip_delete_predicate", thrift.BOOL, 50) + l += bthrift.Binary.BoolLength(p.SkipDeletePredicate) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField72(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableOrcLazyMat() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_orc_lazy_mat", thrift.BOOL, 72) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableOrcLazyMat) +func (p *TQueryOptions) field51Length() int { + l := 0 + if p.IsSetEnableNewShuffleHashMethod() { + l += bthrift.Binary.FieldBeginLength("enable_new_shuffle_hash_method", thrift.BOOL, 51) + l += bthrift.Binary.BoolLength(*p.EnableNewShuffleHashMethod) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField73(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetScanQueueMemLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_queue_mem_limit", thrift.I64, 73) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ScanQueueMemLimit) +func (p *TQueryOptions) field52Length() int { + l := 0 + if p.IsSetBeExecVersion() { + l += bthrift.Binary.FieldBeginLength("be_exec_version", thrift.I32, 52) + l += bthrift.Binary.I32Length(p.BeExecVersion) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField74(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableScanNodeRunSerial() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_scan_node_run_serial", thrift.BOOL, 74) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableScanNodeRunSerial) +func (p *TQueryOptions) field53Length() int { + l := 0 + if p.IsSetPartitionedHashJoinRowsThreshold() { + l += bthrift.Binary.FieldBeginLength("partitioned_hash_join_rows_threshold", thrift.I32, 53) + l += bthrift.Binary.I32Length(p.PartitionedHashJoinRowsThreshold) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField75(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableInsertStrict() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_insert_strict", thrift.BOOL, 75) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableInsertStrict) +func (p *TQueryOptions) field54Length() int { + l := 0 + if p.IsSetEnableShareHashTableForBroadcastJoin() { + l += bthrift.Binary.FieldBeginLength("enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54) + l += bthrift.Binary.BoolLength(*p.EnableShareHashTableForBroadcastJoin) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField76(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableInvertedIndexQuery() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_inverted_index_query", thrift.BOOL, 76) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableInvertedIndexQuery) +func (p *TQueryOptions) field55Length() int { + l := 0 + if p.IsSetCheckOverflowForDecimal() { + l += bthrift.Binary.FieldBeginLength("check_overflow_for_decimal", thrift.BOOL, 55) + l += bthrift.Binary.BoolLength(p.CheckOverflowForDecimal) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField77(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTruncateCharOrVarcharColumns() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "truncate_char_or_varchar_columns", thrift.BOOL, 77) - offset += bthrift.Binary.WriteBool(buf[offset:], p.TruncateCharOrVarcharColumns) +func (p *TQueryOptions) field56Length() int { + l := 0 + if p.IsSetSkipDeleteBitmap() { + l += bthrift.Binary.FieldBeginLength("skip_delete_bitmap", thrift.BOOL, 56) + l += bthrift.Binary.BoolLength(p.SkipDeleteBitmap) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField78(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if 
p.IsSetEnableHashJoinEarlyStartProbe() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_hash_join_early_start_probe", thrift.BOOL, 78) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableHashJoinEarlyStartProbe) +func (p *TQueryOptions) field57Length() int { + l := 0 + if p.IsSetEnablePipelineEngine() { + l += bthrift.Binary.FieldBeginLength("enable_pipeline_engine", thrift.BOOL, 57) + l += bthrift.Binary.BoolLength(p.EnablePipelineEngine) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField79(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnablePipelineXEngine() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_pipeline_x_engine", thrift.BOOL, 79) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePipelineXEngine) +func (p *TQueryOptions) field58Length() int { + l := 0 + if p.IsSetRepeatMaxNum() { + l += bthrift.Binary.FieldBeginLength("repeat_max_num", thrift.I32, 58) + l += bthrift.Binary.I32Length(p.RepeatMaxNum) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField80(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableMemtableOnSinkNode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_memtable_on_sink_node", thrift.BOOL, 80) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableMemtableOnSinkNode) +func (p *TQueryOptions) field59Length() int { + l := 0 + if p.IsSetExternalSortBytesThreshold() { + l += bthrift.Binary.FieldBeginLength("external_sort_bytes_threshold", thrift.I64, 59) + l += bthrift.Binary.I64Length(p.ExternalSortBytesThreshold) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField81(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableDeleteSubPredicateV2() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_delete_sub_predicate_v2", thrift.BOOL, 81) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableDeleteSubPredicateV2) +func (p *TQueryOptions) field60Length() int { + l := 0 + if p.IsSetPartitionedHashAggRowsThreshold() { + l += bthrift.Binary.FieldBeginLength("partitioned_hash_agg_rows_threshold", thrift.I32, 60) + l += bthrift.Binary.I32Length(p.PartitionedHashAggRowsThreshold) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField82(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFeProcessUuid() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_process_uuid", thrift.I64, 82) - offset += bthrift.Binary.WriteI64(buf[offset:], p.FeProcessUuid) +func (p *TQueryOptions) field61Length() int { + l := 0 + if p.IsSetEnableFileCache() { + l += bthrift.Binary.FieldBeginLength("enable_file_cache", thrift.BOOL, 61) + l += bthrift.Binary.BoolLength(p.EnableFileCache) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField83(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetInvertedIndexConjunctionOptThreshold() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"inverted_index_conjunction_opt_threshold", thrift.I32, 83) - offset += bthrift.Binary.WriteI32(buf[offset:], p.InvertedIndexConjunctionOptThreshold) +func (p *TQueryOptions) field62Length() int { + l := 0 + if p.IsSetInsertTimeout() { + l += bthrift.Binary.FieldBeginLength("insert_timeout", thrift.I32, 62) + l += bthrift.Binary.I32Length(p.InsertTimeout) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField84(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnableProfile() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_profile", thrift.BOOL, 84) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableProfile) +func (p *TQueryOptions) field63Length() int { + l := 0 + if p.IsSetExecutionTimeout() { + l += bthrift.Binary.FieldBeginLength("execution_timeout", thrift.I32, 63) + l += bthrift.Binary.I32Length(p.ExecutionTimeout) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField85(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetEnablePageCache() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_page_cache", thrift.BOOL, 85) - offset += bthrift.Binary.WriteBool(buf[offset:], p.EnablePageCache) +func (p *TQueryOptions) field64Length() int { + l := 0 + if p.IsSetDryRunQuery() { + l += bthrift.Binary.FieldBeginLength("dry_run_query", thrift.BOOL, 64) + l += bthrift.Binary.BoolLength(p.DryRunQuery) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) fastWriteField86(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetAnalyzeTimeout() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "analyze_timeout", thrift.I32, 86) - offset += bthrift.Binary.WriteI32(buf[offset:], p.AnalyzeTimeout) +func (p *TQueryOptions) field65Length() int { + l := 0 + if p.IsSetEnableCommonExprPushdown() { + l += bthrift.Binary.FieldBeginLength("enable_common_expr_pushdown", thrift.BOOL, 65) + l += bthrift.Binary.BoolLength(p.EnableCommonExprPushdown) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TQueryOptions) field1Length() int { +func (p *TQueryOptions) field66Length() int { l := 0 - if p.IsSetAbortOnError() { - l += bthrift.Binary.FieldBeginLength("abort_on_error", thrift.BOOL, 1) - l += bthrift.Binary.BoolLength(p.AbortOnError) + if p.IsSetParallelInstance() { + l += bthrift.Binary.FieldBeginLength("parallel_instance", thrift.I32, 66) + l += bthrift.Binary.I32Length(p.ParallelInstance) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field2Length() int { +func (p *TQueryOptions) field67Length() int { l := 0 - if p.IsSetMaxErrors() { - l += bthrift.Binary.FieldBeginLength("max_errors", thrift.I32, 2) - l += bthrift.Binary.I32Length(p.MaxErrors) + if p.IsSetMysqlRowBinaryFormat() { + l += bthrift.Binary.FieldBeginLength("mysql_row_binary_format", thrift.BOOL, 67) + l += bthrift.Binary.BoolLength(p.MysqlRowBinaryFormat) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field3Length() int { +func (p *TQueryOptions) field68Length() int { l := 0 - if p.IsSetDisableCodegen() { - l += bthrift.Binary.FieldBeginLength("disable_codegen", 
thrift.BOOL, 3) - l += bthrift.Binary.BoolLength(p.DisableCodegen) + if p.IsSetExternalAggBytesThreshold() { + l += bthrift.Binary.FieldBeginLength("external_agg_bytes_threshold", thrift.I64, 68) + l += bthrift.Binary.I64Length(p.ExternalAggBytesThreshold) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field4Length() int { +func (p *TQueryOptions) field69Length() int { l := 0 - if p.IsSetBatchSize() { - l += bthrift.Binary.FieldBeginLength("batch_size", thrift.I32, 4) - l += bthrift.Binary.I32Length(p.BatchSize) + if p.IsSetExternalAggPartitionBits() { + l += bthrift.Binary.FieldBeginLength("external_agg_partition_bits", thrift.I32, 69) + l += bthrift.Binary.I32Length(p.ExternalAggPartitionBits) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field5Length() int { +func (p *TQueryOptions) field70Length() int { l := 0 - if p.IsSetNumNodes() { - l += bthrift.Binary.FieldBeginLength("num_nodes", thrift.I32, 5) - l += bthrift.Binary.I32Length(p.NumNodes) + if p.IsSetFileCacheBasePath() { + l += bthrift.Binary.FieldBeginLength("file_cache_base_path", thrift.STRING, 70) + l += bthrift.Binary.StringLengthNocopy(*p.FileCacheBasePath) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field6Length() int { +func (p *TQueryOptions) field71Length() int { l := 0 - if p.IsSetMaxScanRangeLength() { - l += bthrift.Binary.FieldBeginLength("max_scan_range_length", thrift.I64, 6) - l += bthrift.Binary.I64Length(p.MaxScanRangeLength) + if p.IsSetEnableParquetLazyMat() { + l += bthrift.Binary.FieldBeginLength("enable_parquet_lazy_mat", thrift.BOOL, 71) + l += bthrift.Binary.BoolLength(p.EnableParquetLazyMat) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field7Length() int { +func (p *TQueryOptions) field72Length() int { l := 0 - if p.IsSetNumScannerThreads() { - l += bthrift.Binary.FieldBeginLength("num_scanner_threads", thrift.I32, 7) - l += bthrift.Binary.I32Length(p.NumScannerThreads) + if p.IsSetEnableOrcLazyMat() { + l += bthrift.Binary.FieldBeginLength("enable_orc_lazy_mat", thrift.BOOL, 72) + l += bthrift.Binary.BoolLength(p.EnableOrcLazyMat) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field8Length() int { +func (p *TQueryOptions) field73Length() int { l := 0 - if p.IsSetMaxIoBuffers() { - l += bthrift.Binary.FieldBeginLength("max_io_buffers", thrift.I32, 8) - l += bthrift.Binary.I32Length(p.MaxIoBuffers) + if p.IsSetScanQueueMemLimit() { + l += bthrift.Binary.FieldBeginLength("scan_queue_mem_limit", thrift.I64, 73) + l += bthrift.Binary.I64Length(*p.ScanQueueMemLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field9Length() int { +func (p *TQueryOptions) field74Length() int { l := 0 - if p.IsSetAllowUnsupportedFormats() { - l += bthrift.Binary.FieldBeginLength("allow_unsupported_formats", thrift.BOOL, 9) - l += bthrift.Binary.BoolLength(p.AllowUnsupportedFormats) + if p.IsSetEnableScanNodeRunSerial() { + l += bthrift.Binary.FieldBeginLength("enable_scan_node_run_serial", thrift.BOOL, 74) + l += bthrift.Binary.BoolLength(p.EnableScanNodeRunSerial) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field10Length() int { +func (p *TQueryOptions) field75Length() int { l := 0 - if p.IsSetDefaultOrderByLimit() { - l += bthrift.Binary.FieldBeginLength("default_order_by_limit", thrift.I64, 10) - l += bthrift.Binary.I64Length(p.DefaultOrderByLimit) + if p.IsSetEnableInsertStrict() { + l += 
bthrift.Binary.FieldBeginLength("enable_insert_strict", thrift.BOOL, 75) + l += bthrift.Binary.BoolLength(p.EnableInsertStrict) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field12Length() int { +func (p *TQueryOptions) field76Length() int { l := 0 - if p.IsSetMemLimit() { - l += bthrift.Binary.FieldBeginLength("mem_limit", thrift.I64, 12) - l += bthrift.Binary.I64Length(p.MemLimit) + if p.IsSetEnableInvertedIndexQuery() { + l += bthrift.Binary.FieldBeginLength("enable_inverted_index_query", thrift.BOOL, 76) + l += bthrift.Binary.BoolLength(p.EnableInvertedIndexQuery) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field13Length() int { +func (p *TQueryOptions) field77Length() int { l := 0 - if p.IsSetAbortOnDefaultLimitExceeded() { - l += bthrift.Binary.FieldBeginLength("abort_on_default_limit_exceeded", thrift.BOOL, 13) - l += bthrift.Binary.BoolLength(p.AbortOnDefaultLimitExceeded) + if p.IsSetTruncateCharOrVarcharColumns() { + l += bthrift.Binary.FieldBeginLength("truncate_char_or_varchar_columns", thrift.BOOL, 77) + l += bthrift.Binary.BoolLength(p.TruncateCharOrVarcharColumns) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field14Length() int { +func (p *TQueryOptions) field78Length() int { l := 0 - if p.IsSetQueryTimeout() { - l += bthrift.Binary.FieldBeginLength("query_timeout", thrift.I32, 14) - l += bthrift.Binary.I32Length(p.QueryTimeout) + if p.IsSetEnableHashJoinEarlyStartProbe() { + l += bthrift.Binary.FieldBeginLength("enable_hash_join_early_start_probe", thrift.BOOL, 78) + l += bthrift.Binary.BoolLength(p.EnableHashJoinEarlyStartProbe) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field15Length() int { +func (p *TQueryOptions) field79Length() int { l := 0 - if p.IsSetIsReportSuccess() { - l += bthrift.Binary.FieldBeginLength("is_report_success", thrift.BOOL, 15) - l += bthrift.Binary.BoolLength(p.IsReportSuccess) + if p.IsSetEnablePipelineXEngine() { + l += bthrift.Binary.FieldBeginLength("enable_pipeline_x_engine", thrift.BOOL, 79) + l += bthrift.Binary.BoolLength(p.EnablePipelineXEngine) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field16Length() int { +func (p *TQueryOptions) field80Length() int { l := 0 - if p.IsSetCodegenLevel() { - l += bthrift.Binary.FieldBeginLength("codegen_level", thrift.I32, 16) - l += bthrift.Binary.I32Length(p.CodegenLevel) + if p.IsSetEnableMemtableOnSinkNode() { + l += bthrift.Binary.FieldBeginLength("enable_memtable_on_sink_node", thrift.BOOL, 80) + l += bthrift.Binary.BoolLength(p.EnableMemtableOnSinkNode) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field17Length() int { +func (p *TQueryOptions) field81Length() int { l := 0 - if p.IsSetKuduLatestObservedTs() { - l += bthrift.Binary.FieldBeginLength("kudu_latest_observed_ts", thrift.I64, 17) - l += bthrift.Binary.I64Length(p.KuduLatestObservedTs) + if p.IsSetEnableDeleteSubPredicateV2() { + l += bthrift.Binary.FieldBeginLength("enable_delete_sub_predicate_v2", thrift.BOOL, 81) + l += bthrift.Binary.BoolLength(p.EnableDeleteSubPredicateV2) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field18Length() int { +func (p *TQueryOptions) field82Length() int { l := 0 - if p.IsSetQueryType() { - l += bthrift.Binary.FieldBeginLength("query_type", thrift.I32, 18) - l += bthrift.Binary.I32Length(int32(p.QueryType)) + if p.IsSetFeProcessUuid() { + l += bthrift.Binary.FieldBeginLength("fe_process_uuid", 
thrift.I64, 82) + l += bthrift.Binary.I64Length(p.FeProcessUuid) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field19Length() int { +func (p *TQueryOptions) field83Length() int { l := 0 - if p.IsSetMinReservation() { - l += bthrift.Binary.FieldBeginLength("min_reservation", thrift.I64, 19) - l += bthrift.Binary.I64Length(p.MinReservation) + if p.IsSetInvertedIndexConjunctionOptThreshold() { + l += bthrift.Binary.FieldBeginLength("inverted_index_conjunction_opt_threshold", thrift.I32, 83) + l += bthrift.Binary.I32Length(p.InvertedIndexConjunctionOptThreshold) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field20Length() int { +func (p *TQueryOptions) field84Length() int { l := 0 - if p.IsSetMaxReservation() { - l += bthrift.Binary.FieldBeginLength("max_reservation", thrift.I64, 20) - l += bthrift.Binary.I64Length(p.MaxReservation) + if p.IsSetEnableProfile() { + l += bthrift.Binary.FieldBeginLength("enable_profile", thrift.BOOL, 84) + l += bthrift.Binary.BoolLength(p.EnableProfile) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field21Length() int { +func (p *TQueryOptions) field85Length() int { l := 0 - if p.IsSetInitialReservationTotalClaims() { - l += bthrift.Binary.FieldBeginLength("initial_reservation_total_claims", thrift.I64, 21) - l += bthrift.Binary.I64Length(p.InitialReservationTotalClaims) + if p.IsSetEnablePageCache() { + l += bthrift.Binary.FieldBeginLength("enable_page_cache", thrift.BOOL, 85) + l += bthrift.Binary.BoolLength(p.EnablePageCache) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field22Length() int { +func (p *TQueryOptions) field86Length() int { l := 0 - if p.IsSetBufferPoolLimit() { - l += bthrift.Binary.FieldBeginLength("buffer_pool_limit", thrift.I64, 22) - l += bthrift.Binary.I64Length(p.BufferPoolLimit) + if p.IsSetAnalyzeTimeout() { + l += bthrift.Binary.FieldBeginLength("analyze_timeout", thrift.I32, 86) + l += bthrift.Binary.I32Length(p.AnalyzeTimeout) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field23Length() int { +func (p *TQueryOptions) field87Length() int { l := 0 - if p.IsSetDefaultSpillableBufferSize() { - l += bthrift.Binary.FieldBeginLength("default_spillable_buffer_size", thrift.I64, 23) - l += bthrift.Binary.I64Length(p.DefaultSpillableBufferSize) + if p.IsSetFasterFloatConvert() { + l += bthrift.Binary.FieldBeginLength("faster_float_convert", thrift.BOOL, 87) + l += bthrift.Binary.BoolLength(p.FasterFloatConvert) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field24Length() int { +func (p *TQueryOptions) field88Length() int { l := 0 - if p.IsSetMinSpillableBufferSize() { - l += bthrift.Binary.FieldBeginLength("min_spillable_buffer_size", thrift.I64, 24) - l += bthrift.Binary.I64Length(p.MinSpillableBufferSize) + if p.IsSetEnableDecimal256() { + l += bthrift.Binary.FieldBeginLength("enable_decimal256", thrift.BOOL, 88) + l += bthrift.Binary.BoolLength(p.EnableDecimal256) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field25Length() int { +func (p *TQueryOptions) field89Length() int { l := 0 - if p.IsSetMaxRowSize() { - l += bthrift.Binary.FieldBeginLength("max_row_size", thrift.I64, 25) - l += bthrift.Binary.I64Length(p.MaxRowSize) + if p.IsSetEnableLocalShuffle() { + l += bthrift.Binary.FieldBeginLength("enable_local_shuffle", thrift.BOOL, 89) + l += bthrift.Binary.BoolLength(p.EnableLocalShuffle) l += bthrift.Binary.FieldEndLength() } return 
l } -func (p *TQueryOptions) field26Length() int { +func (p *TQueryOptions) field90Length() int { l := 0 - if p.IsSetDisableStreamPreaggregations() { - l += bthrift.Binary.FieldBeginLength("disable_stream_preaggregations", thrift.BOOL, 26) - l += bthrift.Binary.BoolLength(p.DisableStreamPreaggregations) + if p.IsSetSkipMissingVersion() { + l += bthrift.Binary.FieldBeginLength("skip_missing_version", thrift.BOOL, 90) + l += bthrift.Binary.BoolLength(p.SkipMissingVersion) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field27Length() int { +func (p *TQueryOptions) field91Length() int { l := 0 - if p.IsSetMtDop() { - l += bthrift.Binary.FieldBeginLength("mt_dop", thrift.I32, 27) - l += bthrift.Binary.I32Length(p.MtDop) + if p.IsSetRuntimeFilterWaitInfinitely() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_wait_infinitely", thrift.BOOL, 91) + l += bthrift.Binary.BoolLength(p.RuntimeFilterWaitInfinitely) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field28Length() int { +func (p *TQueryOptions) field92Length() int { l := 0 - if p.IsSetLoadMemLimit() { - l += bthrift.Binary.FieldBeginLength("load_mem_limit", thrift.I64, 28) - l += bthrift.Binary.I64Length(p.LoadMemLimit) + if p.IsSetWaitFullBlockScheduleTimes() { + l += bthrift.Binary.FieldBeginLength("wait_full_block_schedule_times", thrift.I32, 92) + l += bthrift.Binary.I32Length(p.WaitFullBlockScheduleTimes) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field29Length() int { +func (p *TQueryOptions) field93Length() int { l := 0 - if p.IsSetMaxScanKeyNum() { - l += bthrift.Binary.FieldBeginLength("max_scan_key_num", thrift.I32, 29) - l += bthrift.Binary.I32Length(*p.MaxScanKeyNum) + if p.IsSetInvertedIndexMaxExpansions() { + l += bthrift.Binary.FieldBeginLength("inverted_index_max_expansions", thrift.I32, 93) + l += bthrift.Binary.I32Length(p.InvertedIndexMaxExpansions) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field30Length() int { +func (p *TQueryOptions) field94Length() int { l := 0 - if p.IsSetMaxPushdownConditionsPerColumn() { - l += bthrift.Binary.FieldBeginLength("max_pushdown_conditions_per_column", thrift.I32, 30) - l += bthrift.Binary.I32Length(*p.MaxPushdownConditionsPerColumn) + if p.IsSetInvertedIndexSkipThreshold() { + l += bthrift.Binary.FieldBeginLength("inverted_index_skip_threshold", thrift.I32, 94) + l += bthrift.Binary.I32Length(p.InvertedIndexSkipThreshold) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field31Length() int { +func (p *TQueryOptions) field95Length() int { l := 0 - if p.IsSetEnableSpilling() { - l += bthrift.Binary.FieldBeginLength("enable_spilling", thrift.BOOL, 31) - l += bthrift.Binary.BoolLength(p.EnableSpilling) + if p.IsSetEnableParallelScan() { + l += bthrift.Binary.FieldBeginLength("enable_parallel_scan", thrift.BOOL, 95) + l += bthrift.Binary.BoolLength(p.EnableParallelScan) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field32Length() int { +func (p *TQueryOptions) field96Length() int { l := 0 - if p.IsSetEnableEnableExchangeNodeParallelMerge() { - l += bthrift.Binary.FieldBeginLength("enable_enable_exchange_node_parallel_merge", thrift.BOOL, 32) - l += bthrift.Binary.BoolLength(p.EnableEnableExchangeNodeParallelMerge) + if p.IsSetParallelScanMaxScannersCount() { + l += bthrift.Binary.FieldBeginLength("parallel_scan_max_scanners_count", thrift.I32, 96) + l += 
bthrift.Binary.I32Length(p.ParallelScanMaxScannersCount) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field33Length() int { +func (p *TQueryOptions) field97Length() int { l := 0 - if p.IsSetRuntimeFilterWaitTimeMs() { - l += bthrift.Binary.FieldBeginLength("runtime_filter_wait_time_ms", thrift.I32, 33) - l += bthrift.Binary.I32Length(p.RuntimeFilterWaitTimeMs) + if p.IsSetParallelScanMinRowsPerScanner() { + l += bthrift.Binary.FieldBeginLength("parallel_scan_min_rows_per_scanner", thrift.I64, 97) + l += bthrift.Binary.I64Length(p.ParallelScanMinRowsPerScanner) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field34Length() int { +func (p *TQueryOptions) field98Length() int { l := 0 - if p.IsSetRuntimeFilterMaxInNum() { - l += bthrift.Binary.FieldBeginLength("runtime_filter_max_in_num", thrift.I32, 34) - l += bthrift.Binary.I32Length(p.RuntimeFilterMaxInNum) + if p.IsSetSkipBadTablet() { + l += bthrift.Binary.FieldBeginLength("skip_bad_tablet", thrift.BOOL, 98) + l += bthrift.Binary.BoolLength(p.SkipBadTablet) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field42Length() int { +func (p *TQueryOptions) field99Length() int { l := 0 - if p.IsSetResourceLimit() { - l += bthrift.Binary.FieldBeginLength("resource_limit", thrift.STRUCT, 42) - l += p.ResourceLimit.BLength() + if p.IsSetScannerScaleUpRatio() { + l += bthrift.Binary.FieldBeginLength("scanner_scale_up_ratio", thrift.DOUBLE, 99) + l += bthrift.Binary.DoubleLength(p.ScannerScaleUpRatio) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field43Length() int { +func (p *TQueryOptions) field100Length() int { l := 0 - if p.IsSetReturnObjectDataAsBinary() { - l += bthrift.Binary.FieldBeginLength("return_object_data_as_binary", thrift.BOOL, 43) - l += bthrift.Binary.BoolLength(p.ReturnObjectDataAsBinary) + if p.IsSetEnableDistinctStreamingAggregation() { + l += bthrift.Binary.FieldBeginLength("enable_distinct_streaming_aggregation", thrift.BOOL, 100) + l += bthrift.Binary.BoolLength(p.EnableDistinctStreamingAggregation) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field44Length() int { +func (p *TQueryOptions) field101Length() int { l := 0 - if p.IsSetTrimTailingSpacesForExternalTableQuery() { - l += bthrift.Binary.FieldBeginLength("trim_tailing_spaces_for_external_table_query", thrift.BOOL, 44) - l += bthrift.Binary.BoolLength(p.TrimTailingSpacesForExternalTableQuery) + if p.IsSetEnableJoinSpill() { + l += bthrift.Binary.FieldBeginLength("enable_join_spill", thrift.BOOL, 101) + l += bthrift.Binary.BoolLength(p.EnableJoinSpill) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field45Length() int { +func (p *TQueryOptions) field102Length() int { l := 0 - if p.IsSetEnableFunctionPushdown() { - l += bthrift.Binary.FieldBeginLength("enable_function_pushdown", thrift.BOOL, 45) - l += bthrift.Binary.BoolLength(*p.EnableFunctionPushdown) + if p.IsSetEnableSortSpill() { + l += bthrift.Binary.FieldBeginLength("enable_sort_spill", thrift.BOOL, 102) + l += bthrift.Binary.BoolLength(p.EnableSortSpill) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field46Length() int { +func (p *TQueryOptions) field103Length() int { l := 0 - if p.IsSetFragmentTransmissionCompressionCodec() { - l += bthrift.Binary.FieldBeginLength("fragment_transmission_compression_codec", thrift.STRING, 46) - l += bthrift.Binary.StringLengthNocopy(*p.FragmentTransmissionCompressionCodec) + if 
p.IsSetEnableAggSpill() { + l += bthrift.Binary.FieldBeginLength("enable_agg_spill", thrift.BOOL, 103) + l += bthrift.Binary.BoolLength(p.EnableAggSpill) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field48Length() int { +func (p *TQueryOptions) field104Length() int { l := 0 - if p.IsSetEnableLocalExchange() { - l += bthrift.Binary.FieldBeginLength("enable_local_exchange", thrift.BOOL, 48) - l += bthrift.Binary.BoolLength(*p.EnableLocalExchange) + if p.IsSetMinRevocableMem() { + l += bthrift.Binary.FieldBeginLength("min_revocable_mem", thrift.I64, 104) + l += bthrift.Binary.I64Length(p.MinRevocableMem) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field49Length() int { +func (p *TQueryOptions) field105Length() int { l := 0 - if p.IsSetSkipStorageEngineMerge() { - l += bthrift.Binary.FieldBeginLength("skip_storage_engine_merge", thrift.BOOL, 49) - l += bthrift.Binary.BoolLength(p.SkipStorageEngineMerge) + if p.IsSetSpillStreamingAggMemLimit() { + l += bthrift.Binary.FieldBeginLength("spill_streaming_agg_mem_limit", thrift.I64, 105) + l += bthrift.Binary.I64Length(p.SpillStreamingAggMemLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field50Length() int { +func (p *TQueryOptions) field106Length() int { l := 0 - if p.IsSetSkipDeletePredicate() { - l += bthrift.Binary.FieldBeginLength("skip_delete_predicate", thrift.BOOL, 50) - l += bthrift.Binary.BoolLength(p.SkipDeletePredicate) + if p.IsSetDataQueueMaxBlocks() { + l += bthrift.Binary.FieldBeginLength("data_queue_max_blocks", thrift.I64, 106) + l += bthrift.Binary.I64Length(p.DataQueueMaxBlocks) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field51Length() int { +func (p *TQueryOptions) field107Length() int { l := 0 - if p.IsSetEnableNewShuffleHashMethod() { - l += bthrift.Binary.FieldBeginLength("enable_new_shuffle_hash_method", thrift.BOOL, 51) - l += bthrift.Binary.BoolLength(*p.EnableNewShuffleHashMethod) + if p.IsSetEnableCommonExprPushdownForInvertedIndex() { + l += bthrift.Binary.FieldBeginLength("enable_common_expr_pushdown_for_inverted_index", thrift.BOOL, 107) + l += bthrift.Binary.BoolLength(p.EnableCommonExprPushdownForInvertedIndex) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field52Length() int { +func (p *TQueryOptions) field108Length() int { l := 0 - if p.IsSetBeExecVersion() { - l += bthrift.Binary.FieldBeginLength("be_exec_version", thrift.I32, 52) - l += bthrift.Binary.I32Length(p.BeExecVersion) + if p.IsSetLocalExchangeFreeBlocksLimit() { + l += bthrift.Binary.FieldBeginLength("local_exchange_free_blocks_limit", thrift.I64, 108) + l += bthrift.Binary.I64Length(*p.LocalExchangeFreeBlocksLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field53Length() int { +func (p *TQueryOptions) field109Length() int { l := 0 - if p.IsSetPartitionedHashJoinRowsThreshold() { - l += bthrift.Binary.FieldBeginLength("partitioned_hash_join_rows_threshold", thrift.I32, 53) - l += bthrift.Binary.I32Length(p.PartitionedHashJoinRowsThreshold) + if p.IsSetEnableForceSpill() { + l += bthrift.Binary.FieldBeginLength("enable_force_spill", thrift.BOOL, 109) + l += bthrift.Binary.BoolLength(p.EnableForceSpill) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field54Length() int { +func (p *TQueryOptions) field110Length() int { l := 0 - if p.IsSetEnableShareHashTableForBroadcastJoin() { - l += 
bthrift.Binary.FieldBeginLength("enable_share_hash_table_for_broadcast_join", thrift.BOOL, 54) - l += bthrift.Binary.BoolLength(*p.EnableShareHashTableForBroadcastJoin) + if p.IsSetEnableParquetFilterByMinMax() { + l += bthrift.Binary.FieldBeginLength("enable_parquet_filter_by_min_max", thrift.BOOL, 110) + l += bthrift.Binary.BoolLength(p.EnableParquetFilterByMinMax) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field55Length() int { +func (p *TQueryOptions) field111Length() int { l := 0 - if p.IsSetCheckOverflowForDecimal() { - l += bthrift.Binary.FieldBeginLength("check_overflow_for_decimal", thrift.BOOL, 55) - l += bthrift.Binary.BoolLength(p.CheckOverflowForDecimal) + if p.IsSetEnableOrcFilterByMinMax() { + l += bthrift.Binary.FieldBeginLength("enable_orc_filter_by_min_max", thrift.BOOL, 111) + l += bthrift.Binary.BoolLength(p.EnableOrcFilterByMinMax) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field56Length() int { +func (p *TQueryOptions) field112Length() int { l := 0 - if p.IsSetSkipDeleteBitmap() { - l += bthrift.Binary.FieldBeginLength("skip_delete_bitmap", thrift.BOOL, 56) - l += bthrift.Binary.BoolLength(p.SkipDeleteBitmap) + if p.IsSetMaxColumnReaderNum() { + l += bthrift.Binary.FieldBeginLength("max_column_reader_num", thrift.I32, 112) + l += bthrift.Binary.I32Length(p.MaxColumnReaderNum) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field57Length() int { +func (p *TQueryOptions) field113Length() int { l := 0 - if p.IsSetEnablePipelineEngine() { - l += bthrift.Binary.FieldBeginLength("enable_pipeline_engine", thrift.BOOL, 57) - l += bthrift.Binary.BoolLength(p.EnablePipelineEngine) + if p.IsSetEnableLocalMergeSort() { + l += bthrift.Binary.FieldBeginLength("enable_local_merge_sort", thrift.BOOL, 113) + l += bthrift.Binary.BoolLength(p.EnableLocalMergeSort) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field58Length() int { +func (p *TQueryOptions) field114Length() int { l := 0 - if p.IsSetRepeatMaxNum() { - l += bthrift.Binary.FieldBeginLength("repeat_max_num", thrift.I32, 58) - l += bthrift.Binary.I32Length(p.RepeatMaxNum) + if p.IsSetEnableParallelResultSink() { + l += bthrift.Binary.FieldBeginLength("enable_parallel_result_sink", thrift.BOOL, 114) + l += bthrift.Binary.BoolLength(p.EnableParallelResultSink) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field59Length() int { +func (p *TQueryOptions) field115Length() int { l := 0 - if p.IsSetExternalSortBytesThreshold() { - l += bthrift.Binary.FieldBeginLength("external_sort_bytes_threshold", thrift.I64, 59) - l += bthrift.Binary.I64Length(p.ExternalSortBytesThreshold) + if p.IsSetEnableShortCircuitQueryAccessColumnStore() { + l += bthrift.Binary.FieldBeginLength("enable_short_circuit_query_access_column_store", thrift.BOOL, 115) + l += bthrift.Binary.BoolLength(p.EnableShortCircuitQueryAccessColumnStore) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field60Length() int { +func (p *TQueryOptions) field116Length() int { l := 0 - if p.IsSetPartitionedHashAggRowsThreshold() { - l += bthrift.Binary.FieldBeginLength("partitioned_hash_agg_rows_threshold", thrift.I32, 60) - l += bthrift.Binary.I32Length(p.PartitionedHashAggRowsThreshold) + if p.IsSetEnableNoNeedReadDataOpt() { + l += bthrift.Binary.FieldBeginLength("enable_no_need_read_data_opt", thrift.BOOL, 116) + l += bthrift.Binary.BoolLength(p.EnableNoNeedReadDataOpt) l += bthrift.Binary.FieldEndLength() 
} return l } -func (p *TQueryOptions) field61Length() int { +func (p *TQueryOptions) field117Length() int { l := 0 - if p.IsSetEnableFileCache() { - l += bthrift.Binary.FieldBeginLength("enable_file_cache", thrift.BOOL, 61) - l += bthrift.Binary.BoolLength(p.EnableFileCache) + if p.IsSetReadCsvEmptyLineAsNull() { + l += bthrift.Binary.FieldBeginLength("read_csv_empty_line_as_null", thrift.BOOL, 117) + l += bthrift.Binary.BoolLength(p.ReadCsvEmptyLineAsNull) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field62Length() int { +func (p *TQueryOptions) field118Length() int { l := 0 - if p.IsSetInsertTimeout() { - l += bthrift.Binary.FieldBeginLength("insert_timeout", thrift.I32, 62) - l += bthrift.Binary.I32Length(p.InsertTimeout) + if p.IsSetSerdeDialect() { + l += bthrift.Binary.FieldBeginLength("serde_dialect", thrift.I32, 118) + l += bthrift.Binary.I32Length(int32(p.SerdeDialect)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field63Length() int { +func (p *TQueryOptions) field119Length() int { l := 0 - if p.IsSetExecutionTimeout() { - l += bthrift.Binary.FieldBeginLength("execution_timeout", thrift.I32, 63) - l += bthrift.Binary.I32Length(p.ExecutionTimeout) + if p.IsSetEnableMatchWithoutInvertedIndex() { + l += bthrift.Binary.FieldBeginLength("enable_match_without_inverted_index", thrift.BOOL, 119) + l += bthrift.Binary.BoolLength(p.EnableMatchWithoutInvertedIndex) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field64Length() int { +func (p *TQueryOptions) field120Length() int { l := 0 - if p.IsSetDryRunQuery() { - l += bthrift.Binary.FieldBeginLength("dry_run_query", thrift.BOOL, 64) - l += bthrift.Binary.BoolLength(p.DryRunQuery) + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + l += bthrift.Binary.FieldBeginLength("enable_fallback_on_missing_inverted_index", thrift.BOOL, 120) + l += bthrift.Binary.BoolLength(p.EnableFallbackOnMissingInvertedIndex) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field65Length() int { +func (p *TQueryOptions) field121Length() int { l := 0 - if p.IsSetEnableCommonExprPushdown() { - l += bthrift.Binary.FieldBeginLength("enable_common_expr_pushdown", thrift.BOOL, 65) - l += bthrift.Binary.BoolLength(p.EnableCommonExprPushdown) + if p.IsSetKeepCarriageReturn() { + l += bthrift.Binary.FieldBeginLength("keep_carriage_return", thrift.BOOL, 121) + l += bthrift.Binary.BoolLength(p.KeepCarriageReturn) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field66Length() int { +func (p *TQueryOptions) field122Length() int { l := 0 - if p.IsSetParallelInstance() { - l += bthrift.Binary.FieldBeginLength("parallel_instance", thrift.I32, 66) - l += bthrift.Binary.I32Length(p.ParallelInstance) + if p.IsSetRuntimeBloomFilterMinSize() { + l += bthrift.Binary.FieldBeginLength("runtime_bloom_filter_min_size", thrift.I32, 122) + l += bthrift.Binary.I32Length(p.RuntimeBloomFilterMinSize) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field67Length() int { +func (p *TQueryOptions) field123Length() int { l := 0 - if p.IsSetMysqlRowBinaryFormat() { - l += bthrift.Binary.FieldBeginLength("mysql_row_binary_format", thrift.BOOL, 67) - l += bthrift.Binary.BoolLength(p.MysqlRowBinaryFormat) + if p.IsSetHiveParquetUseColumnNames() { + l += bthrift.Binary.FieldBeginLength("hive_parquet_use_column_names", thrift.BOOL, 123) + l += bthrift.Binary.BoolLength(p.HiveParquetUseColumnNames) l += bthrift.Binary.FieldEndLength() 
} return l } -func (p *TQueryOptions) field68Length() int { +func (p *TQueryOptions) field124Length() int { l := 0 - if p.IsSetExternalAggBytesThreshold() { - l += bthrift.Binary.FieldBeginLength("external_agg_bytes_threshold", thrift.I64, 68) - l += bthrift.Binary.I64Length(p.ExternalAggBytesThreshold) + if p.IsSetHiveOrcUseColumnNames() { + l += bthrift.Binary.FieldBeginLength("hive_orc_use_column_names", thrift.BOOL, 124) + l += bthrift.Binary.BoolLength(p.HiveOrcUseColumnNames) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field69Length() int { +func (p *TQueryOptions) field125Length() int { l := 0 - if p.IsSetExternalAggPartitionBits() { - l += bthrift.Binary.FieldBeginLength("external_agg_partition_bits", thrift.I32, 69) - l += bthrift.Binary.I32Length(p.ExternalAggPartitionBits) + if p.IsSetEnableSegmentCache() { + l += bthrift.Binary.FieldBeginLength("enable_segment_cache", thrift.BOOL, 125) + l += bthrift.Binary.BoolLength(p.EnableSegmentCache) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field70Length() int { +func (p *TQueryOptions) field126Length() int { l := 0 - if p.IsSetFileCacheBasePath() { - l += bthrift.Binary.FieldBeginLength("file_cache_base_path", thrift.STRING, 70) - l += bthrift.Binary.StringLengthNocopy(*p.FileCacheBasePath) + if p.IsSetRuntimeBloomFilterMaxSize() { + l += bthrift.Binary.FieldBeginLength("runtime_bloom_filter_max_size", thrift.I32, 126) + l += bthrift.Binary.I32Length(p.RuntimeBloomFilterMaxSize) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field71Length() int { +func (p *TQueryOptions) field127Length() int { l := 0 - if p.IsSetEnableParquetLazyMat() { - l += bthrift.Binary.FieldBeginLength("enable_parquet_lazy_mat", thrift.BOOL, 71) - l += bthrift.Binary.BoolLength(p.EnableParquetLazyMat) + if p.IsSetInListValueCountThreshold() { + l += bthrift.Binary.FieldBeginLength("in_list_value_count_threshold", thrift.I32, 127) + l += bthrift.Binary.I32Length(p.InListValueCountThreshold) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field72Length() int { +func (p *TQueryOptions) field128Length() int { l := 0 - if p.IsSetEnableOrcLazyMat() { - l += bthrift.Binary.FieldBeginLength("enable_orc_lazy_mat", thrift.BOOL, 72) - l += bthrift.Binary.BoolLength(p.EnableOrcLazyMat) + if p.IsSetEnableVerboseProfile() { + l += bthrift.Binary.FieldBeginLength("enable_verbose_profile", thrift.BOOL, 128) + l += bthrift.Binary.BoolLength(p.EnableVerboseProfile) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field73Length() int { +func (p *TQueryOptions) field129Length() int { l := 0 - if p.IsSetScanQueueMemLimit() { - l += bthrift.Binary.FieldBeginLength("scan_queue_mem_limit", thrift.I64, 73) - l += bthrift.Binary.I64Length(*p.ScanQueueMemLimit) + if p.IsSetRpcVerboseProfileMaxInstanceCount() { + l += bthrift.Binary.FieldBeginLength("rpc_verbose_profile_max_instance_count", thrift.I32, 129) + l += bthrift.Binary.I32Length(p.RpcVerboseProfileMaxInstanceCount) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field74Length() int { +func (p *TQueryOptions) field130Length() int { l := 0 - if p.IsSetEnableScanNodeRunSerial() { - l += bthrift.Binary.FieldBeginLength("enable_scan_node_run_serial", thrift.BOOL, 74) - l += bthrift.Binary.BoolLength(p.EnableScanNodeRunSerial) + if p.IsSetEnableAdaptivePipelineTaskSerialReadOnLimit() { + l += 
bthrift.Binary.FieldBeginLength("enable_adaptive_pipeline_task_serial_read_on_limit", thrift.BOOL, 130) + l += bthrift.Binary.BoolLength(p.EnableAdaptivePipelineTaskSerialReadOnLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field75Length() int { +func (p *TQueryOptions) field131Length() int { l := 0 - if p.IsSetEnableInsertStrict() { - l += bthrift.Binary.FieldBeginLength("enable_insert_strict", thrift.BOOL, 75) - l += bthrift.Binary.BoolLength(p.EnableInsertStrict) + if p.IsSetAdaptivePipelineTaskSerialReadOnLimit() { + l += bthrift.Binary.FieldBeginLength("adaptive_pipeline_task_serial_read_on_limit", thrift.I32, 131) + l += bthrift.Binary.I32Length(p.AdaptivePipelineTaskSerialReadOnLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field76Length() int { +func (p *TQueryOptions) field132Length() int { l := 0 - if p.IsSetEnableInvertedIndexQuery() { - l += bthrift.Binary.FieldBeginLength("enable_inverted_index_query", thrift.BOOL, 76) - l += bthrift.Binary.BoolLength(p.EnableInvertedIndexQuery) + if p.IsSetParallelPrepareThreshold() { + l += bthrift.Binary.FieldBeginLength("parallel_prepare_threshold", thrift.I32, 132) + l += bthrift.Binary.I32Length(p.ParallelPrepareThreshold) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field77Length() int { +func (p *TQueryOptions) field133Length() int { l := 0 - if p.IsSetTruncateCharOrVarcharColumns() { - l += bthrift.Binary.FieldBeginLength("truncate_char_or_varchar_columns", thrift.BOOL, 77) - l += bthrift.Binary.BoolLength(p.TruncateCharOrVarcharColumns) + if p.IsSetPartitionTopnMaxPartitions() { + l += bthrift.Binary.FieldBeginLength("partition_topn_max_partitions", thrift.I32, 133) + l += bthrift.Binary.I32Length(p.PartitionTopnMaxPartitions) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field78Length() int { +func (p *TQueryOptions) field134Length() int { l := 0 - if p.IsSetEnableHashJoinEarlyStartProbe() { - l += bthrift.Binary.FieldBeginLength("enable_hash_join_early_start_probe", thrift.BOOL, 78) - l += bthrift.Binary.BoolLength(p.EnableHashJoinEarlyStartProbe) + if p.IsSetPartitionTopnPrePartitionRows() { + l += bthrift.Binary.FieldBeginLength("partition_topn_pre_partition_rows", thrift.I32, 134) + l += bthrift.Binary.I32Length(p.PartitionTopnPrePartitionRows) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field79Length() int { +func (p *TQueryOptions) field135Length() int { l := 0 - if p.IsSetEnablePipelineXEngine() { - l += bthrift.Binary.FieldBeginLength("enable_pipeline_x_engine", thrift.BOOL, 79) - l += bthrift.Binary.BoolLength(p.EnablePipelineXEngine) + if p.IsSetEnableParallelOutfile() { + l += bthrift.Binary.FieldBeginLength("enable_parallel_outfile", thrift.BOOL, 135) + l += bthrift.Binary.BoolLength(p.EnableParallelOutfile) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field80Length() int { +func (p *TQueryOptions) field136Length() int { l := 0 - if p.IsSetEnableMemtableOnSinkNode() { - l += bthrift.Binary.FieldBeginLength("enable_memtable_on_sink_node", thrift.BOOL, 80) - l += bthrift.Binary.BoolLength(p.EnableMemtableOnSinkNode) + if p.IsSetEnablePhraseQuerySequentialOpt() { + l += bthrift.Binary.FieldBeginLength("enable_phrase_query_sequential_opt", thrift.BOOL, 136) + l += bthrift.Binary.BoolLength(p.EnablePhraseQuerySequentialOpt) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field81Length() int { +func (p 
*TQueryOptions) field137Length() int { l := 0 - if p.IsSetEnableDeleteSubPredicateV2() { - l += bthrift.Binary.FieldBeginLength("enable_delete_sub_predicate_v2", thrift.BOOL, 81) - l += bthrift.Binary.BoolLength(p.EnableDeleteSubPredicateV2) + if p.IsSetEnableAutoCreateWhenOverwrite() { + l += bthrift.Binary.FieldBeginLength("enable_auto_create_when_overwrite", thrift.BOOL, 137) + l += bthrift.Binary.BoolLength(p.EnableAutoCreateWhenOverwrite) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field82Length() int { +func (p *TQueryOptions) field138Length() int { l := 0 - if p.IsSetFeProcessUuid() { - l += bthrift.Binary.FieldBeginLength("fe_process_uuid", thrift.I64, 82) - l += bthrift.Binary.I64Length(p.FeProcessUuid) + if p.IsSetOrcTinyStripeThresholdBytes() { + l += bthrift.Binary.FieldBeginLength("orc_tiny_stripe_threshold_bytes", thrift.I64, 138) + l += bthrift.Binary.I64Length(p.OrcTinyStripeThresholdBytes) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field83Length() int { +func (p *TQueryOptions) field139Length() int { l := 0 - if p.IsSetInvertedIndexConjunctionOptThreshold() { - l += bthrift.Binary.FieldBeginLength("inverted_index_conjunction_opt_threshold", thrift.I32, 83) - l += bthrift.Binary.I32Length(p.InvertedIndexConjunctionOptThreshold) + if p.IsSetOrcOnceMaxReadBytes() { + l += bthrift.Binary.FieldBeginLength("orc_once_max_read_bytes", thrift.I64, 139) + l += bthrift.Binary.I64Length(p.OrcOnceMaxReadBytes) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field84Length() int { +func (p *TQueryOptions) field140Length() int { l := 0 - if p.IsSetEnableProfile() { - l += bthrift.Binary.FieldBeginLength("enable_profile", thrift.BOOL, 84) - l += bthrift.Binary.BoolLength(p.EnableProfile) + if p.IsSetOrcMaxMergeDistanceBytes() { + l += bthrift.Binary.FieldBeginLength("orc_max_merge_distance_bytes", thrift.I64, 140) + l += bthrift.Binary.I64Length(p.OrcMaxMergeDistanceBytes) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field85Length() int { +func (p *TQueryOptions) field141Length() int { l := 0 - if p.IsSetEnablePageCache() { - l += bthrift.Binary.FieldBeginLength("enable_page_cache", thrift.BOOL, 85) - l += bthrift.Binary.BoolLength(p.EnablePageCache) + if p.IsSetIgnoreRuntimeFilterError() { + l += bthrift.Binary.FieldBeginLength("ignore_runtime_filter_error", thrift.BOOL, 141) + l += bthrift.Binary.BoolLength(p.IgnoreRuntimeFilterError) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TQueryOptions) field86Length() int { +func (p *TQueryOptions) field1000Length() int { l := 0 - if p.IsSetAnalyzeTimeout() { - l += bthrift.Binary.FieldBeginLength("analyze_timeout", thrift.I32, 86) - l += bthrift.Binary.I32Length(p.AnalyzeTimeout) + if p.IsSetDisableFileCache() { + l += bthrift.Binary.FieldBeginLength("disable_file_cache", thrift.BOOL, 1000) + l += bthrift.Binary.BoolLength(p.DisableFileCache) l += bthrift.Binary.FieldEndLength() } @@ -5616,6 +8528,20 @@ func (p *TRuntimeFilterTargetParamsV2) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -5702,6 +8628,36 @@ func (p *TRuntimeFilterTargetParamsV2) 
FastReadField2(buf []byte) (int, error) { return offset, nil } +func (p *TRuntimeFilterTargetParamsV2) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TargetFragmentIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TargetFragmentIds = append(p.TargetFragmentIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TRuntimeFilterTargetParamsV2) FastWrite(buf []byte) int { return 0 @@ -5713,6 +8669,7 @@ func (p *TRuntimeFilterTargetParamsV2) FastWriteNocopy(buf []byte, binaryWriter if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -5725,6 +8682,7 @@ func (p *TRuntimeFilterTargetParamsV2) BLength() int { if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5755,6 +8713,25 @@ func (p *TRuntimeFilterTargetParamsV2) fastWriteField2(buf []byte, binaryWriter return offset } +func (p *TRuntimeFilterTargetParamsV2) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTargetFragmentIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "target_fragment_ids", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TargetFragmentIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TRuntimeFilterTargetParamsV2) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("target_fragment_instance_ids", thrift.LIST, 1) @@ -5775,6 +8752,19 @@ func (p *TRuntimeFilterTargetParamsV2) field2Length() int { return l } +func (p *TRuntimeFilterTargetParamsV2) field3Length() int { + l := 0 + if p.IsSetTargetFragmentIds() { + l += bthrift.Binary.FieldBeginLength("target_fragment_ids", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TargetFragmentIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TargetFragmentIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TRuntimeFilterParams) FastRead(buf []byte) (int, error) { var err error var offset int @@ -6494,6 +9484,20 @@ func (p *TPlanFragmentExecParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 14: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6760,6 +9764,36 @@ func (p 
*TPlanFragmentExecParams) FastReadField13(buf []byte) (int, error) { return offset, nil } +func (p *TPlanFragmentExecParams) FastReadField14(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TPlanFragmentExecParams) FastWrite(buf []byte) int { return 0 @@ -6779,6 +9813,7 @@ func (p *TPlanFragmentExecParams) FastWriteNocopy(buf []byte, binaryWriter bthri offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -6799,6 +9834,7 @@ func (p *TPlanFragmentExecParams) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -6938,6 +9974,25 @@ func (p *TPlanFragmentExecParams) fastWriteField13(buf []byte, binaryWriter bthr return offset } +func (p *TPlanFragmentExecParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 14) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPlanFragmentExecParams) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 1) @@ -7051,6 +10106,19 @@ func (p *TPlanFragmentExecParams) field13Length() int { return l } +func (p *TPlanFragmentExecParams) field14Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 14) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TQueryGlobals) FastRead(buf []byte) (int, error) { var err error var offset int @@ -8197,57 +11265,321 @@ func (p *TColumnDict) BLength() int { func (p *TColumnDict) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) + if p.IsSetType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", 
thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Type)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TColumnDict) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "str_dict", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.StrDict { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TColumnDict) field1Length() int { + l := 0 + if p.IsSetType() { + l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.Type)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TColumnDict) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("str_dict", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.StrDict)) + for _, v := range p.StrDict { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TGlobalDict) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGlobalDict[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", 
p), err) +} + +func (p *TGlobalDict) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Dicts = make(map[int32]*TColumnDict, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := NewTColumnDict() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Dicts[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SlotDicts = make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.SlotDicts[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TGlobalDict) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGlobalDict) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGlobalDict") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGlobalDict) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGlobalDict") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGlobalDict) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDicts() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dicts", thrift.MAP, 1) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.Dicts { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TColumnDict) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TGlobalDict) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "str_dict", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.StrDict { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetSlotDicts() { + offset 
+= bthrift.Binary.WriteFieldBegin(buf[offset:], "slot_dicts", thrift.MAP, 2) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.SlotDicts { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TColumnDict) field1Length() int { +func (p *TGlobalDict) field1Length() int { l := 0 - if p.IsSetType() { - l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.Type)) + if p.IsSetDicts() { + l += bthrift.Binary.FieldBeginLength("dicts", thrift.MAP, 1) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.Dicts)) + for k, v := range p.Dicts { + l += bthrift.Binary.I32Length(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TColumnDict) field2Length() int { +func (p *TGlobalDict) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("str_dict", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.StrDict)) - for _, v := range p.StrDict { - l += bthrift.Binary.StringLengthNocopy(v) - + if p.IsSetSlotDicts() { + l += bthrift.Binary.FieldBeginLength("slot_dicts", thrift.MAP, 2) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.SlotDicts)) + var tmpK int32 + var tmpV int32 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.SlotDicts) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TGlobalDict) FastRead(buf []byte) (int, error) { +func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8270,7 +11602,7 @@ func (p *TGlobalDict) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -8284,7 +11616,7 @@ func (p *TGlobalDict) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -8297,6 +11629,34 @@ func (p *TGlobalDict) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -8323,7 +11683,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGlobalDict[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineWorkloadGroup[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8332,43 +11692,33 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TGlobalDict) FastReadField1(buf []byte) (int, error) { +func (p *TPipelineWorkloadGroup) FastReadField1(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err - } - p.Dicts = make(map[int32]*TColumnDict, size) - for i := 0; i < size; i++ { - var _key int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + } else { + offset += l + p.Id = &v - _key = v + } + return offset, nil +} - } - _val := NewTColumnDict() - if l, err := _val.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } +func (p *TPipelineWorkloadGroup) FastReadField2(buf []byte) (int, error) { + offset := 0 - p.Dicts[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Name = &v + } return offset, nil } -func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { +func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { offset := 0 _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) @@ -8376,10 +11726,10 @@ func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.SlotDicts = make(map[int32]int32, size) + p.Properties = make(map[string]string, size) for i := 0; i < size; i++ { - var _key int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -8388,8 +11738,8 @@ func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { } - var _val int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -8398,7 +11748,7 @@ func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { } - p.SlotDicts[_key] = _val + p.Properties[_key] = _val } if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err @@ -8408,88 +11758,140 @@ func (p *TGlobalDict) FastReadField2(buf []byte) (int, error) { return offset, nil } +func (p *TPipelineWorkloadGroup) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Version = &v + + } + return offset, nil +} + // for compatibility -func (p *TGlobalDict) FastWrite(buf []byte) int { +func (p *TPipelineWorkloadGroup) FastWrite(buf []byte) int { return 0 } -func (p *TGlobalDict) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineWorkloadGroup) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGlobalDict") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineWorkloadGroup") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TGlobalDict) BLength() int { +func (p *TPipelineWorkloadGroup) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TGlobalDict") + l += bthrift.Binary.StructBeginLength("TPipelineWorkloadGroup") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TGlobalDict) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineWorkloadGroup) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDicts() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dicts", thrift.MAP, 1) + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineWorkloadGroup) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineWorkloadGroup) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 3) mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) var length int - for k, v := range p.Dicts { + for k, v := range p.Properties { length++ - offset += bthrift.Binary.WriteI32(buf[offset:], k) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TGlobalDict) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineWorkloadGroup) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSlotDicts() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "slot_dicts", thrift.MAP, 2) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) - var length int - for k, v := range p.SlotDicts { - 
length++ + if p.IsSetVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version) - offset += bthrift.Binary.WriteI32(buf[offset:], k) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - offset += bthrift.Binary.WriteI32(buf[offset:], v) +func (p *TPipelineWorkloadGroup) field1Length() int { + l := 0 + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineWorkloadGroup) field2Length() int { + l := 0 + if p.IsSetName() { + l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Name) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TGlobalDict) field1Length() int { +func (p *TPipelineWorkloadGroup) field3Length() int { l := 0 - if p.IsSetDicts() { - l += bthrift.Binary.FieldBeginLength("dicts", thrift.MAP, 1) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.Dicts)) - for k, v := range p.Dicts { + if p.IsSetProperties() { + l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) + for k, v := range p.Properties { - l += bthrift.Binary.I32Length(k) + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) - l += v.BLength() } l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() @@ -8497,15 +11899,12 @@ func (p *TGlobalDict) field1Length() int { return l } -func (p *TGlobalDict) field2Length() int { +func (p *TPipelineWorkloadGroup) field4Length() int { l := 0 - if p.IsSetSlotDicts() { - l += bthrift.Binary.FieldBeginLength("slot_dicts", thrift.MAP, 2) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.SlotDicts)) - var tmpK int32 - var tmpV int32 - l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.SlotDicts) - l += bthrift.Binary.MapEndLength() + if p.IsSetVersion() { + l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.Version) + l += bthrift.Binary.FieldEndLength() } return l @@ -8885,6 +12284,118 @@ func (p *TExecPlanFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 26: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField26(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 27: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField27(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 28: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField28(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 29: + if fieldTypeId == thrift.I64 { + l, err = 
p.FastReadField29(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 30: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField30(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 31: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField31(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 32: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField32(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9269,24 +12780,143 @@ func (p *TExecPlanFragmentParams) FastReadField24(buf []byte) (int, error) { offset += l } - p.FileScanParams[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + p.FileScanParams[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField25(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.WalId = &v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField26(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadStreamPerNode = &v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField27(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalLoadStreams = &v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField28(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumLocalSink = &v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField29(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ContentLength = &v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField30(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineWorkloadGroup() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, 
err + } else { + offset += l + } + + p.WorkloadGroups = append(p.WorkloadGroups, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField31(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsNereids = v + + } + return offset, nil +} + +func (p *TExecPlanFragmentParams) FastReadField32(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.CurrentConnectFe = tmp return offset, nil } -func (p *TExecPlanFragmentParams) FastReadField25(buf []byte) (int, error) { +func (p *TExecPlanFragmentParams) FastReadField1000(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.WalId = &v + p.IsMowTable = &v } return offset, nil @@ -9310,6 +12940,12 @@ func (p *TExecPlanFragmentParams) FastWriteNocopy(buf []byte, binaryWriter bthri offset += p.fastWriteField20(buf[offset:], binaryWriter) offset += p.fastWriteField21(buf[offset:], binaryWriter) offset += p.fastWriteField25(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField31(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -9326,6 +12962,8 @@ func (p *TExecPlanFragmentParams) FastWriteNocopy(buf []byte, binaryWriter bthri offset += p.fastWriteField22(buf[offset:], binaryWriter) offset += p.fastWriteField23(buf[offset:], binaryWriter) offset += p.fastWriteField24(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField32(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -9361,6 +12999,14 @@ func (p *TExecPlanFragmentParams) BLength() int { l += p.field23Length() l += p.field24Length() l += p.field25Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field31Length() + l += p.field32Length() + l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -9647,6 +13293,100 @@ func (p *TExecPlanFragmentParams) fastWriteField25(buf []byte, binaryWriter bthr return offset } +func (p *TExecPlanFragmentParams) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadStreamPerNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_stream_per_node", thrift.I32, 26) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.LoadStreamPerNode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + 
if p.IsSetTotalLoadStreams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_load_streams", thrift.I32, 27) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TotalLoadStreams) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumLocalSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_local_sink", thrift.I32, 28) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumLocalSink) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetContentLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "content_length", thrift.I64, 29) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ContentLength) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadGroups() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_groups", thrift.LIST, 30) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.WorkloadGroups { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNereids() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_nereids", thrift.BOOL, 31) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsNereids) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentConnectFe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_connect_fe", thrift.STRUCT, 32) + offset += p.CurrentConnectFe.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExecPlanFragmentParams) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsMowTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_mow_table", thrift.BOOL, 1000) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsMowTable) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TExecPlanFragmentParams) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) @@ -9919,6 +13659,96 @@ func (p *TExecPlanFragmentParams) field25Length() int { return l } +func (p *TExecPlanFragmentParams) field26Length() int { + l := 0 + if p.IsSetLoadStreamPerNode() { + l += bthrift.Binary.FieldBeginLength("load_stream_per_node", thrift.I32, 26) + l += bthrift.Binary.I32Length(*p.LoadStreamPerNode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field27Length() int { + l := 0 + if p.IsSetTotalLoadStreams() { + l += 
bthrift.Binary.FieldBeginLength("total_load_streams", thrift.I32, 27) + l += bthrift.Binary.I32Length(*p.TotalLoadStreams) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field28Length() int { + l := 0 + if p.IsSetNumLocalSink() { + l += bthrift.Binary.FieldBeginLength("num_local_sink", thrift.I32, 28) + l += bthrift.Binary.I32Length(*p.NumLocalSink) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field29Length() int { + l := 0 + if p.IsSetContentLength() { + l += bthrift.Binary.FieldBeginLength("content_length", thrift.I64, 29) + l += bthrift.Binary.I64Length(*p.ContentLength) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field30Length() int { + l := 0 + if p.IsSetWorkloadGroups() { + l += bthrift.Binary.FieldBeginLength("workload_groups", thrift.LIST, 30) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.WorkloadGroups)) + for _, v := range p.WorkloadGroups { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field31Length() int { + l := 0 + if p.IsSetIsNereids() { + l += bthrift.Binary.FieldBeginLength("is_nereids", thrift.BOOL, 31) + l += bthrift.Binary.BoolLength(p.IsNereids) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field32Length() int { + l := 0 + if p.IsSetCurrentConnectFe() { + l += bthrift.Binary.FieldBeginLength("current_connect_fe", thrift.STRUCT, 32) + l += p.CurrentConnectFe.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExecPlanFragmentParams) field1000Length() int { + l := 0 + if p.IsSetIsMowTable() { + l += bthrift.Binary.FieldBeginLength("is_mow_table", thrift.BOOL, 1000) + l += bthrift.Binary.BoolLength(*p.IsMowTable) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TExecPlanFragmentParamsList) FastRead(buf []byte) (int, error) { var err error var offset int @@ -10797,6 +14627,20 @@ func (p *TFoldConstantParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10956,6 +14800,19 @@ func (p *TFoldConstantParams) FastReadField5(buf []byte) (int, error) { return offset, nil } +func (p *TFoldConstantParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsNereids = &v + + } + return offset, nil +} + // for compatibility func (p *TFoldConstantParams) FastWrite(buf []byte) int { return 0 @@ -10966,6 +14823,7 @@ func (p *TFoldConstantParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.B offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFoldConstantParams") if p != nil { offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) @@ -10985,6 +14843,7 @@ func (p *TFoldConstantParams) BLength() int { l 
+= p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -11060,6 +14919,17 @@ func (p *TFoldConstantParams) fastWriteField5(buf []byte, binaryWriter bthrift.B return offset } +func (p *TFoldConstantParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNereids() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_nereids", thrift.BOOL, 6) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsNereids) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TFoldConstantParams) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("expr_map", thrift.MAP, 1) @@ -11121,6 +14991,17 @@ func (p *TFoldConstantParams) field5Length() int { return l } +func (p *TFoldConstantParams) field6Length() int { + l := 0 + if p.IsSetIsNereids() { + l += bthrift.Binary.FieldBeginLength("is_nereids", thrift.BOOL, 6) + l += bthrift.Binary.BoolLength(*p.IsNereids) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTransmitDataParams) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13827,110 +17708,30 @@ func (p *TTabletWriterCancelParams) field1Length() int { return l } -func (p *TTabletWriterCancelParams) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("index_id", thrift.I64, 2) - l += bthrift.Binary.I64Length(p.IndexId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TTabletWriterCancelParams) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("sender_no", thrift.I32, 3) - l += bthrift.Binary.I32Length(p.SenderNo) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TTabletWriterCancelResult_) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldTypeError - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) - -SkipFieldTypeError: - return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -// for compatibility -func (p *TTabletWriterCancelResult_) FastWrite(buf []byte) int { - return 0 -} +func (p *TTabletWriterCancelParams) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("index_id", thrift.I64, 2) + l += bthrift.Binary.I64Length(p.IndexId) -func (p *TTabletWriterCancelResult_) 
FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTabletWriterCancelResult") - if p != nil { - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + l += bthrift.Binary.FieldEndLength() + return l } -func (p *TTabletWriterCancelResult_) BLength() int { +func (p *TTabletWriterCancelParams) field3Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTabletWriterCancelResult") - if p != nil { - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() + l += bthrift.Binary.FieldBeginLength("sender_no", thrift.I32, 3) + l += bthrift.Binary.I32Length(p.SenderNo) + + l += bthrift.Binary.FieldEndLength() return l } -func (p *TFetchDataParams) FastRead(buf []byte) (int, error) { +func (p *TTabletWriterCancelResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetProtocolVersion bool = false - var issetFragmentInstanceId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -13946,43 +17747,10 @@ func (p *TFetchDataParams) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetProtocolVersion = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetFragmentInstanceId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -13997,131 +17765,52 @@ func (p *TFetchDataParams) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetFragmentInstanceId { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataParams[fieldId])) -} - -func (p *TFetchDataParams) FastReadField1(buf []byte) (int, error) { - offset 
:= 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.ProtocolVersion = PaloInternalServiceVersion(v) - - } - return offset, nil -} - -func (p *TFetchDataParams) FastReadField2(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.FragmentInstanceId = tmp - return offset, nil } // for compatibility -func (p *TFetchDataParams) FastWrite(buf []byte) int { +func (p *TTabletWriterCancelResult_) FastWrite(buf []byte) int { return 0 } -func (p *TFetchDataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTabletWriterCancelResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchDataParams") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTabletWriterCancelResult") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFetchDataParams) BLength() int { +func (p *TTabletWriterCancelResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFetchDataParams") + l += bthrift.Binary.StructBeginLength("TTabletWriterCancelResult") if p != nil { - l += p.field1Length() - l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFetchDataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TFetchDataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 2) - offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TFetchDataParams) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFetchDataParams) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 2) - l += p.FragmentInstanceId.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFetchDataResult_) FastRead(buf []byte) (int, error) { +func (p *TFetchDataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetResultBatch bool = false - var issetEos bool = false - var issetPacketNum bool = false + var issetProtocolVersion bool = false + var issetFragmentInstanceId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -14139,13 +17828,13 @@ func (p *TFetchDataResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) 
offset += l if err != nil { goto ReadFieldError } - issetResultBatch = true + issetProtocolVersion = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14154,42 +17843,13 @@ func (p *TFetchDataResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetEos = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPacketNum = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField4(buf[offset:]) + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetFragmentInstanceId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14217,65 +17877,33 @@ func (p *TFetchDataResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetResultBatch { + if !issetProtocolVersion { fieldId = 1 goto RequiredFieldNotSetError } - if !issetEos { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetPacketNum { - fieldId = 3 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataResult_[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataResult_[fieldId])) -} - -func (p *TFetchDataResult_) FastReadField1(buf []byte) (int, error) { - offset := 0 - - tmp := data.NewTResultBatch() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.ResultBatch = tmp - return offset, nil -} - -func (p *TFetchDataResult_) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.Eos = v - + if !issetFragmentInstanceId { + fieldId = 2 + goto RequiredFieldNotSetError } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", 
p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataParams[fieldId])) } -func (p *TFetchDataResult_) FastReadField3(buf []byte) (int, error) { +func (p *TFetchDataParams) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { @@ -14283,139 +17911,97 @@ func (p *TFetchDataResult_) FastReadField3(buf []byte) (int, error) { } else { offset += l - p.PacketNum = v + p.ProtocolVersion = PaloInternalServiceVersion(v) } return offset, nil } -func (p *TFetchDataResult_) FastReadField4(buf []byte) (int, error) { +func (p *TFetchDataParams) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() + tmp := types.NewTUniqueId() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Status = tmp + p.FragmentInstanceId = tmp return offset, nil } // for compatibility -func (p *TFetchDataResult_) FastWrite(buf []byte) int { +func (p *TFetchDataParams) FastWrite(buf []byte) int { return 0 } -func (p *TFetchDataResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchDataResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchDataParams") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFetchDataResult_) BLength() int { +func (p *TFetchDataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFetchDataResult") + l += bthrift.Binary.StructBeginLength("TFetchDataParams") if p != nil { l += p.field1Length() l += p.field2Length() - l += p.field3Length() - l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFetchDataResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_batch", thrift.STRUCT, 1) - offset += p.ResultBatch.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TFetchDataResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "eos", thrift.BOOL, 2) - offset += bthrift.Binary.WriteBool(buf[offset:], p.Eos) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TFetchDataResult_) 
fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "packet_num", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], p.PacketNum) - + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 2) + offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TFetchDataResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStatus() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 4) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFetchDataResult_) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("result_batch", thrift.STRUCT, 1) - l += p.ResultBatch.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TFetchDataResult_) field2Length() int { +func (p *TFetchDataParams) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("eos", thrift.BOOL, 2) - l += bthrift.Binary.BoolLength(p.Eos) + l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) l += bthrift.Binary.FieldEndLength() return l } -func (p *TFetchDataResult_) field3Length() int { +func (p *TFetchDataParams) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("packet_num", thrift.I32, 3) - l += bthrift.Binary.I32Length(p.PacketNum) - + l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 2) + l += p.FragmentInstanceId.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TFetchDataResult_) field4Length() int { - l := 0 - if p.IsSetStatus() { - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 4) - l += p.Status.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCondition) FastRead(buf []byte) (int, error) { +func (p *TFetchDataResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetColumnName bool = false - var issetConditionOp bool = false - var issetConditionValues bool = false + var issetResultBatch bool = false + var issetEos bool = false + var issetPacketNum bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -14433,13 +18019,13 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetColumnName = true + issetResultBatch = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14448,13 +18034,13 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetConditionOp = true + issetEos = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14463,13 +18049,13 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == 
thrift.LIST { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetConditionValues = true + issetPacketNum = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14478,7 +18064,7 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -14491,20 +18077,6 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 5: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14525,17 +18097,17 @@ func (p *TCondition) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetColumnName { + if !issetResultBatch { fieldId = 1 goto RequiredFieldNotSetError } - if !issetConditionOp { + if !issetEos { fieldId = 2 goto RequiredFieldNotSetError } - if !issetConditionValues { + if !issetPacketNum { fieldId = 3 goto RequiredFieldNotSetError } @@ -14545,7 +18117,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCondition[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFetchDataResult_[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -14553,247 +18125,177 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCondition[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TFetchDataResult_[fieldId])) } -func (p *TCondition) FastReadField1(buf []byte) (int, error) { +func (p *TFetchDataResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := data.NewTResultBatch() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.ColumnName = v - } + p.ResultBatch = tmp return offset, nil } -func (p *TCondition) FastReadField2(buf []byte) (int, error) { +func (p *TFetchDataResult_) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ConditionOp = v - - } - return offset, nil -} - -func (p *TCondition) FastReadField3(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.ConditionValues = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := 
bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } + p.Eos = v - p.ConditionValues = append(p.ConditionValues, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } return offset, nil } -func (p *TCondition) FastReadField4(buf []byte) (int, error) { +func (p *TFetchDataResult_) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ColumnUniqueId = &v + + p.PacketNum = v } return offset, nil } -func (p *TCondition) FastReadField5(buf []byte) (int, error) { +func (p *TFetchDataResult_) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + tmp := status.NewTStatus() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MarkedByRuntimeFilter = v - } + p.Status = tmp return offset, nil } // for compatibility -func (p *TCondition) FastWrite(buf []byte) int { +func (p *TFetchDataResult_) FastWrite(buf []byte) int { return 0 } -func (p *TCondition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCondition") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFetchDataResult") if p != nil { - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TCondition) BLength() int { +func (p *TFetchDataResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TCondition") + l += bthrift.Binary.StructBeginLength("TFetchDataResult") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TCondition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_name", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ColumnName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TCondition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_op", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ConditionOp) - + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_batch", thrift.STRUCT, 1) + offset += p.ResultBatch.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TCondition) fastWriteField3(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TFetchDataResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_values", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ConditionValues { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "eos", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], p.Eos) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TCondition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnUniqueId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_unique_id", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.ColumnUniqueId) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "packet_num", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PacketNum) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TCondition) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFetchDataResult_) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMarkedByRuntimeFilter() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "marked_by_runtime_filter", thrift.BOOL, 5) - offset += bthrift.Binary.WriteBool(buf[offset:], p.MarkedByRuntimeFilter) - + if p.IsSetStatus() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 4) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TCondition) field1Length() int { +func (p *TFetchDataResult_) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(p.ColumnName) - + l += bthrift.Binary.FieldBeginLength("result_batch", thrift.STRUCT, 1) + l += p.ResultBatch.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TCondition) field2Length() int { +func (p *TFetchDataResult_) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("condition_op", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(p.ConditionOp) + l += bthrift.Binary.FieldBeginLength("eos", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(p.Eos) l += bthrift.Binary.FieldEndLength() return l } -func (p *TCondition) field3Length() int { +func (p *TFetchDataResult_) field3Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("condition_values", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ConditionValues)) - for _, v := range p.ConditionValues { - l += bthrift.Binary.StringLengthNocopy(v) + l += bthrift.Binary.FieldBeginLength("packet_num", thrift.I32, 3) + l += bthrift.Binary.I32Length(p.PacketNum) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TCondition) field4Length() int { - l := 0 - if p.IsSetColumnUniqueId() { - l += 
bthrift.Binary.FieldBeginLength("column_unique_id", thrift.I32, 4) - l += bthrift.Binary.I32Length(*p.ColumnUniqueId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TCondition) field5Length() int { +func (p *TFetchDataResult_) field4Length() int { l := 0 - if p.IsSetMarkedByRuntimeFilter() { - l += bthrift.Binary.FieldBeginLength("marked_by_runtime_filter", thrift.BOOL, 5) - l += bthrift.Binary.BoolLength(p.MarkedByRuntimeFilter) - + if p.IsSetStatus() { + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 4) + l += p.Status.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { +func (p *TCondition) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetStatus bool = false - var issetState bool = false + var issetColumnName bool = false + var issetConditionOp bool = false + var issetConditionValues bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -14811,13 +18313,13 @@ func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetStatus = true + issetColumnName = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14826,13 +18328,13 @@ func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetState = true + issetConditionOp = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14847,6 +18349,49 @@ func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } + issetConditionValues = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 1000: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1000(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -14874,22 +18419,27 @@ func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetStatus { + if !issetColumnName { fieldId = 1 goto RequiredFieldNotSetError } - if !issetState { + if !issetConditionOp { fieldId = 2 goto RequiredFieldNotSetError } + + if !issetConditionValues { + fieldId = 3 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, 
fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExportStatusResult_[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCondition[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -14897,37 +18447,38 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExportStatusResult_[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCondition[fieldId])) } -func (p *TExportStatusResult_) FastReadField1(buf []byte) (int, error) { +func (p *TCondition) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := status.NewTStatus() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.ColumnName = v + } - p.Status = tmp return offset, nil } -func (p *TExportStatusResult_) FastReadField2(buf []byte) (int, error) { +func (p *TCondition) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.State = types.TExportState(v) + p.ConditionOp = v } return offset, nil } -func (p *TExportStatusResult_) FastReadField3(buf []byte) (int, error) { +func (p *TCondition) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -14935,7 +18486,7 @@ func (p *TExportStatusResult_) FastReadField3(buf []byte) (int, error) { if err != nil { return offset, err } - p.Files = make([]string, 0, size) + p.ConditionValues = make([]string, 0, size) for i := 0; i < size; i++ { var _elem string if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { @@ -14947,7 +18498,7 @@ func (p *TExportStatusResult_) FastReadField3(buf []byte) (int, error) { } - p.Files = append(p.Files, _elem) + p.ConditionValues = append(p.ConditionValues, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -14957,113 +18508,224 @@ func (p *TExportStatusResult_) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TCondition) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ColumnUniqueId = &v + + } + return offset, nil +} + +func (p *TCondition) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.MarkedByRuntimeFilter = v + + } + return offset, nil +} + +func (p *TCondition) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.CompoundType = TCompoundType(v) + + } + return offset, nil +} + // for compatibility -func (p *TExportStatusResult_) 
FastWrite(buf []byte) int { +func (p *TCondition) FastWrite(buf []byte) int { return 0 } -func (p *TExportStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCondition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExportStatusResult") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCondition") if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TExportStatusResult_) BLength() int { +func (p *TCondition) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TExportStatusResult") + l += bthrift.Binary.StructBeginLength("TCondition") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TExportStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCondition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) - offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ColumnName) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExportStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCondition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "state", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.State)) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_op", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.ConditionOp) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCondition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_values", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ConditionValues { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TCondition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnUniqueId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_unique_id", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ColumnUniqueId) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TCondition) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMarkedByRuntimeFilter() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "marked_by_runtime_filter", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], p.MarkedByRuntimeFilter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TExportStatusResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TCondition) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFiles() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "files", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.Files { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetCompoundType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compound_type", thrift.I32, 1000) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.CompoundType)) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TExportStatusResult_) field1Length() int { +func (p *TCondition) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) - l += p.Status.BLength() + l += bthrift.Binary.FieldBeginLength("column_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(p.ColumnName) + l += bthrift.Binary.FieldEndLength() return l } -func (p *TExportStatusResult_) field2Length() int { +func (p *TCondition) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("state", thrift.I32, 2) - l += bthrift.Binary.I32Length(int32(p.State)) + l += bthrift.Binary.FieldBeginLength("condition_op", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(p.ConditionOp) l += bthrift.Binary.FieldEndLength() return l } -func (p *TExportStatusResult_) field3Length() int { +func (p *TCondition) field3Length() int { l := 0 - if p.IsSetFiles() { - l += bthrift.Binary.FieldBeginLength("files", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Files)) - for _, v := range p.Files { - l += bthrift.Binary.StringLengthNocopy(v) + l += bthrift.Binary.FieldBeginLength("condition_values", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ConditionValues)) + for _, v := range p.ConditionValues { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TCondition) field4Length() int { + l := 0 + if p.IsSetColumnUniqueId() { + l += bthrift.Binary.FieldBeginLength("column_unique_id", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.ColumnUniqueId) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineInstanceParams) FastRead(buf []byte) (int, error) { +func (p *TCondition) field5Length() int { + l := 0 + if p.IsSetMarkedByRuntimeFilter() { + l += bthrift.Binary.FieldBeginLength("marked_by_runtime_filter", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(p.MarkedByRuntimeFilter) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p 
*TCondition) field1000Length() int { + l := 0 + if p.IsSetCompoundType() { + l += bthrift.Binary.FieldBeginLength("compound_type", thrift.I32, 1000) + l += bthrift.Binary.I32Length(int32(p.CompoundType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExportStatusResult_) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetFragmentInstanceId bool = false - var issetPerNodeScanRanges bool = false + var issetStatus bool = false + var issetState bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -15087,7 +18749,7 @@ func (p *TPipelineInstanceParams) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetFragmentInstanceId = true + issetStatus = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15096,12 +18758,13 @@ func (p *TPipelineInstanceParams) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetState = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15110,273 +18773,115 @@ func (p *TPipelineInstanceParams) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetPerNodeScanRanges = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - if !issetFragmentInstanceId { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetPerNodeScanRanges { - fieldId = 3 - goto RequiredFieldNotSetError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, 
fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineInstanceParams[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineInstanceParams[fieldId])) -} - -func (p *TPipelineInstanceParams) FastReadField1(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.FragmentInstanceId = tmp - return offset, nil -} - -func (p *TPipelineInstanceParams) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.BuildHashTableForBroadcastJoin = v - - } - return offset, nil -} - -func (p *TPipelineInstanceParams) FastReadField3(buf []byte) (int, error) { - offset := 0 - - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PerNodeScanRanges = make(map[types.TPlanNodeId][]*TScanRangeParams, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - _val := make([]*TScanRangeParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTScanRangeParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - - _val = append(_val, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l + if err != nil { + goto SkipFieldError + } } - p.PerNodeScanRanges[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset, nil -} - -func (p *TPipelineInstanceParams) FastReadField4(buf []byte) (int, error) { - offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.SenderId = &v + if !issetStatus { + fieldId = 1 + goto RequiredFieldNotSetError + } + if !issetState { + fieldId = 2 + goto RequiredFieldNotSetError } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExportStatusResult_[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExportStatusResult_[fieldId])) } -func (p *TPipelineInstanceParams) FastReadField5(buf []byte) (int, error) { +func (p *TExportStatusResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTRuntimeFilterParams() + tmp := status.NewTStatus() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.RuntimeFilterParams = tmp + p.Status = tmp return offset, nil } -func (p *TPipelineInstanceParams) FastReadField6(buf []byte) (int, error) { +func (p *TExportStatusResult_) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BackendNum = &v + + p.State = types.TExportState(v) } return offset, nil } -func (p *TPipelineInstanceParams) FastReadField7(buf []byte) (int, error) { +func (p *TExportStatusResult_) FastReadField3(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.PerNodeSharedScans = make(map[types.TPlanNodeId]bool, size) + p.Files = make([]string, 0, size) for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val bool - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _val = v + _elem = v } - p.PerNodeSharedScans[_key] = _val + p.Files = append(p.Files, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -15385,234 +18890,112 @@ func (p *TPipelineInstanceParams) FastReadField7(buf []byte) (int, error) { } // for compatibility -func (p *TPipelineInstanceParams) FastWrite(buf []byte) int { +func (p *TExportStatusResult_) FastWrite(buf []byte) int { return 0 } -func (p *TPipelineInstanceParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExportStatusResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineInstanceParams") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExportStatusResult") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += 
p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPipelineInstanceParams) BLength() int { +func (p *TExportStatusResult_) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPipelineInstanceParams") + l += bthrift.Binary.StructBeginLength("TExportStatusResult") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPipelineInstanceParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 1) - offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPipelineInstanceParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBuildHashTableForBroadcastJoin() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "build_hash_table_for_broadcast_join", thrift.BOOL, 2) - offset += bthrift.Binary.WriteBool(buf[offset:], p.BuildHashTableForBroadcastJoin) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineInstanceParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExportStatusResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_node_scan_ranges", thrift.MAP, 3) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, 0) - var length int - for k, v := range p.PerNodeScanRanges { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) - - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range v { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.LIST, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1) + offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TPipelineInstanceParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSenderId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sender_id", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.SenderId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineInstanceParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetRuntimeFilterParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_params", thrift.STRUCT, 5) - offset += 
p.RuntimeFilterParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineInstanceParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBackendNum() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_num", thrift.I32, 6) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.BackendNum) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineInstanceParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExportStatusResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPerNodeSharedScans() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_node_shared_scans", thrift.MAP, 7) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, 0) - var length int - for k, v := range p.PerNodeSharedScans { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) - - offset += bthrift.Binary.WriteBool(buf[offset:], v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.BOOL, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineInstanceParams) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 1) - l += p.FragmentInstanceId.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPipelineInstanceParams) field2Length() int { - l := 0 - if p.IsSetBuildHashTableForBroadcastJoin() { - l += bthrift.Binary.FieldBeginLength("build_hash_table_for_broadcast_join", thrift.BOOL, 2) - l += bthrift.Binary.BoolLength(p.BuildHashTableForBroadcastJoin) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPipelineInstanceParams) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("per_node_scan_ranges", thrift.MAP, 3) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)) - for k, v := range p.PerNodeScanRanges { - - l += bthrift.Binary.I32Length(k) - - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) - for _, v := range v { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() - return l + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "state", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.State)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset } -func (p *TPipelineInstanceParams) field4Length() int { - l := 0 - if p.IsSetSenderId() { - l += bthrift.Binary.FieldBeginLength("sender_id", thrift.I32, 4) - l += bthrift.Binary.I32Length(*p.SenderId) +func (p *TExportStatusResult_) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFiles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "files", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.Files { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - l += bthrift.Binary.FieldEndLength() + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineInstanceParams) field5Length() int { +func (p *TExportStatusResult_) field1Length() int { l := 0 - if p.IsSetRuntimeFilterParams() { - l += bthrift.Binary.FieldBeginLength("runtime_filter_params", thrift.STRUCT, 5) - l += p.RuntimeFilterParams.BLength() - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1) + l += p.Status.BLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TPipelineInstanceParams) field6Length() int { +func (p *TExportStatusResult_) field2Length() int { l := 0 - if p.IsSetBackendNum() { - l += bthrift.Binary.FieldBeginLength("backend_num", thrift.I32, 6) - l += bthrift.Binary.I32Length(*p.BackendNum) + l += bthrift.Binary.FieldBeginLength("state", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(p.State)) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TPipelineInstanceParams) field7Length() int { +func (p *TExportStatusResult_) field3Length() int { l := 0 - if p.IsSetPerNodeSharedScans() { - l += bthrift.Binary.FieldBeginLength("per_node_shared_scans", thrift.MAP, 7) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)) - var tmpK types.TPlanNodeId - var tmpV bool - l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.BoolLength(bool(tmpV))) * len(p.PerNodeSharedScans) - l += bthrift.Binary.MapEndLength() + if p.IsSetFiles() { + l += bthrift.Binary.FieldBeginLength("files", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.Files)) + for _, v := range p.Files { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { +func (p *TPipelineInstanceParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetFragmentInstanceId bool = false + var issetPerNodeScanRanges bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -15630,12 +19013,13 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetFragmentInstanceId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15644,7 +19028,7 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -15664,6 +19048,7 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } + issetPerNodeScanRanges = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15672,7 +19057,7 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -15685,6 +19070,76 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l 
+ if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15705,48 +19160,151 @@ func (p *TPipelineWorkloadGroup) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetFragmentInstanceId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetPerNodeScanRanges { + fieldId = 3 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineWorkloadGroup[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineInstanceParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineInstanceParams[fieldId])) } -func (p *TPipelineWorkloadGroup) FastReadField1(buf []byte) (int, error) { +func (p *TPipelineInstanceParams) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Id = &v + } + p.FragmentInstanceId = tmp + return offset, nil +} + +func (p *TPipelineInstanceParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.BuildHashTableForBroadcastJoin = v + + } + return offset, nil +} + +func (p *TPipelineInstanceParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, 
err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PerNodeScanRanges = make(map[types.TPlanNodeId][]*TScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _val := make([]*TScanRangeParams, 0, size) + for i := 0; i < size; i++ { + _elem := NewTScanRangeParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _val = append(_val, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PerNodeScanRanges[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineInstanceParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SenderId = &v + + } + return offset, nil +} +func (p *TPipelineInstanceParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTRuntimeFilterParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } + p.RuntimeFilterParams = tmp return offset, nil } -func (p *TPipelineWorkloadGroup) FastReadField2(buf []byte) (int, error) { +func (p *TPipelineInstanceParams) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Name = &v + p.BackendNum = &v } return offset, nil } -func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { +func (p *TPipelineInstanceParams) FastReadField7(buf []byte) (int, error) { offset := 0 _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) @@ -15754,10 +19312,10 @@ func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { if err != nil { return offset, err } - p.Properties = make(map[string]string, size) + p.PerNodeSharedScans = make(map[types.TPlanNodeId]bool, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -15766,8 +19324,8 @@ func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { } - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + var _val bool + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -15776,7 +19334,7 @@ func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { } - p.Properties[_key] = _val + p.PerNodeSharedScans[_key] = _val } if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err @@ -15786,153 +19344,349 @@ func (p *TPipelineWorkloadGroup) FastReadField3(buf []byte) (int, error) { return offset, nil } -func (p *TPipelineWorkloadGroup) FastReadField4(buf []byte) (int, error) { +func (p *TPipelineInstanceParams) 
FastReadField8(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Version = &v + } + return offset, nil +} + +func (p *TPipelineInstanceParams) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterDescs = make([]*plannodes.TTopnFilterDesc, 0, size) + for i := 0; i < size; i++ { + _elem := plannodes.NewTTopnFilterDesc() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TopnFilterDescs = append(p.TopnFilterDescs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } return offset, nil } // for compatibility -func (p *TPipelineWorkloadGroup) FastWrite(buf []byte) int { +func (p *TPipelineInstanceParams) FastWrite(buf []byte) int { return 0 } -func (p *TPipelineWorkloadGroup) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineInstanceParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineWorkloadGroup") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineInstanceParams") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPipelineWorkloadGroup) BLength() int { +func (p *TPipelineInstanceParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPipelineWorkloadGroup") + l += bthrift.Binary.StructBeginLength("TPipelineInstanceParams") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPipelineWorkloadGroup) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineInstanceParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", 
thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_instance_id", thrift.STRUCT, 1) + offset += p.FragmentInstanceId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineInstanceParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBuildHashTableForBroadcastJoin() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "build_hash_table_for_broadcast_join", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], p.BuildHashTableForBroadcastJoin) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPipelineWorkloadGroup) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineInstanceParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_node_scan_ranges", thrift.MAP, 3) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, 0) + var length int + for k, v := range p.PerNodeScanRanges { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.LIST, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineInstanceParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSenderId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sender_id", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SenderId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPipelineWorkloadGroup) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineInstanceParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetProperties() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 3) + if p.IsSetRuntimeFilterParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_params", thrift.STRUCT, 5) + offset += p.RuntimeFilterParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineInstanceParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendNum() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_num", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BackendNum) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineInstanceParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPerNodeSharedScans() 
{ + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_node_shared_scans", thrift.MAP, 7) mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, 0) var length int - for k, v := range p.Properties { + for k, v := range p.PerNodeSharedScans { length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + offset += bthrift.Binary.WriteI32(buf[offset:], k) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + offset += bthrift.Binary.WriteBool(buf[offset:], v) } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.BOOL, length) offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPipelineWorkloadGroup) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineInstanceParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetVersion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version) + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 8) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} +func (p *TPipelineInstanceParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterDescs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_descs", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TopnFilterDescs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset +} + +func (p *TPipelineInstanceParams) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("fragment_instance_id", thrift.STRUCT, 1) + l += p.FragmentInstanceId.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPipelineInstanceParams) field2Length() int { + l := 0 + if p.IsSetBuildHashTableForBroadcastJoin() { + l += bthrift.Binary.FieldBeginLength("build_hash_table_for_broadcast_join", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(p.BuildHashTableForBroadcastJoin) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineInstanceParams) field3Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("per_node_scan_ranges", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, len(p.PerNodeScanRanges)) + for k, v := range p.PerNodeScanRanges { + + l += bthrift.Binary.I32Length(k) + + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += 
v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPipelineInstanceParams) field4Length() int { + l := 0 + if p.IsSetSenderId() { + l += bthrift.Binary.FieldBeginLength("sender_id", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.SenderId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineInstanceParams) field5Length() int { + l := 0 + if p.IsSetRuntimeFilterParams() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_params", thrift.STRUCT, 5) + l += p.RuntimeFilterParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l } -func (p *TPipelineWorkloadGroup) field1Length() int { +func (p *TPipelineInstanceParams) field6Length() int { l := 0 - if p.IsSetId() { - l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.Id) + if p.IsSetBackendNum() { + l += bthrift.Binary.FieldBeginLength("backend_num", thrift.I32, 6) + l += bthrift.Binary.I32Length(*p.BackendNum) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineWorkloadGroup) field2Length() int { +func (p *TPipelineInstanceParams) field7Length() int { l := 0 - if p.IsSetName() { - l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Name) - + if p.IsSetPerNodeSharedScans() { + l += bthrift.Binary.FieldBeginLength("per_node_shared_scans", thrift.MAP, 7) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)) + var tmpK types.TPlanNodeId + var tmpV bool + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.BoolLength(bool(tmpV))) * len(p.PerNodeSharedScans) + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineWorkloadGroup) field3Length() int { +func (p *TPipelineInstanceParams) field8Length() int { l := 0 - if p.IsSetProperties() { - l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 3) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) - for k, v := range p.Properties { - - l += bthrift.Binary.StringLengthNocopy(k) - - l += bthrift.Binary.StringLengthNocopy(v) - - } - l += bthrift.Binary.MapEndLength() + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 8) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineWorkloadGroup) field4Length() int { +func (p *TPipelineInstanceParams) field9Length() int { l := 0 - if p.IsSetVersion() { - l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.Version) - + if p.IsSetTopnFilterDescs() { + l += bthrift.Binary.FieldBeginLength("topn_filter_descs", thrift.LIST, 9) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TopnFilterDescs)) + for _, v := range p.TopnFilterDescs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l @@ -16092,9 +19846,219 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 10: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField10(buf[offset:]) + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = 
p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 21: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 22: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } 
+ case 23: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 24: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField24(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 26: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField26(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16106,9 +20070,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: + case 27: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField11(buf[offset:]) + l, err = p.FastReadField27(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16120,9 +20084,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 12: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField12(buf[offset:]) + case 28: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField28(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16134,9 +20098,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 13: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField13(buf[offset:]) + case 29: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField29(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16148,9 +20112,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 14: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField14(buf[offset:]) + case 30: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField30(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16162,9 +20126,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 15: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField15(buf[offset:]) + case 31: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField31(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16176,9 +20140,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 16: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField16(buf[offset:]) + case 32: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField32(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16190,9 +20154,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 17: + case 33: if fieldTypeId == thrift.I32 { - l, err = p.FastReadField17(buf[offset:]) + l, err = p.FastReadField33(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16204,9 +20168,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 18: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField18(buf[offset:]) + case 34: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField34(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16218,9 +20182,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 19: - if fieldTypeId == thrift.BOOL { - l, err = 
p.FastReadField19(buf[offset:]) + case 35: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField35(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16232,9 +20196,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 20: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField20(buf[offset:]) + case 36: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField36(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16246,9 +20210,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 21: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField21(buf[offset:]) + case 37: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField37(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16260,9 +20224,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 22: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField22(buf[offset:]) + case 38: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField38(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16274,9 +20238,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 23: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField23(buf[offset:]) + case 39: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField39(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16288,9 +20252,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 24: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField24(buf[offset:]) + case 40: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField40(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16302,9 +20266,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 26: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField26(buf[offset:]) + case 41: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField41(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16316,9 +20280,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 27: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField27(buf[offset:]) + case 42: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField42(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16330,9 +20294,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 28: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField28(buf[offset:]) + case 43: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField43(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16344,9 +20308,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 29: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField29(buf[offset:]) + case 44: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField44(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -16358,9 +20322,9 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 30: + case 1000: if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField30(buf[offset:]) + l, err = p.FastReadField1000(buf[offset:]) offset += l if err != nil { goto 
ReadFieldError @@ -16375,439 +20339,2236 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l - if err != nil { - goto SkipFieldError - } + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetProtocolVersion { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetQueryId { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetPerExchNumSenders { + fieldId = 4 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineFragmentParams[fieldId])) +} + +func (p *TPipelineFragmentParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ProtocolVersion = PaloInternalServiceVersion(v) + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryId = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentId = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PerExchNumSenders = make(map[types.TPlanNodeId]int32, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PerExchNumSenders[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := descriptors.NewTDescriptorTable() + if l, err := tmp.FastRead(buf[offset:]); err != nil { 
+ return offset, err + } else { + offset += l + } + p.DescTbl = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTResourceInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.ResourceInfo = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Destinations = make([]*datasinks.TPlanFragmentDestination, 0, size) + for i := 0; i < size; i++ { + _elem := datasinks.NewTPlanFragmentDestination() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.Destinations = append(p.Destinations, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumSenders = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SendQueryStatisticsWithEveryBatch = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Coord = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := NewTQueryGlobals() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryGlobals = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField12(buf []byte) (int, error) { + offset := 0 + + tmp := NewTQueryOptions() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryOptions = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ImportLabel = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbName = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadJobId = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField16(buf []byte) (int, error) { + offset := 0 + + tmp := NewTLoadErrorHubInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadErrorHubInfo = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField17(buf 
[]byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentNumOnHost = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.NeedWaitExecutionTrigger = v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField20(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.InstancesSharingHashTable = make([]*types.TUniqueId, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTUniqueId() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.InstancesSharingHashTable = append(p.InstancesSharingHashTable, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField21(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsSimplifiedParam = v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField22(buf []byte) (int, error) { + offset := 0 + + tmp := NewTGlobalDict() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.GlobalDict = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField23(buf []byte) (int, error) { + offset := 0 + + tmp := planner.NewTPlanFragment() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Fragment = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField24(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.LocalParams = make([]*TPipelineInstanceParams, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineInstanceParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.LocalParams = append(p.LocalParams, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField26(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineWorkloadGroup() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.WorkloadGroups = append(p.WorkloadGroups, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, 
nil +} + +func (p *TPipelineFragmentParams) FastReadField27(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTxnParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TxnConf = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField28(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableName = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField29(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FileScanParams = make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := plannodes.NewTFileScanRangeParams() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FileScanParams[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField30(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.GroupCommit = v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField31(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.LoadStreamPerNode = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField32(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalLoadStreams = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField33(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumLocalSink = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField34(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumBuckets = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField35(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.BucketSeqToInstanceIdx = make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.BucketSeqToInstanceIdx[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField36(buf 
[]byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PerNodeSharedScans = make(map[types.TPlanNodeId]bool, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val bool + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PerNodeSharedScans[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField37(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ParallelInstances = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField38(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalInstances = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField39(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ShuffleIdxToInstanceIdx = make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.ShuffleIdxToInstanceIdx[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField40(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsNereids = v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField41(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.WalId = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField42(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ContentLength = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField43(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.CurrentConnectFe = tmp + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField44(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + 
return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParams) FastReadField1000(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsMowTable = &v + + } + return offset, nil +} + +// for compatibility +func (p *TPipelineFragmentParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPipelineFragmentParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineFragmentParams") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) + offset += p.fastWriteField31(buf[offset:], binaryWriter) + offset += p.fastWriteField32(buf[offset:], binaryWriter) + offset += p.fastWriteField33(buf[offset:], binaryWriter) + offset += p.fastWriteField34(buf[offset:], binaryWriter) + offset += p.fastWriteField37(buf[offset:], binaryWriter) + offset += p.fastWriteField38(buf[offset:], binaryWriter) + offset += p.fastWriteField40(buf[offset:], binaryWriter) + offset += p.fastWriteField41(buf[offset:], binaryWriter) + offset += p.fastWriteField42(buf[offset:], binaryWriter) + offset += p.fastWriteField1000(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) + offset += p.fastWriteField26(buf[offset:], binaryWriter) + offset += p.fastWriteField27(buf[offset:], binaryWriter) + offset += p.fastWriteField28(buf[offset:], binaryWriter) + offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField35(buf[offset:], binaryWriter) + offset += p.fastWriteField36(buf[offset:], binaryWriter) + offset += p.fastWriteField39(buf[offset:], binaryWriter) + offset += p.fastWriteField43(buf[offset:], binaryWriter) + offset += p.fastWriteField44(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPipelineFragmentParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() + l += p.field26Length() + l += p.field27Length() + l += p.field28Length() + l += p.field29Length() + l += p.field30Length() + l += p.field31Length() + l += p.field32Length() + l += p.field33Length() + l += p.field34Length() + l += p.field35Length() + l += p.field36Length() + l += p.field37Length() + l += p.field38Length() + l += p.field39Length() + l += p.field40Length() + l += p.field41Length() + l += p.field42Length() + l += p.field43Length() + l += p.field44Length() + l += p.field1000Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPipelineFragmentParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 2) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_id", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_exch_num_senders", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.PerExchNumSenders { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDescTbl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desc_tbl", thrift.STRUCT, 5) + offset += p.DescTbl.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } 
+ return offset +} + +func (p *TPipelineFragmentParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetResourceInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_info", thrift.STRUCT, 6) + offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "destinations", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.Destinations { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumSenders() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_senders", thrift.I32, 8) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumSenders) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSendQueryStatisticsWithEveryBatch() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "send_query_statistics_with_every_batch", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.SendQueryStatisticsWithEveryBatch) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCoord() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "coord", thrift.STRUCT, 10) + offset += p.Coord.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryGlobals() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_globals", thrift.STRUCT, 11) + offset += p.QueryGlobals.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 12) + offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetImportLabel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "import_label", thrift.STRING, 13) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ImportLabel) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset 
:= 0 + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 14) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadJobId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_job_id", thrift.I64, 15) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadJobId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadErrorHubInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_error_hub_info", thrift.STRUCT, 16) + offset += p.LoadErrorHubInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentNumOnHost() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_num_on_host", thrift.I32, 17) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentNumOnHost) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 18) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNeedWaitExecutionTrigger() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "need_wait_execution_trigger", thrift.BOOL, 19) + offset += bthrift.Binary.WriteBool(buf[offset:], p.NeedWaitExecutionTrigger) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetInstancesSharingHashTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "instances_sharing_hash_table", thrift.LIST, 20) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.InstancesSharingHashTable { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsSimplifiedParam() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_simplified_param", thrift.BOOL, 21) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsSimplifiedParam) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGlobalDict() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "global_dict", thrift.STRUCT, 22) + offset += p.GlobalDict.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragment() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment", thrift.STRUCT, 23) + offset += p.Fragment.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_params", thrift.LIST, 24) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.LocalParams { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadGroups() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_groups", thrift.LIST, 26) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.WorkloadGroups { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTxnConf() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_conf", thrift.STRUCT, 27) + offset += p.TxnConf.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 28) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileScanParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_params", thrift.MAP, 29) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.FileScanParams { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + 
offset := 0 + if p.IsSetGroupCommit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit", thrift.BOOL, 30) + offset += bthrift.Binary.WriteBool(buf[offset:], p.GroupCommit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField31(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadStreamPerNode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_stream_per_node", thrift.I32, 31) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.LoadStreamPerNode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField32(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTotalLoadStreams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_load_streams", thrift.I32, 32) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TotalLoadStreams) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField33(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumLocalSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_local_sink", thrift.I32, 33) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumLocalSink) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumBuckets() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_buckets", thrift.I32, 34) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumBuckets) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBucketSeqToInstanceIdx() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bucket_seq_to_instance_idx", thrift.MAP, 35) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.BucketSeqToInstanceIdx { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPerNodeSharedScans() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "per_node_shared_scans", thrift.MAP, 36) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, 0) + var length int + for k, v := range p.PerNodeSharedScans { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteBool(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.BOOL, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField37(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParallelInstances() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_instances", thrift.I32, 37) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ParallelInstances) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField38(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTotalInstances() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_instances", thrift.I32, 38) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.TotalInstances) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField39(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetShuffleIdxToInstanceIdx() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "shuffle_idx_to_instance_idx", thrift.MAP, 39) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.ShuffleIdxToInstanceIdx { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField40(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNereids() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_nereids", thrift.BOOL, 40) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsNereids) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField41(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWalId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wal_id", thrift.I64, 41) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.WalId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField42(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetContentLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "content_length", thrift.I64, 42) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ContentLength) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField43(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentConnectFe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_connect_fe", thrift.STRUCT, 43) + offset += p.CurrentConnectFe.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 44) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + 
offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsMowTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_mow_table", thrift.BOOL, 1000) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsMowTable) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParams) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPipelineFragmentParams) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 2) + l += p.QueryId.BLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPipelineFragmentParams) field3Length() int { + l := 0 + if p.IsSetFragmentId() { + l += bthrift.Binary.FieldBeginLength("fragment_id", thrift.I32, 3) + l += bthrift.Binary.I32Length(*p.FragmentId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParams) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("per_exch_num_senders", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.PerExchNumSenders)) + var tmpK types.TPlanNodeId + var tmpV int32 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.PerExchNumSenders) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPipelineFragmentParams) field5Length() int { + l := 0 + if p.IsSetDescTbl() { + l += bthrift.Binary.FieldBeginLength("desc_tbl", thrift.STRUCT, 5) + l += p.DescTbl.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } +func (p *TPipelineFragmentParams) field6Length() int { + l := 0 + if p.IsSetResourceInfo() { + l += bthrift.Binary.FieldBeginLength("resource_info", thrift.STRUCT, 6) + l += p.ResourceInfo.BLength() + l += bthrift.Binary.FieldEndLength() } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError + return l +} + +func (p *TPipelineFragmentParams) field7Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("destinations", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Destinations)) + for _, v := range p.Destinations { + l += v.BLength() } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} - if !issetProtocolVersion { - fieldId = 1 - goto RequiredFieldNotSetError +func (p *TPipelineFragmentParams) field8Length() int { + l := 0 + if p.IsSetNumSenders() { + l += bthrift.Binary.FieldBeginLength("num_senders", thrift.I32, 8) + l += bthrift.Binary.I32Length(*p.NumSenders) + + l += bthrift.Binary.FieldEndLength() } + return l +} - if !issetQueryId { - fieldId = 2 - goto RequiredFieldNotSetError +func (p *TPipelineFragmentParams) field9Length() int { + l := 0 + if p.IsSetSendQueryStatisticsWithEveryBatch() { + l += bthrift.Binary.FieldBeginLength("send_query_statistics_with_every_batch", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(*p.SendQueryStatisticsWithEveryBatch) + + l += bthrift.Binary.FieldEndLength() } + return l +} - if !issetPerExchNumSenders { - 
fieldId = 4 - goto RequiredFieldNotSetError +func (p *TPipelineFragmentParams) field10Length() int { + l := 0 + if p.IsSetCoord() { + l += bthrift.Binary.FieldBeginLength("coord", thrift.STRUCT, 10) + l += p.Coord.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParams[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPipelineFragmentParams[fieldId])) + return l } -func (p *TPipelineFragmentParams) FastReadField1(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field11Length() int { + l := 0 + if p.IsSetQueryGlobals() { + l += bthrift.Binary.FieldBeginLength("query_globals", thrift.STRUCT, 11) + l += p.QueryGlobals.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field12Length() int { + l := 0 + if p.IsSetQueryOptions() { + l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 12) + l += p.QueryOptions.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - p.ProtocolVersion = PaloInternalServiceVersion(v) +func (p *TPipelineFragmentParams) field13Length() int { + l := 0 + if p.IsSetImportLabel() { + l += bthrift.Binary.FieldBeginLength("import_label", thrift.STRING, 13) + l += bthrift.Binary.StringLengthNocopy(*p.ImportLabel) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField2(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field14Length() int { + l := 0 + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 14) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.QueryId = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField3(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field15Length() int { + l := 0 + if p.IsSetLoadJobId() { + l += bthrift.Binary.FieldBeginLength("load_job_id", thrift.I64, 15) + l += bthrift.Binary.I64Length(*p.LoadJobId) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FragmentId = &v + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TPipelineFragmentParams) field16Length() int { + l := 0 + if p.IsSetLoadErrorHubInfo() { + l += bthrift.Binary.FieldBeginLength("load_error_hub_info", thrift.STRUCT, 16) + l += p.LoadErrorHubInfo.BLength() + l 
+= bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField4(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field17Length() int { + l := 0 + if p.IsSetFragmentNumOnHost() { + l += bthrift.Binary.FieldBeginLength("fragment_num_on_host", thrift.I32, 17) + l += bthrift.Binary.I32Length(*p.FragmentNumOnHost) - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + l += bthrift.Binary.FieldEndLength() } - p.PerExchNumSenders = make(map[types.TPlanNodeId]int32, size) - for i := 0; i < size; i++ { - var _key types.TPlanNodeId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return l +} - _key = v +func (p *TPipelineFragmentParams) field18Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 18) + l += bthrift.Binary.I64Length(*p.BackendId) - } + l += bthrift.Binary.FieldEndLength() + } + return l +} - var _val int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field19Length() int { + l := 0 + if p.IsSetNeedWaitExecutionTrigger() { + l += bthrift.Binary.FieldBeginLength("need_wait_execution_trigger", thrift.BOOL, 19) + l += bthrift.Binary.BoolLength(p.NeedWaitExecutionTrigger) - _val = v + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TPipelineFragmentParams) field20Length() int { + l := 0 + if p.IsSetInstancesSharingHashTable() { + l += bthrift.Binary.FieldBeginLength("instances_sharing_hash_table", thrift.LIST, 20) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.InstancesSharingHashTable)) + for _, v := range p.InstancesSharingHashTable { + l += v.BLength() } - - p.PerExchNumSenders[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField5(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field21Length() int { + l := 0 + if p.IsSetIsSimplifiedParam() { + l += bthrift.Binary.FieldBeginLength("is_simplified_param", thrift.BOOL, 21) + l += bthrift.Binary.BoolLength(p.IsSimplifiedParam) - tmp := descriptors.NewTDescriptorTable() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.DescTbl = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField6(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field22Length() int { + l := 0 + if p.IsSetGlobalDict() { + l += bthrift.Binary.FieldBeginLength("global_dict", thrift.STRUCT, 22) + l += p.GlobalDict.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - tmp := types.NewTResourceInfo() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field23Length() int { + l := 0 + if p.IsSetFragment() { + l += bthrift.Binary.FieldBeginLength("fragment", thrift.STRUCT, 23) + l += p.Fragment.BLength() + l += bthrift.Binary.FieldEndLength() } - p.ResourceInfo = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField7(buf []byte) 
(int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err +func (p *TPipelineFragmentParams) field24Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("local_params", thrift.LIST, 24) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.LocalParams)) + for _, v := range p.LocalParams { + l += v.BLength() } - p.Destinations = make([]*datasinks.TPlanFragmentDestination, 0, size) - for i := 0; i < size; i++ { - _elem := datasinks.NewTPlanFragmentDestination() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} - p.Destinations = append(p.Destinations, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field26Length() int { + l := 0 + if p.IsSetWorkloadGroups() { + l += bthrift.Binary.FieldBeginLength("workload_groups", thrift.LIST, 26) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.WorkloadGroups)) + for _, v := range p.WorkloadGroups { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField8(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field27Length() int { + l := 0 + if p.IsSetTxnConf() { + l += bthrift.Binary.FieldBeginLength("txn_conf", thrift.STRUCT, 27) + l += p.TxnConf.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.NumSenders = &v +func (p *TPipelineFragmentParams) field28Length() int { + l := 0 + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 28) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField9(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field29Length() int { + l := 0 + if p.IsSetFileScanParams() { + l += bthrift.Binary.FieldBeginLength("file_scan_params", thrift.MAP, 29) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.FileScanParams)) + for k, v := range p.FileScanParams { - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.SendQueryStatisticsWithEveryBatch = &v + l += bthrift.Binary.I32Length(k) + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField10(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field30Length() int { + l := 0 + if p.IsSetGroupCommit() { + l += bthrift.Binary.FieldBeginLength("group_commit", thrift.BOOL, 30) + l += bthrift.Binary.BoolLength(p.GroupCommit) - tmp := types.NewTNetworkAddress() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.Coord = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField11(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field31Length() int { + l := 0 + if 
p.IsSetLoadStreamPerNode() { + l += bthrift.Binary.FieldBeginLength("load_stream_per_node", thrift.I32, 31) + l += bthrift.Binary.I32Length(*p.LoadStreamPerNode) - tmp := NewTQueryGlobals() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.QueryGlobals = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField12(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field32Length() int { + l := 0 + if p.IsSetTotalLoadStreams() { + l += bthrift.Binary.FieldBeginLength("total_load_streams", thrift.I32, 32) + l += bthrift.Binary.I32Length(*p.TotalLoadStreams) - tmp := NewTQueryOptions() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.QueryOptions = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField13(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ImportLabel = &v +func (p *TPipelineFragmentParams) field33Length() int { + l := 0 + if p.IsSetNumLocalSink() { + l += bthrift.Binary.FieldBeginLength("num_local_sink", thrift.I32, 33) + l += bthrift.Binary.I32Length(*p.NumLocalSink) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField14(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DbName = &v +func (p *TPipelineFragmentParams) field34Length() int { + l := 0 + if p.IsSetNumBuckets() { + l += bthrift.Binary.FieldBeginLength("num_buckets", thrift.I32, 34) + l += bthrift.Binary.I32Length(*p.NumBuckets) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField15(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.LoadJobId = &v - +func (p *TPipelineFragmentParams) field35Length() int { + l := 0 + if p.IsSetBucketSeqToInstanceIdx() { + l += bthrift.Binary.FieldBeginLength("bucket_seq_to_instance_idx", thrift.MAP, 35) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.BucketSeqToInstanceIdx)) + var tmpK int32 + var tmpV int32 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.BucketSeqToInstanceIdx) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField16(buf []byte) (int, error) { - offset := 0 - - tmp := NewTLoadErrorHubInfo() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field36Length() int { + l := 0 + if p.IsSetPerNodeSharedScans() { + l += bthrift.Binary.FieldBeginLength("per_node_shared_scans", thrift.MAP, 36) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.BOOL, len(p.PerNodeSharedScans)) + var tmpK types.TPlanNodeId + var tmpV bool + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.BoolLength(bool(tmpV))) * len(p.PerNodeSharedScans) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() } - p.LoadErrorHubInfo = tmp - return offset, nil + return l } 
-func (p *TPipelineFragmentParams) FastReadField17(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.FragmentNumOnHost = &v +func (p *TPipelineFragmentParams) field37Length() int { + l := 0 + if p.IsSetParallelInstances() { + l += bthrift.Binary.FieldBeginLength("parallel_instances", thrift.I32, 37) + l += bthrift.Binary.I32Length(*p.ParallelInstances) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField18(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.BackendId = &v +func (p *TPipelineFragmentParams) field38Length() int { + l := 0 + if p.IsSetTotalInstances() { + l += bthrift.Binary.FieldBeginLength("total_instances", thrift.I32, 38) + l += bthrift.Binary.I32Length(*p.TotalInstances) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField19(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.NeedWaitExecutionTrigger = v - +func (p *TPipelineFragmentParams) field39Length() int { + l := 0 + if p.IsSetShuffleIdxToInstanceIdx() { + l += bthrift.Binary.FieldBeginLength("shuffle_idx_to_instance_idx", thrift.MAP, 39) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.ShuffleIdxToInstanceIdx)) + var tmpK int32 + var tmpV int32 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.ShuffleIdxToInstanceIdx) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField20(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field40Length() int { + l := 0 + if p.IsSetIsNereids() { + l += bthrift.Binary.FieldBeginLength("is_nereids", thrift.BOOL, 40) + l += bthrift.Binary.BoolLength(p.IsNereids) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + l += bthrift.Binary.FieldEndLength() } - p.InstancesSharingHashTable = make([]*types.TUniqueId, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTUniqueId() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + return l +} - p.InstancesSharingHashTable = append(p.InstancesSharingHashTable, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *TPipelineFragmentParams) field41Length() int { + l := 0 + if p.IsSetWalId() { + l += bthrift.Binary.FieldBeginLength("wal_id", thrift.I64, 41) + l += bthrift.Binary.I64Length(*p.WalId) + + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField21(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field42Length() int { + l := 0 + if p.IsSetContentLength() { + l += bthrift.Binary.FieldBeginLength("content_length", thrift.I64, 42) + l += bthrift.Binary.I64Length(*p.ContentLength) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() + } + return l +} - p.IsSimplifiedParam = v 
+func (p *TPipelineFragmentParams) field43Length() int { + l := 0 + if p.IsSetCurrentConnectFe() { + l += bthrift.Binary.FieldBeginLength("current_connect_fe", thrift.STRUCT, 43) + l += p.CurrentConnectFe.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} +func (p *TPipelineFragmentParams) field44Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 44) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField22(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParams) field1000Length() int { + l := 0 + if p.IsSetIsMowTable() { + l += bthrift.Binary.FieldBeginLength("is_mow_table", thrift.BOOL, 1000) + l += bthrift.Binary.BoolLength(*p.IsMowTable) - tmp := NewTGlobalDict() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.GlobalDict = tmp - return offset, nil + return l } -func (p *TPipelineFragmentParams) FastReadField23(buf []byte) (int, error) { - offset := 0 +func (p *TPipelineFragmentParamsList) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - tmp := planner.NewTPlanFragment() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l + if err != nil { + goto ReadFieldEndError + } } - p.Fragment = tmp - return offset, nil -} - -func (p *TPipelineFragmentParams) FastReadField24(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) offset += l if err != nil { - return offset, err + goto ReadStructEndError } - p.LocalParams = make([]*TPipelineInstanceParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineInstanceParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.LocalParams = append(p.LocalParams, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d 
error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPipelineFragmentParams) FastReadField26(buf []byte) (int, error) { +func (p *TPipelineFragmentParamsList) FastReadField1(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -16815,16 +22576,16 @@ func (p *TPipelineFragmentParams) FastReadField26(buf []byte) (int, error) { if err != nil { return offset, err } - p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) + p.ParamsList = make([]*TPipelineFragmentParams, 0, size) for i := 0; i < size; i++ { - _elem := NewTPipelineWorkloadGroup() + _elem := NewTPipelineFragmentParams() if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.WorkloadGroups = append(p.WorkloadGroups, _elem) + p.ParamsList = append(p.ParamsList, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -16834,33 +22595,20 @@ func (p *TPipelineFragmentParams) FastReadField26(buf []byte) (int, error) { return offset, nil } -func (p *TPipelineFragmentParams) FastReadField27(buf []byte) (int, error) { +func (p *TPipelineFragmentParamsList) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := NewTTxnParams() + tmp := descriptors.NewTDescriptorTable() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.TxnConf = tmp - return offset, nil -} - -func (p *TPipelineFragmentParams) FastReadField28(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.TableName = &v - - } + p.DescTbl = tmp return offset, nil } -func (p *TPipelineFragmentParams) FastReadField29(buf []byte) (int, error) { +func (p *TPipelineFragmentParamsList) FastReadField3(buf []byte) (int, error) { offset := 0 _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) @@ -16884,400 +22632,239 @@ func (p *TPipelineFragmentParams) FastReadField29(buf []byte) (int, error) { return offset, err } else { offset += l - } - - p.FileScanParams[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TPipelineFragmentParams) FastReadField30(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.GroupCommit = v - - } - return offset, nil -} - -// for compatibility -func (p *TPipelineFragmentParams) FastWrite(buf []byte) int { - return 0 -} - -func (p *TPipelineFragmentParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineFragmentParams") - if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField18(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += 
p.fastWriteField30(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) - offset += p.fastWriteField20(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) - offset += p.fastWriteField23(buf[offset:], binaryWriter) - offset += p.fastWriteField24(buf[offset:], binaryWriter) - offset += p.fastWriteField26(buf[offset:], binaryWriter) - offset += p.fastWriteField27(buf[offset:], binaryWriter) - offset += p.fastWriteField28(buf[offset:], binaryWriter) - offset += p.fastWriteField29(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TPipelineFragmentParams) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TPipelineFragmentParams") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - l += p.field13Length() - l += p.field14Length() - l += p.field15Length() - l += p.field16Length() - l += p.field17Length() - l += p.field18Length() - l += p.field19Length() - l += p.field20Length() - l += p.field21Length() - l += p.field22Length() - l += p.field23Length() - l += p.field24Length() - l += p.field26Length() - l += p.field27Length() - l += p.field28Length() - l += p.field29Length() - l += p.field30Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TPipelineFragmentParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "protocol_version", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.ProtocolVersion)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 2) - offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFragmentId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_id", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "per_exch_num_senders", thrift.MAP, 4) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) - var length int - for k, v := range p.PerExchNumSenders { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) - - offset += bthrift.Binary.WriteI32(buf[offset:], v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDescTbl() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desc_tbl", thrift.STRUCT, 5) - offset += p.DescTbl.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetResourceInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_info", thrift.STRUCT, 6) - offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "destinations", thrift.LIST, 7) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Destinations { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + + p.FileScanParams[_key] = _val } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField4(buf []byte) (int, error) { offset := 0 - if p.IsSetNumSenders() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_senders", thrift.I32, 8) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumSenders) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Coord = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField5(buf []byte) (int, error) { offset := 0 - if p.IsSetSendQueryStatisticsWithEveryBatch() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "send_query_statistics_with_every_batch", thrift.BOOL, 9) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.SendQueryStatisticsWithEveryBatch) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTQueryGlobals() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.QueryGlobals = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField10(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField6(buf []byte) (int, error) { offset := 0 - if p.IsSetCoord() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "coord", thrift.STRUCT, 10) - offset += p.Coord.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} -func (p *TPipelineFragmentParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetQueryGlobals() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_globals", thrift.STRUCT, 11) - offset += p.QueryGlobals.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := types.NewTResourceInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.ResourceInfo = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField7(buf []byte) (int, error) { offset := 0 - if p.IsSetQueryOptions() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 12) - offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} -func (p *TPipelineFragmentParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetImportLabel() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "import_label", thrift.STRING, 13) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ImportLabel) + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentNumOnHost = &v - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField8(buf []byte) (int, error) { offset := 0 - if p.IsSetDbName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 14) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTQueryOptions() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.QueryOptions = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField9(buf []byte) (int, error) { offset := 0 - if p.IsSetLoadJobId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_job_id", thrift.I64, 15) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.LoadJobId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsNereids = v -func (p *TPipelineFragmentParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadErrorHubInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_error_hub_info", thrift.STRUCT, 16) - offset += p.LoadErrorHubInfo.FastWriteNocopy(buf[offset:], binaryWriter) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField10(buf []byte) (int, error) { offset := 0 - if p.IsSetFragmentNumOnHost() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_num_on_host", thrift.I32, 17) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentNumOnHost) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return offset + p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineWorkloadGroup() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.WorkloadGroups = append(p.WorkloadGroups, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField11(buf []byte) (int, error) { offset := 0 - if p.IsSetBackendId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backend_id", thrift.I64, 18) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.QueryId = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField12(buf []byte) (int, error) { offset := 0 - if p.IsSetNeedWaitExecutionTrigger() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "need_wait_execution_trigger", thrift.BOOL, 19) - offset += bthrift.Binary.WriteBool(buf[offset:], p.NeedWaitExecutionTrigger) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return offset -} + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v -func (p *TPipelineFragmentParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetInstancesSharingHashTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "instances_sharing_hash_table", thrift.LIST, 20) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.InstancesSharingHashTable { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) } - return offset + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TPipelineFragmentParams) 
fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastReadField13(buf []byte) (int, error) { offset := 0 - if p.IsSetIsSimplifiedParam() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_simplified_param", thrift.BOOL, 21) - offset += bthrift.Binary.WriteBool(buf[offset:], p.IsSimplifiedParam) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.RuntimeFilterMergeAddr = tmp + return offset, nil } -func (p *TPipelineFragmentParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetGlobalDict() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "global_dict", thrift.STRUCT, 22) - offset += p.GlobalDict.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset +// for compatibility +func (p *TPipelineFragmentParamsList) FastWrite(buf []byte) int { + return 0 } -func (p *TPipelineFragmentParams) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFragment() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment", thrift.STRUCT, 23) - offset += p.Fragment.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineFragmentParamsList") + if p != nil { + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPipelineFragmentParams) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_params", thrift.LIST, 24) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.LocalParams { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) +func (p *TPipelineFragmentParamsList) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPipelineFragmentParamsList") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += 
bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TPipelineFragmentParams) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetWorkloadGroups() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_groups", thrift.LIST, 26) + if p.IsSetParamsList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params_list", thrift.LIST, 1) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.WorkloadGroups { + for _, v := range p.ParamsList { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -17288,31 +22875,20 @@ func (p *TPipelineFragmentParams) fastWriteField26(buf []byte, binaryWriter bthr return offset } -func (p *TPipelineFragmentParams) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTxnConf() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_conf", thrift.STRUCT, 27) - offset += p.TxnConf.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineFragmentParams) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 28) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) - + if p.IsSetDescTbl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desc_tbl", thrift.STRUCT, 2) + offset += p.DescTbl.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPipelineFragmentParams) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetFileScanParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_params", thrift.MAP, 29) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_params", thrift.MAP, 3) mapBeginOffset := offset offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) var length int @@ -17330,489 +22906,270 @@ func (p *TPipelineFragmentParams) fastWriteField29(buf []byte, binaryWriter bthr return offset } -func (p *TPipelineFragmentParams) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPipelineFragmentParamsList) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetGroupCommit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_commit", thrift.BOOL, 30) - offset += bthrift.Binary.WriteBool(buf[offset:], p.GroupCommit) - + if p.IsSetCoord() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "coord", thrift.STRUCT, 4) + offset += p.Coord.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPipelineFragmentParams) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1) 
- l += bthrift.Binary.I32Length(int32(p.ProtocolVersion)) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPipelineFragmentParams) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 2) - l += p.QueryId.BLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPipelineFragmentParams) field3Length() int { - l := 0 - if p.IsSetFragmentId() { - l += bthrift.Binary.FieldBeginLength("fragment_id", thrift.I32, 3) - l += bthrift.Binary.I32Length(*p.FragmentId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPipelineFragmentParams) field4Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("per_exch_num_senders", thrift.MAP, 4) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.PerExchNumSenders)) - var tmpK types.TPlanNodeId - var tmpV int32 - l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.PerExchNumSenders) - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPipelineFragmentParams) field5Length() int { - l := 0 - if p.IsSetDescTbl() { - l += bthrift.Binary.FieldBeginLength("desc_tbl", thrift.STRUCT, 5) - l += p.DescTbl.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TPipelineFragmentParamsList) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryGlobals() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_globals", thrift.STRUCT, 5) + offset += p.QueryGlobals.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field6Length() int { - l := 0 +func (p *TPipelineFragmentParamsList) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 if p.IsSetResourceInfo() { - l += bthrift.Binary.FieldBeginLength("resource_info", thrift.STRUCT, 6) - l += p.ResourceInfo.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPipelineFragmentParams) field7Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("destinations", thrift.LIST, 7) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Destinations)) - for _, v := range p.Destinations { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TPipelineFragmentParams) field8Length() int { - l := 0 - if p.IsSetNumSenders() { - l += bthrift.Binary.FieldBeginLength("num_senders", thrift.I32, 8) - l += bthrift.Binary.I32Length(*p.NumSenders) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPipelineFragmentParams) field9Length() int { - l := 0 - if p.IsSetSendQueryStatisticsWithEveryBatch() { - l += bthrift.Binary.FieldBeginLength("send_query_statistics_with_every_batch", thrift.BOOL, 9) - l += bthrift.Binary.BoolLength(*p.SendQueryStatisticsWithEveryBatch) - - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_info", thrift.STRUCT, 6) + offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field10Length() int { - l := 0 - if p.IsSetCoord() { - l += bthrift.Binary.FieldBeginLength("coord", thrift.STRUCT, 10) - l += p.Coord.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} +func (p 
*TPipelineFragmentParamsList) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentNumOnHost() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_num_on_host", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentNumOnHost) -func (p *TPipelineFragmentParams) field11Length() int { - l := 0 - if p.IsSetQueryGlobals() { - l += bthrift.Binary.FieldBeginLength("query_globals", thrift.STRUCT, 11) - l += p.QueryGlobals.BLength() - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field12Length() int { - l := 0 +func (p *TPipelineFragmentParamsList) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 if p.IsSetQueryOptions() { - l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 12) - l += p.QueryOptions.BLength() - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 8) + offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field13Length() int { - l := 0 - if p.IsSetImportLabel() { - l += bthrift.Binary.FieldBeginLength("import_label", thrift.STRING, 13) - l += bthrift.Binary.StringLengthNocopy(*p.ImportLabel) +func (p *TPipelineFragmentParamsList) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNereids() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_nereids", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsNereids) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field14Length() int { - l := 0 - if p.IsSetDbName() { - l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 14) - l += bthrift.Binary.StringLengthNocopy(*p.DbName) - - l += bthrift.Binary.FieldEndLength() +func (p *TPipelineFragmentParamsList) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadGroups() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_groups", thrift.LIST, 10) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.WorkloadGroups { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field15Length() int { - l := 0 - if p.IsSetLoadJobId() { - l += bthrift.Binary.FieldBeginLength("load_job_id", thrift.I64, 15) - l += bthrift.Binary.I64Length(*p.LoadJobId) - - l += bthrift.Binary.FieldEndLength() +func (p *TPipelineFragmentParamsList) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 11) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field16Length() int { - l := 0 - if 
p.IsSetLoadErrorHubInfo() { - l += bthrift.Binary.FieldBeginLength("load_error_hub_info", thrift.STRUCT, 16) - l += p.LoadErrorHubInfo.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TPipelineFragmentParamsList) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 12) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field17Length() int { - l := 0 - if p.IsSetFragmentNumOnHost() { - l += bthrift.Binary.FieldBeginLength("fragment_num_on_host", thrift.I32, 17) - l += bthrift.Binary.I32Length(*p.FragmentNumOnHost) - - l += bthrift.Binary.FieldEndLength() +func (p *TPipelineFragmentParamsList) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRuntimeFilterMergeAddr() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_merge_addr", thrift.STRUCT, 13) + offset += p.RuntimeFilterMergeAddr.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TPipelineFragmentParams) field18Length() int { +func (p *TPipelineFragmentParamsList) field1Length() int { l := 0 - if p.IsSetBackendId() { - l += bthrift.Binary.FieldBeginLength("backend_id", thrift.I64, 18) - l += bthrift.Binary.I64Length(*p.BackendId) - + if p.IsSetParamsList() { + l += bthrift.Binary.FieldBeginLength("params_list", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ParamsList)) + for _, v := range p.ParamsList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field19Length() int { +func (p *TPipelineFragmentParamsList) field2Length() int { l := 0 - if p.IsSetNeedWaitExecutionTrigger() { - l += bthrift.Binary.FieldBeginLength("need_wait_execution_trigger", thrift.BOOL, 19) - l += bthrift.Binary.BoolLength(p.NeedWaitExecutionTrigger) - + if p.IsSetDescTbl() { + l += bthrift.Binary.FieldBeginLength("desc_tbl", thrift.STRUCT, 2) + l += p.DescTbl.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field20Length() int { +func (p *TPipelineFragmentParamsList) field3Length() int { l := 0 - if p.IsSetInstancesSharingHashTable() { - l += bthrift.Binary.FieldBeginLength("instances_sharing_hash_table", thrift.LIST, 20) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.InstancesSharingHashTable)) - for _, v := range p.InstancesSharingHashTable { + if p.IsSetFileScanParams() { + l += bthrift.Binary.FieldBeginLength("file_scan_params", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.FileScanParams)) + for k, v := range p.FileScanParams { + + l += bthrift.Binary.I32Length(k) + l += v.BLength() } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field21Length() int { +func (p 
*TPipelineFragmentParamsList) field4Length() int { l := 0 - if p.IsSetIsSimplifiedParam() { - l += bthrift.Binary.FieldBeginLength("is_simplified_param", thrift.BOOL, 21) - l += bthrift.Binary.BoolLength(p.IsSimplifiedParam) - + if p.IsSetCoord() { + l += bthrift.Binary.FieldBeginLength("coord", thrift.STRUCT, 4) + l += p.Coord.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field22Length() int { +func (p *TPipelineFragmentParamsList) field5Length() int { l := 0 - if p.IsSetGlobalDict() { - l += bthrift.Binary.FieldBeginLength("global_dict", thrift.STRUCT, 22) - l += p.GlobalDict.BLength() + if p.IsSetQueryGlobals() { + l += bthrift.Binary.FieldBeginLength("query_globals", thrift.STRUCT, 5) + l += p.QueryGlobals.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field23Length() int { +func (p *TPipelineFragmentParamsList) field6Length() int { l := 0 - if p.IsSetFragment() { - l += bthrift.Binary.FieldBeginLength("fragment", thrift.STRUCT, 23) - l += p.Fragment.BLength() + if p.IsSetResourceInfo() { + l += bthrift.Binary.FieldBeginLength("resource_info", thrift.STRUCT, 6) + l += p.ResourceInfo.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field24Length() int { +func (p *TPipelineFragmentParamsList) field7Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("local_params", thrift.LIST, 24) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.LocalParams)) - for _, v := range p.LocalParams { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} + if p.IsSetFragmentNumOnHost() { + l += bthrift.Binary.FieldBeginLength("fragment_num_on_host", thrift.I32, 7) + l += bthrift.Binary.I32Length(*p.FragmentNumOnHost) -func (p *TPipelineFragmentParams) field26Length() int { - l := 0 - if p.IsSetWorkloadGroups() { - l += bthrift.Binary.FieldBeginLength("workload_groups", thrift.LIST, 26) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.WorkloadGroups)) - for _, v := range p.WorkloadGroups { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field27Length() int { +func (p *TPipelineFragmentParamsList) field8Length() int { l := 0 - if p.IsSetTxnConf() { - l += bthrift.Binary.FieldBeginLength("txn_conf", thrift.STRUCT, 27) - l += p.TxnConf.BLength() + if p.IsSetQueryOptions() { + l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 8) + l += p.QueryOptions.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field28Length() int { +func (p *TPipelineFragmentParamsList) field9Length() int { l := 0 - if p.IsSetTableName() { - l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 28) - l += bthrift.Binary.StringLengthNocopy(*p.TableName) + if p.IsSetIsNereids() { + l += bthrift.Binary.FieldBeginLength("is_nereids", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(p.IsNereids) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field29Length() int { +func (p *TPipelineFragmentParamsList) field10Length() int { l := 0 - if p.IsSetFileScanParams() { - l += bthrift.Binary.FieldBeginLength("file_scan_params", thrift.MAP, 29) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.FileScanParams)) - for k, v := range p.FileScanParams { - - l += bthrift.Binary.I32Length(k) - + if 
p.IsSetWorkloadGroups() { + l += bthrift.Binary.FieldBeginLength("workload_groups", thrift.LIST, 10) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.WorkloadGroups)) + for _, v := range p.WorkloadGroups { l += v.BLength() } - l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParams) field30Length() int { +func (p *TPipelineFragmentParamsList) field11Length() int { l := 0 - if p.IsSetGroupCommit() { - l += bthrift.Binary.FieldBeginLength("group_commit", thrift.BOOL, 30) - l += bthrift.Binary.BoolLength(p.GroupCommit) - + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 11) + l += p.QueryId.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPipelineFragmentParamsList) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TPipelineFragmentParamsList) FastReadField1(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.ParamsList = make([]*TPipelineFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineFragmentParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.ParamsList = append(p.ParamsList, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -// for compatibility -func (p *TPipelineFragmentParamsList) FastWrite(buf []byte) int { - return 0 -} - -func (p *TPipelineFragmentParamsList) FastWriteNocopy(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineFragmentParamsList") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TPipelineFragmentParamsList) BLength() int { +func (p *TPipelineFragmentParamsList) field12Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPipelineFragmentParamsList") - if p != nil { - l += p.field1Length() + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 12) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() return l } -func (p *TPipelineFragmentParamsList) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetParamsList() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params_list", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.ParamsList { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPipelineFragmentParamsList) field1Length() int { +func (p *TPipelineFragmentParamsList) field13Length() int { l := 0 - if p.IsSetParamsList() { - l += bthrift.Binary.FieldBeginLength("params_list", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ParamsList)) - for _, v := range p.ParamsList { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetRuntimeFilterMergeAddr() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_merge_addr", thrift.STRUCT, 13) + l += p.RuntimeFilterMergeAddr.BLength() l += bthrift.Binary.FieldEndLength() } return l diff --git a/pkg/rpc/kitex_gen/paloservice/PaloService.go b/pkg/rpc/kitex_gen/paloservice/PaloService.go index c2f54e8b..5b0602da 100644 --- a/pkg/rpc/kitex_gen/paloservice/PaloService.go +++ b/pkg/rpc/kitex_gen/paloservice/PaloService.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package paloservice diff --git a/pkg/rpc/kitex_gen/paloservice/k-PaloService.go b/pkg/rpc/kitex_gen/paloservice/k-PaloService.go index 14d45195..88ff1849 100644 --- a/pkg/rpc/kitex_gen/paloservice/k-PaloService.go +++ b/pkg/rpc/kitex_gen/paloservice/k-PaloService.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package paloservice @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status" ) diff --git a/pkg/rpc/kitex_gen/partitions/Partitions.go b/pkg/rpc/kitex_gen/partitions/Partitions.go index e95ec514..ec3a10c1 100644 --- a/pkg/rpc/kitex_gen/partitions/Partitions.go +++ b/pkg/rpc/kitex_gen/partitions/Partitions.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package partitions @@ -21,6 +21,9 @@ const ( TPartitionType_RANGE_PARTITIONED TPartitionType = 3 TPartitionType_LIST_PARTITIONED TPartitionType = 4 TPartitionType_BUCKET_SHFFULE_HASH_PARTITIONED TPartitionType = 5 + TPartitionType_TABLET_SINK_SHUFFLE_PARTITIONED TPartitionType = 6 + TPartitionType_TABLE_SINK_HASH_PARTITIONED TPartitionType = 7 + TPartitionType_TABLE_SINK_RANDOM_PARTITIONED TPartitionType = 8 ) func (p TPartitionType) String() string { @@ -37,6 +40,12 @@ func (p TPartitionType) String() string { return "LIST_PARTITIONED" case TPartitionType_BUCKET_SHFFULE_HASH_PARTITIONED: return "BUCKET_SHFFULE_HASH_PARTITIONED" + case TPartitionType_TABLET_SINK_SHUFFLE_PARTITIONED: + return "TABLET_SINK_SHUFFLE_PARTITIONED" + case TPartitionType_TABLE_SINK_HASH_PARTITIONED: + return "TABLE_SINK_HASH_PARTITIONED" + case TPartitionType_TABLE_SINK_RANDOM_PARTITIONED: + return "TABLE_SINK_RANDOM_PARTITIONED" } return "" } @@ -55,6 +64,12 @@ func TPartitionTypeFromString(s string) (TPartitionType, error) { return TPartitionType_LIST_PARTITIONED, nil case "BUCKET_SHFFULE_HASH_PARTITIONED": return TPartitionType_BUCKET_SHFFULE_HASH_PARTITIONED, nil + case "TABLET_SINK_SHUFFLE_PARTITIONED": + return TPartitionType_TABLET_SINK_SHUFFLE_PARTITIONED, nil + case "TABLE_SINK_HASH_PARTITIONED": + return TPartitionType_TABLE_SINK_HASH_PARTITIONED, nil + case "TABLE_SINK_RANDOM_PARTITIONED": + return TPartitionType_TABLE_SINK_RANDOM_PARTITIONED, nil } return TPartitionType(0), fmt.Errorf("not a valid TPartitionType string") } @@ -132,7 +147,6 @@ func NewTPartitionKey() *TPartitionKey { } func (p *TPartitionKey) InitDefault() { - *p = TPartitionKey{} } func (p *TPartitionKey) GetSign() (v int16) { @@ -206,37 +220,30 @@ func (p *TPartitionKey) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSign = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -268,30 +275,37 @@ RequiredFieldNotSetError: } func (p *TPartitionKey) ReadField1(iprot thrift.TProtocol) error { + + var _field int16 if v, err := iprot.ReadI16(); err != nil { return err } else { - p.Sign = v + _field = v } + p.Sign = _field return nil } - func (p *TPartitionKey) ReadField2(iprot 
thrift.TProtocol) error { + + var _field *types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TPrimitiveType(v) - p.Type = &tmp + _field = &tmp } + p.Type = _field return nil } - func (p *TPartitionKey) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Key = &v + _field = &v } + p.Key = _field return nil } @@ -313,7 +327,6 @@ func (p *TPartitionKey) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -392,6 +405,7 @@ func (p *TPartitionKey) String() string { return "" } return fmt.Sprintf("TPartitionKey(%+v)", *p) + } func (p *TPartitionKey) DeepEqual(ano *TPartitionKey) bool { @@ -456,7 +470,6 @@ func NewTPartitionRange() *TPartitionRange { } func (p *TPartitionRange) InitDefault() { - *p = TPartitionRange{} } var TPartitionRange_StartKey_DEFAULT *TPartitionKey @@ -541,10 +554,8 @@ func (p *TPartitionRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStartKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -552,10 +563,8 @@ func (p *TPartitionRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetEndKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { @@ -563,10 +572,8 @@ func (p *TPartitionRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIncludeStartKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { @@ -574,17 +581,14 @@ func (p *TPartitionRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIncludeEndKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -631,36 +635,41 @@ RequiredFieldNotSetError: } func (p *TPartitionRange) ReadField1(iprot thrift.TProtocol) error { - p.StartKey = NewTPartitionKey() - if err := p.StartKey.Read(iprot); err != nil { + _field := NewTPartitionKey() + if err := _field.Read(iprot); err != nil { return err } + p.StartKey = _field return nil } - func (p *TPartitionRange) ReadField2(iprot thrift.TProtocol) error { - p.EndKey = NewTPartitionKey() - if err := p.EndKey.Read(iprot); err != nil { + _field := NewTPartitionKey() + if err := _field.Read(iprot); err != nil { return err } + p.EndKey = _field return nil } - func (p *TPartitionRange) ReadField3(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IncludeStartKey = v + _field = v } + p.IncludeStartKey = _field return nil } - func (p *TPartitionRange) ReadField4(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IncludeEndKey = v + _field = v } + p.IncludeEndKey = _field return nil } 
@@ -686,7 +695,6 @@ func (p *TPartitionRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -778,6 +786,7 @@ func (p *TPartitionRange) String() string { return "" } return fmt.Sprintf("TPartitionRange(%+v)", *p) + } func (p *TPartitionRange) DeepEqual(ano *TPartitionRange) bool { @@ -842,7 +851,6 @@ func NewTRangePartition() *TRangePartition { } func (p *TRangePartition) InitDefault() { - *p = TRangePartition{} } func (p *TRangePartition) GetPartitionId() (v int64) { @@ -934,10 +942,8 @@ func (p *TRangePartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartitionId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -945,37 +951,30 @@ func (p *TRangePartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRange = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1012,48 +1011,56 @@ RequiredFieldNotSetError: } func (p *TRangePartition) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionId = v + _field = v } + p.PartitionId = _field return nil } - func (p *TRangePartition) ReadField2(iprot thrift.TProtocol) error { - p.Range = NewTPartitionRange() - if err := p.Range.Read(iprot); err != nil { + _field := NewTPartitionRange() + if err := _field.Read(iprot); err != nil { return err } + p.Range = _field return nil } - func (p *TRangePartition) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DistributedExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.DistributedExprs = append(p.DistributedExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DistributedExprs = _field return nil } - func (p *TRangePartition) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DistributeBucket = &v + _field = &v } + p.DistributeBucket = _field return nil } @@ -1079,7 +1086,6 @@ func (p *TRangePartition) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1183,6 +1189,7 @@ 
func (p *TRangePartition) String() string { return "" } return fmt.Sprintf("TRangePartition(%+v)", *p) + } func (p *TRangePartition) DeepEqual(ano *TRangePartition) bool { @@ -1257,7 +1264,6 @@ func NewTDataPartition() *TDataPartition { } func (p *TDataPartition) InitDefault() { - *p = TDataPartition{} } func (p *TDataPartition) GetType() (v TPartitionType) { @@ -1331,37 +1337,30 @@ func (p *TDataPartition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1393,51 +1392,60 @@ RequiredFieldNotSetError: } func (p *TDataPartition) ReadField1(iprot thrift.TProtocol) error { + + var _field TPartitionType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TPartitionType(v) + _field = TPartitionType(v) } + p.Type = _field return nil } - func (p *TDataPartition) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionExprs = append(p.PartitionExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionExprs = _field return nil } - func (p *TDataPartition) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionInfos = make([]*TRangePartition, 0, size) + _field := make([]*TRangePartition, 0, size) + values := make([]TRangePartition, size) for i := 0; i < size; i++ { - _elem := NewTRangePartition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionInfos = append(p.PartitionInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionInfos = _field return nil } @@ -1459,7 +1467,6 @@ func (p *TDataPartition) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1554,6 +1561,7 @@ func (p *TDataPartition) String() string { return "" } return fmt.Sprintf("TDataPartition(%+v)", *p) + } func (p *TDataPartition) DeepEqual(ano *TDataPartition) bool { diff --git a/pkg/rpc/kitex_gen/partitions/k-Partitions.go b/pkg/rpc/kitex_gen/partitions/k-Partitions.go index 37e3ed7e..a4a794c6 100644 --- a/pkg/rpc/kitex_gen/partitions/k-Partitions.go +++ b/pkg/rpc/kitex_gen/partitions/k-Partitions.go @@ -1,4 +1,4 @@ -// Code 
generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package partitions @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) diff --git a/pkg/rpc/kitex_gen/planner/Planner.go b/pkg/rpc/kitex_gen/planner/Planner.go index 282b3a03..89dbe493 100644 --- a/pkg/rpc/kitex_gen/planner/Planner.go +++ b/pkg/rpc/kitex_gen/planner/Planner.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package planner @@ -9,16 +9,18 @@ import ( "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/partitions" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/querycache" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) type TPlanFragment struct { - Plan *plannodes.TPlan `thrift:"plan,2,optional" frugal:"2,optional,plannodes.TPlan" json:"plan,omitempty"` - OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"` - OutputSink *datasinks.TDataSink `thrift:"output_sink,5,optional" frugal:"5,optional,datasinks.TDataSink" json:"output_sink,omitempty"` - Partition *partitions.TDataPartition `thrift:"partition,6,required" frugal:"6,required,partitions.TDataPartition" json:"partition"` - MinReservationBytes *int64 `thrift:"min_reservation_bytes,7,optional" frugal:"7,optional,i64" json:"min_reservation_bytes,omitempty"` - InitialReservationTotalClaims *int64 `thrift:"initial_reservation_total_claims,8,optional" frugal:"8,optional,i64" json:"initial_reservation_total_claims,omitempty"` + Plan *plannodes.TPlan `thrift:"plan,2,optional" frugal:"2,optional,plannodes.TPlan" json:"plan,omitempty"` + OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"` + OutputSink *datasinks.TDataSink `thrift:"output_sink,5,optional" frugal:"5,optional,datasinks.TDataSink" json:"output_sink,omitempty"` + Partition *partitions.TDataPartition `thrift:"partition,6,required" frugal:"6,required,partitions.TDataPartition" json:"partition"` + MinReservationBytes *int64 `thrift:"min_reservation_bytes,7,optional" frugal:"7,optional,i64" json:"min_reservation_bytes,omitempty"` + InitialReservationTotalClaims *int64 `thrift:"initial_reservation_total_claims,8,optional" frugal:"8,optional,i64" json:"initial_reservation_total_claims,omitempty"` + QueryCacheParam *querycache.TQueryCacheParam `thrift:"query_cache_param,9,optional" frugal:"9,optional,querycache.TQueryCacheParam" json:"query_cache_param,omitempty"` } func NewTPlanFragment() *TPlanFragment { @@ -26,7 +28,6 @@ func NewTPlanFragment() *TPlanFragment { } func (p *TPlanFragment) InitDefault() { - *p = TPlanFragment{} } var TPlanFragment_Plan_DEFAULT *plannodes.TPlan @@ -82,6 +83,15 @@ func (p *TPlanFragment) GetInitialReservationTotalClaims() (v int64) { } return *p.InitialReservationTotalClaims } + +var TPlanFragment_QueryCacheParam_DEFAULT *querycache.TQueryCacheParam + +func (p *TPlanFragment) GetQueryCacheParam() (v *querycache.TQueryCacheParam) { + if !p.IsSetQueryCacheParam() { + return TPlanFragment_QueryCacheParam_DEFAULT + } + return p.QueryCacheParam +} func (p *TPlanFragment) SetPlan(val *plannodes.TPlan) { p.Plan = val } @@ -100,6 +110,9 @@ func (p 
*TPlanFragment) SetMinReservationBytes(val *int64) { func (p *TPlanFragment) SetInitialReservationTotalClaims(val *int64) { p.InitialReservationTotalClaims = val } +func (p *TPlanFragment) SetQueryCacheParam(val *querycache.TQueryCacheParam) { + p.QueryCacheParam = val +} var fieldIDToName_TPlanFragment = map[int16]string{ 2: "plan", @@ -108,6 +121,7 @@ var fieldIDToName_TPlanFragment = map[int16]string{ 6: "partition", 7: "min_reservation_bytes", 8: "initial_reservation_total_claims", + 9: "query_cache_param", } func (p *TPlanFragment) IsSetPlan() bool { @@ -134,6 +148,10 @@ func (p *TPlanFragment) IsSetInitialReservationTotalClaims() bool { return p.InitialReservationTotalClaims != nil } +func (p *TPlanFragment) IsSetQueryCacheParam() bool { + return p.QueryCacheParam != nil +} + func (p *TPlanFragment) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -159,30 +177,24 @@ func (p *TPlanFragment) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { @@ -190,37 +202,38 @@ func (p *TPlanFragment) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartition = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -252,64 +265,80 @@ RequiredFieldNotSetError: } func (p *TPlanFragment) ReadField2(iprot thrift.TProtocol) error { - p.Plan = plannodes.NewTPlan() - if err := p.Plan.Read(iprot); err != nil { + _field := plannodes.NewTPlan() + if err := _field.Read(iprot); err != nil { return err } + p.Plan = _field return nil } - func (p *TPlanFragment) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OutputExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, 
size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OutputExprs = append(p.OutputExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OutputExprs = _field return nil } - func (p *TPlanFragment) ReadField5(iprot thrift.TProtocol) error { - p.OutputSink = datasinks.NewTDataSink() - if err := p.OutputSink.Read(iprot); err != nil { + _field := datasinks.NewTDataSink() + if err := _field.Read(iprot); err != nil { return err } + p.OutputSink = _field return nil } - func (p *TPlanFragment) ReadField6(iprot thrift.TProtocol) error { - p.Partition = partitions.NewTDataPartition() - if err := p.Partition.Read(iprot); err != nil { + _field := partitions.NewTDataPartition() + if err := _field.Read(iprot); err != nil { return err } + p.Partition = _field return nil } - func (p *TPlanFragment) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MinReservationBytes = &v + _field = &v } + p.MinReservationBytes = _field return nil } - func (p *TPlanFragment) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InitialReservationTotalClaims = &v + _field = &v } + p.InitialReservationTotalClaims = _field + return nil +} +func (p *TPlanFragment) ReadField9(iprot thrift.TProtocol) error { + _field := querycache.NewTQueryCacheParam() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryCacheParam = _field return nil } @@ -343,7 +372,10 @@ func (p *TPlanFragment) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -482,11 +514,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } +func (p *TPlanFragment) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryCacheParam() { + if err = oprot.WriteFieldBegin("query_cache_param", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryCacheParam.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + func (p *TPlanFragment) String() string { if p == nil { return "" } return fmt.Sprintf("TPlanFragment(%+v)", *p) + } func (p *TPlanFragment) DeepEqual(ano *TPlanFragment) bool { @@ -513,6 +565,9 @@ func (p *TPlanFragment) DeepEqual(ano *TPlanFragment) bool { if !p.Field8DeepEqual(ano.InitialReservationTotalClaims) { return false } + if !p.Field9DeepEqual(ano.QueryCacheParam) { + return false + } return true } @@ -574,6 +629,13 @@ func (p *TPlanFragment) Field8DeepEqual(src *int64) bool { } return true } +func (p *TPlanFragment) Field9DeepEqual(src *querycache.TQueryCacheParam) bool { + + if !p.QueryCacheParam.DeepEqual(src) { + return false + } + return true +} type TScanRangeLocation struct { Server *types.TNetworkAddress `thrift:"server,1,required" frugal:"1,required,types.TNetworkAddress" json:"server"` @@ -589,10 +651,7 @@ func NewTScanRangeLocation() 
*TScanRangeLocation { } func (p *TScanRangeLocation) InitDefault() { - *p = TScanRangeLocation{ - - VolumeId: -1, - } + p.VolumeId = -1 } var TScanRangeLocation_Server_DEFAULT *types.TNetworkAddress @@ -675,37 +734,30 @@ func (p *TScanRangeLocation) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetServer = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -737,28 +789,33 @@ RequiredFieldNotSetError: } func (p *TScanRangeLocation) ReadField1(iprot thrift.TProtocol) error { - p.Server = types.NewTNetworkAddress() - if err := p.Server.Read(iprot); err != nil { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { return err } + p.Server = _field return nil } - func (p *TScanRangeLocation) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VolumeId = v + _field = v } + p.VolumeId = _field return nil } - func (p *TScanRangeLocation) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BackendId = &v + _field = &v } + p.BackendId = _field return nil } @@ -780,7 +837,6 @@ func (p *TScanRangeLocation) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -859,6 +915,7 @@ func (p *TScanRangeLocation) String() string { return "" } return fmt.Sprintf("TScanRangeLocation(%+v)", *p) + } func (p *TScanRangeLocation) DeepEqual(ano *TScanRangeLocation) bool { @@ -916,7 +973,6 @@ func NewTScanRangeLocations() *TScanRangeLocations { } func (p *TScanRangeLocations) InitDefault() { - *p = TScanRangeLocations{} } var TScanRangeLocations_ScanRange_DEFAULT *plannodes.TScanRange @@ -973,27 +1029,22 @@ func (p *TScanRangeLocations) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetScanRange = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1025,30 +1076,34 @@ RequiredFieldNotSetError: } func (p *TScanRangeLocations) ReadField1(iprot thrift.TProtocol) error { - p.ScanRange = plannodes.NewTScanRange() - if err := p.ScanRange.Read(iprot); err != nil { + 
_field := plannodes.NewTScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.ScanRange = _field return nil } - func (p *TScanRangeLocations) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Locations = make([]*TScanRangeLocation, 0, size) + _field := make([]*TScanRangeLocation, 0, size) + values := make([]TScanRangeLocation, size) for i := 0; i < size; i++ { - _elem := NewTScanRangeLocation() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Locations = append(p.Locations, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Locations = _field return nil } @@ -1066,7 +1121,6 @@ func (p *TScanRangeLocations) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1132,6 +1186,7 @@ func (p *TScanRangeLocations) String() string { return "" } return fmt.Sprintf("TScanRangeLocations(%+v)", *p) + } func (p *TScanRangeLocations) DeepEqual(ano *TScanRangeLocations) bool { diff --git a/pkg/rpc/kitex_gen/planner/k-Planner.go b/pkg/rpc/kitex_gen/planner/k-Planner.go index 093b9181..7e85e2c9 100644 --- a/pkg/rpc/kitex_gen/planner/k-Planner.go +++ b/pkg/rpc/kitex_gen/planner/k-Planner.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package planner @@ -11,10 +11,12 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/datasinks" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/partitions" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/querycache" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types" ) @@ -30,6 +32,7 @@ var ( _ = exprs.KitexUnusedProtection _ = partitions.KitexUnusedProtection _ = plannodes.KitexUnusedProtection + _ = querycache.KitexUnusedProtection _ = types.KitexUnusedProtection ) @@ -141,6 +144,20 @@ func (p *TPlanFragment) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -274,6 +291,19 @@ func (p *TPlanFragment) FastReadField8(buf []byte) (int, error) { return offset, nil } +func (p *TPlanFragment) FastReadField9(buf []byte) (int, error) { + offset := 0 + + tmp := querycache.NewTQueryCacheParam() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryCacheParam = tmp + return offset, nil +} + // for compatibility func (p *TPlanFragment) FastWrite(buf []byte) int { return 0 @@ -289,6 +319,7 @@ func (p *TPlanFragment) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += 
bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -305,6 +336,7 @@ func (p *TPlanFragment) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() + l += p.field9Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -379,6 +411,16 @@ func (p *TPlanFragment) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryW return offset } +func (p *TPlanFragment) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryCacheParam() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_cache_param", thrift.STRUCT, 9) + offset += p.QueryCacheParam.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPlanFragment) field2Length() int { l := 0 if p.IsSetPlan() { @@ -443,6 +485,16 @@ func (p *TPlanFragment) field8Length() int { return l } +func (p *TPlanFragment) field9Length() int { + l := 0 + if p.IsSetQueryCacheParam() { + l += bthrift.Binary.FieldBeginLength("query_cache_param", thrift.STRUCT, 9) + l += p.QueryCacheParam.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TScanRangeLocation) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go index a4d3a459..d542dbc3 100644 --- a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package plannodes @@ -337,6 +337,7 @@ const ( TFileFormatType_FORMAT_CSV_LZ4BLOCK TFileFormatType = 13 TFileFormatType_FORMAT_CSV_SNAPPYBLOCK TFileFormatType = 14 TFileFormatType_FORMAT_WAL TFileFormatType = 15 + TFileFormatType_FORMAT_ARROW TFileFormatType = 16 ) func (p TFileFormatType) String() string { @@ -375,6 +376,8 @@ func (p TFileFormatType) String() string { return "FORMAT_CSV_SNAPPYBLOCK" case TFileFormatType_FORMAT_WAL: return "FORMAT_WAL" + case TFileFormatType_FORMAT_ARROW: + return "FORMAT_ARROW" } return "" } @@ -415,6 +418,8 @@ func TFileFormatTypeFromString(s string) (TFileFormatType, error) { return TFileFormatType_FORMAT_CSV_SNAPPYBLOCK, nil case "FORMAT_WAL": return TFileFormatType_FORMAT_WAL, nil + case "FORMAT_ARROW": + return TFileFormatType_FORMAT_ARROW, nil } return TFileFormatType(0), fmt.Errorf("not a valid TFileFormatType string") } @@ -447,6 +452,8 @@ const ( TFileCompressType_LZOP TFileCompressType = 7 TFileCompressType_LZ4BLOCK TFileCompressType = 8 TFileCompressType_SNAPPYBLOCK TFileCompressType = 9 + TFileCompressType_ZLIB TFileCompressType = 10 + TFileCompressType_ZSTD TFileCompressType = 11 ) func (p TFileCompressType) String() string { @@ -471,6 +478,10 @@ func (p TFileCompressType) String() string { return "LZ4BLOCK" case TFileCompressType_SNAPPYBLOCK: return "SNAPPYBLOCK" + case TFileCompressType_ZLIB: + return "ZLIB" + case TFileCompressType_ZSTD: + return "ZSTD" } return "" } @@ -497,6 +508,10 @@ func TFileCompressTypeFromString(s string) (TFileCompressType, error) { return TFileCompressType_LZ4BLOCK, nil case "SNAPPYBLOCK": return TFileCompressType_SNAPPYBLOCK, nil + case "ZLIB": + return TFileCompressType_ZLIB, nil + case "ZSTD": + return TFileCompressType_ZSTD, nil } return TFileCompressType(0), fmt.Errorf("not a valid TFileCompressType string") } @@ -666,6 +681,7 @@ const ( TJoinOp_LEFT_ANTI_JOIN TJoinOp = 8 TJoinOp_RIGHT_ANTI_JOIN TJoinOp = 9 
TJoinOp_NULL_AWARE_LEFT_ANTI_JOIN TJoinOp = 10 + TJoinOp_NULL_AWARE_LEFT_SEMI_JOIN TJoinOp = 11 ) func (p TJoinOp) String() string { @@ -692,6 +708,8 @@ func (p TJoinOp) String() string { return "RIGHT_ANTI_JOIN" case TJoinOp_NULL_AWARE_LEFT_ANTI_JOIN: return "NULL_AWARE_LEFT_ANTI_JOIN" + case TJoinOp_NULL_AWARE_LEFT_SEMI_JOIN: + return "NULL_AWARE_LEFT_SEMI_JOIN" } return "" } @@ -720,6 +738,8 @@ func TJoinOpFromString(s string) (TJoinOp, error) { return TJoinOp_RIGHT_ANTI_JOIN, nil case "NULL_AWARE_LEFT_ANTI_JOIN": return TJoinOp_NULL_AWARE_LEFT_ANTI_JOIN, nil + case "NULL_AWARE_LEFT_SEMI_JOIN": + return TJoinOp_NULL_AWARE_LEFT_SEMI_JOIN, nil } return TJoinOp(0), fmt.Errorf("not a valid TJoinOp string") } @@ -739,6 +759,63 @@ func (p *TJoinOp) Value() (driver.Value, error) { return int64(*p), nil } +type TJoinDistributionType int64 + +const ( + TJoinDistributionType_NONE TJoinDistributionType = 0 + TJoinDistributionType_BROADCAST TJoinDistributionType = 1 + TJoinDistributionType_PARTITIONED TJoinDistributionType = 2 + TJoinDistributionType_BUCKET_SHUFFLE TJoinDistributionType = 3 + TJoinDistributionType_COLOCATE TJoinDistributionType = 4 +) + +func (p TJoinDistributionType) String() string { + switch p { + case TJoinDistributionType_NONE: + return "NONE" + case TJoinDistributionType_BROADCAST: + return "BROADCAST" + case TJoinDistributionType_PARTITIONED: + return "PARTITIONED" + case TJoinDistributionType_BUCKET_SHUFFLE: + return "BUCKET_SHUFFLE" + case TJoinDistributionType_COLOCATE: + return "COLOCATE" + } + return "" +} + +func TJoinDistributionTypeFromString(s string) (TJoinDistributionType, error) { + switch s { + case "NONE": + return TJoinDistributionType_NONE, nil + case "BROADCAST": + return TJoinDistributionType_BROADCAST, nil + case "PARTITIONED": + return TJoinDistributionType_PARTITIONED, nil + case "BUCKET_SHUFFLE": + return TJoinDistributionType_BUCKET_SHUFFLE, nil + case "COLOCATE": + return TJoinDistributionType_COLOCATE, nil + } + return TJoinDistributionType(0), fmt.Errorf("not a valid TJoinDistributionType string") +} + +func TJoinDistributionTypePtr(v TJoinDistributionType) *TJoinDistributionType { return &v } +func (p *TJoinDistributionType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TJoinDistributionType(result.Int64) + return +} + +func (p *TJoinDistributionType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TAggregationOp int64 const ( @@ -876,6 +953,53 @@ func (p *TAggregationOp) Value() (driver.Value, error) { return int64(*p), nil } +type TSortAlgorithm int64 + +const ( + TSortAlgorithm_HEAP_SORT TSortAlgorithm = 0 + TSortAlgorithm_TOPN_SORT TSortAlgorithm = 1 + TSortAlgorithm_FULL_SORT TSortAlgorithm = 2 +) + +func (p TSortAlgorithm) String() string { + switch p { + case TSortAlgorithm_HEAP_SORT: + return "HEAP_SORT" + case TSortAlgorithm_TOPN_SORT: + return "TOPN_SORT" + case TSortAlgorithm_FULL_SORT: + return "FULL_SORT" + } + return "" +} + +func TSortAlgorithmFromString(s string) (TSortAlgorithm, error) { + switch s { + case "HEAP_SORT": + return TSortAlgorithm_HEAP_SORT, nil + case "TOPN_SORT": + return TSortAlgorithm_TOPN_SORT, nil + case "FULL_SORT": + return TSortAlgorithm_FULL_SORT, nil + } + return TSortAlgorithm(0), fmt.Errorf("not a valid TSortAlgorithm string") +} + +func TSortAlgorithmPtr(v TSortAlgorithm) *TSortAlgorithm { return &v } +func (p *TSortAlgorithm) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = 
result.Scan(value) + *p = TSortAlgorithm(result.Int64) + return +} + +func (p *TSortAlgorithm) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TopNAlgorithm int64 const ( @@ -923,6 +1047,58 @@ func (p *TopNAlgorithm) Value() (driver.Value, error) { return int64(*p), nil } +type TPartTopNPhase int64 + +const ( + TPartTopNPhase_UNKNOWN TPartTopNPhase = 0 + TPartTopNPhase_ONE_PHASE_GLOBAL TPartTopNPhase = 1 + TPartTopNPhase_TWO_PHASE_LOCAL TPartTopNPhase = 2 + TPartTopNPhase_TWO_PHASE_GLOBAL TPartTopNPhase = 3 +) + +func (p TPartTopNPhase) String() string { + switch p { + case TPartTopNPhase_UNKNOWN: + return "UNKNOWN" + case TPartTopNPhase_ONE_PHASE_GLOBAL: + return "ONE_PHASE_GLOBAL" + case TPartTopNPhase_TWO_PHASE_LOCAL: + return "TWO_PHASE_LOCAL" + case TPartTopNPhase_TWO_PHASE_GLOBAL: + return "TWO_PHASE_GLOBAL" + } + return "" +} + +func TPartTopNPhaseFromString(s string) (TPartTopNPhase, error) { + switch s { + case "UNKNOWN": + return TPartTopNPhase_UNKNOWN, nil + case "ONE_PHASE_GLOBAL": + return TPartTopNPhase_ONE_PHASE_GLOBAL, nil + case "TWO_PHASE_LOCAL": + return TPartTopNPhase_TWO_PHASE_LOCAL, nil + case "TWO_PHASE_GLOBAL": + return TPartTopNPhase_TWO_PHASE_GLOBAL, nil + } + return TPartTopNPhase(0), fmt.Errorf("not a valid TPartTopNPhase string") +} + +func TPartTopNPhasePtr(v TPartTopNPhase) *TPartTopNPhase { return &v } +func (p *TPartTopNPhase) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TPartTopNPhase(result.Int64) + return +} + +func (p *TPartTopNPhase) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TAnalyticWindowType int64 const ( @@ -1133,6 +1309,53 @@ func (p *TRuntimeFilterType) Value() (driver.Value, error) { return int64(*p), nil } +type TMinMaxRuntimeFilterType int64 + +const ( + TMinMaxRuntimeFilterType_MIN TMinMaxRuntimeFilterType = 1 + TMinMaxRuntimeFilterType_MAX TMinMaxRuntimeFilterType = 2 + TMinMaxRuntimeFilterType_MIN_MAX TMinMaxRuntimeFilterType = 4 +) + +func (p TMinMaxRuntimeFilterType) String() string { + switch p { + case TMinMaxRuntimeFilterType_MIN: + return "MIN" + case TMinMaxRuntimeFilterType_MAX: + return "MAX" + case TMinMaxRuntimeFilterType_MIN_MAX: + return "MIN_MAX" + } + return "" +} + +func TMinMaxRuntimeFilterTypeFromString(s string) (TMinMaxRuntimeFilterType, error) { + switch s { + case "MIN": + return TMinMaxRuntimeFilterType_MIN, nil + case "MAX": + return TMinMaxRuntimeFilterType_MAX, nil + case "MIN_MAX": + return TMinMaxRuntimeFilterType_MIN_MAX, nil + } + return TMinMaxRuntimeFilterType(0), fmt.Errorf("not a valid TMinMaxRuntimeFilterType string") +} + +func TMinMaxRuntimeFilterTypePtr(v TMinMaxRuntimeFilterType) *TMinMaxRuntimeFilterType { return &v } +func (p *TMinMaxRuntimeFilterType) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TMinMaxRuntimeFilterType(result.Int64) + return +} + +func (p *TMinMaxRuntimeFilterType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TKeyRange struct { BeginKey int64 `thrift:"begin_key,1,required" frugal:"1,required,i64" json:"begin_key"` EndKey int64 `thrift:"end_key,2,required" frugal:"2,required,i64" json:"end_key"` @@ -1145,7 +1368,6 @@ func NewTKeyRange() *TKeyRange { } func (p *TKeyRange) InitDefault() { - *p = TKeyRange{} } func (p *TKeyRange) GetBeginKey() (v int64) { @@ -1212,10 +1434,8 @@ func (p *TKeyRange) 
Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBeginKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -1223,10 +1443,8 @@ func (p *TKeyRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetEndKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -1234,10 +1452,8 @@ func (p *TKeyRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -1245,17 +1461,14 @@ func (p *TKeyRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1302,38 +1515,47 @@ RequiredFieldNotSetError: } func (p *TKeyRange) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BeginKey = v + _field = v } + p.BeginKey = _field return nil } - func (p *TKeyRange) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.EndKey = v + _field = v } + p.EndKey = _field return nil } - func (p *TKeyRange) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnType = types.TPrimitiveType(v) + _field = types.TPrimitiveType(v) } + p.ColumnType = _field return nil } - func (p *TKeyRange) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnName = v + _field = v } + p.ColumnName = _field return nil } @@ -1359,7 +1581,6 @@ func (p *TKeyRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1451,6 +1672,7 @@ func (p *TKeyRange) String() string { return "" } return fmt.Sprintf("TKeyRange(%+v)", *p) + } func (p *TKeyRange) DeepEqual(ano *TKeyRange) bool { @@ -1520,7 +1742,6 @@ func NewTPaloScanRange() *TPaloScanRange { } func (p *TPaloScanRange) InitDefault() { - *p = TPaloScanRange{} } func (p *TPaloScanRange) GetHosts() (v []*types.TNetworkAddress) { @@ -1656,10 +1877,8 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHosts = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -1667,10 +1886,8 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSchemaHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { @@ -1678,10 +1895,8 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersion = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -1689,10 +1904,8 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetVersionHash = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -1700,10 +1913,8 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { @@ -1711,47 +1922,38 @@ func (p *TPaloScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDbName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1812,101 +2014,122 @@ func (p *TPaloScanRange) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Hosts = make([]*types.TNetworkAddress, 0, size) + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Hosts = append(p.Hosts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Hosts = _field return nil } - func (p *TPaloScanRange) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SchemaHash = v + _field = v } + p.SchemaHash = _field return nil } - func (p *TPaloScanRange) ReadField3(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Version = v + _field = v } + p.Version = _field return nil } - func (p *TPaloScanRange) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := 
iprot.ReadString(); err != nil { return err } else { - p.VersionHash = v + _field = v } + p.VersionHash = _field return nil } - func (p *TPaloScanRange) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TTabletId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TPaloScanRange) ReadField6(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = v + _field = v } + p.DbName = _field return nil } - func (p *TPaloScanRange) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionColumnRanges = make([]*TKeyRange, 0, size) + _field := make([]*TKeyRange, 0, size) + values := make([]TKeyRange, size) for i := 0; i < size; i++ { - _elem := NewTKeyRange() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionColumnRanges = append(p.PartitionColumnRanges, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionColumnRanges = _field return nil } - func (p *TPaloScanRange) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.IndexName = &v + _field = &v } + p.IndexName = _field return nil } - func (p *TPaloScanRange) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } @@ -1952,7 +2175,6 @@ func (p *TPaloScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2151,6 +2373,7 @@ func (p *TPaloScanRange) String() string { return "" } return fmt.Sprintf("TPaloScanRange(%+v)", *p) + } func (p *TPaloScanRange) DeepEqual(ano *TPaloScanRange) bool { @@ -2285,7 +2508,6 @@ func NewTHdfsConf() *THdfsConf { } func (p *THdfsConf) InitDefault() { - *p = THdfsConf{} } func (p *THdfsConf) GetKey() (v string) { @@ -2334,10 +2556,8 @@ func (p *THdfsConf) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetKey = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -2345,17 +2565,14 @@ func (p *THdfsConf) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2392,20 +2609,25 @@ RequiredFieldNotSetError: } func (p *THdfsConf) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Key = v + _field = v } + p.Key = _field return nil } - func (p *THdfsConf) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Value = v + _field = v } + p.Value = _field return nil } @@ -2423,7 +2645,6 @@ func (p *THdfsConf) Write(oprot thrift.TProtocol) (err error) { 
fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2481,6 +2702,7 @@ func (p *THdfsConf) String() string { return "" } return fmt.Sprintf("THdfsConf(%+v)", *p) + } func (p *THdfsConf) DeepEqual(ano *THdfsConf) bool { @@ -2519,6 +2741,7 @@ type THdfsParams struct { HdfsKerberosPrincipal *string `thrift:"hdfs_kerberos_principal,3,optional" frugal:"3,optional,string" json:"hdfs_kerberos_principal,omitempty"` HdfsKerberosKeytab *string `thrift:"hdfs_kerberos_keytab,4,optional" frugal:"4,optional,string" json:"hdfs_kerberos_keytab,omitempty"` HdfsConf []*THdfsConf `thrift:"hdfs_conf,5,optional" frugal:"5,optional,list" json:"hdfs_conf,omitempty"` + RootPath *string `thrift:"root_path,6,optional" frugal:"6,optional,string" json:"root_path,omitempty"` } func NewTHdfsParams() *THdfsParams { @@ -2526,7 +2749,6 @@ func NewTHdfsParams() *THdfsParams { } func (p *THdfsParams) InitDefault() { - *p = THdfsParams{} } var THdfsParams_FsName_DEFAULT string @@ -2573,6 +2795,15 @@ func (p *THdfsParams) GetHdfsConf() (v []*THdfsConf) { } return p.HdfsConf } + +var THdfsParams_RootPath_DEFAULT string + +func (p *THdfsParams) GetRootPath() (v string) { + if !p.IsSetRootPath() { + return THdfsParams_RootPath_DEFAULT + } + return *p.RootPath +} func (p *THdfsParams) SetFsName(val *string) { p.FsName = val } @@ -2588,6 +2819,9 @@ func (p *THdfsParams) SetHdfsKerberosKeytab(val *string) { func (p *THdfsParams) SetHdfsConf(val []*THdfsConf) { p.HdfsConf = val } +func (p *THdfsParams) SetRootPath(val *string) { + p.RootPath = val +} var fieldIDToName_THdfsParams = map[int16]string{ 1: "fs_name", @@ -2595,6 +2829,7 @@ var fieldIDToName_THdfsParams = map[int16]string{ 3: "hdfs_kerberos_principal", 4: "hdfs_kerberos_keytab", 5: "hdfs_conf", + 6: "root_path", } func (p *THdfsParams) IsSetFsName() bool { @@ -2617,6 +2852,10 @@ func (p *THdfsParams) IsSetHdfsConf() bool { return p.HdfsConf != nil } +func (p *THdfsParams) IsSetRootPath() bool { + return p.RootPath != nil +} + func (p *THdfsParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -2641,57 +2880,54 @@ func (p *THdfsParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + 
if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2717,58 +2953,81 @@ ReadStructEndError: } func (p *THdfsParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FsName = &v + _field = &v } + p.FsName = _field return nil } - func (p *THdfsParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *THdfsParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HdfsKerberosPrincipal = &v + _field = &v } + p.HdfsKerberosPrincipal = _field return nil } - func (p *THdfsParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HdfsKerberosKeytab = &v + _field = &v } + p.HdfsKerberosKeytab = _field return nil } - func (p *THdfsParams) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.HdfsConf = make([]*THdfsConf, 0, size) + _field := make([]*THdfsConf, 0, size) + values := make([]THdfsConf, size) for i := 0; i < size; i++ { - _elem := NewTHdfsConf() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.HdfsConf = append(p.HdfsConf, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.HdfsConf = _field + return nil +} +func (p *THdfsParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.RootPath = _field return nil } @@ -2798,7 +3057,10 @@ func (p *THdfsParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2920,11 +3182,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *THdfsParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetRootPath() { + if err = oprot.WriteFieldBegin("root_path", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.RootPath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + func (p *THdfsParams) String() string { if p == nil { return "" } return fmt.Sprintf("THdfsParams(%+v)", *p) + } func (p *THdfsParams) DeepEqual(ano *THdfsParams) bool { @@ -2948,6 +3230,9 @@ func (p *THdfsParams) DeepEqual(ano *THdfsParams) bool { if !p.Field5DeepEqual(ano.HdfsConf) { return false } + if !p.Field6DeepEqual(ano.RootPath) { + return false + } return true } @@ -3012,6 +3297,18 @@ func (p *THdfsParams) Field5DeepEqual(src 
[]*THdfsConf) bool { } return true } +func (p *THdfsParams) Field6DeepEqual(src *string) bool { + + if p.RootPath == src { + return true + } else if p.RootPath == nil || src == nil { + return false + } + if strings.Compare(*p.RootPath, *src) != 0 { + return false + } + return true +} type TBrokerRangeDesc struct { FileType types.TFileType `thrift:"file_type,1,required" frugal:"1,required,TFileType" json:"file_type"` @@ -3041,7 +3338,6 @@ func NewTBrokerRangeDesc() *TBrokerRangeDesc { } func (p *TBrokerRangeDesc) InitDefault() { - *p = TBrokerRangeDesc{} } func (p *TBrokerRangeDesc) GetFileType() (v types.TFileType) { @@ -3364,10 +3660,8 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFileType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -3375,10 +3669,8 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFormatType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { @@ -3386,10 +3678,8 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSplittable = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { @@ -3397,10 +3687,8 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPath = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -3408,10 +3696,8 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStartOffset = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { @@ -3419,157 +3705,126 @@ func (p *TBrokerRangeDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSize = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.LIST { if err = p.ReadField10(iprot); err != nil { goto 
ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.BOOL { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRING { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRING { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.BOOL { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.BOOL { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.STRUCT { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.BOOL { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.BOOL { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRING { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.I32 { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3626,92 +3881,109 @@ RequiredFieldNotSetError: } func (p *TBrokerRangeDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TFileType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FileType = types.TFileType(v) + _field = types.TFileType(v) } + p.FileType = _field return nil } - func (p *TBrokerRangeDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field TFileFormatType if v, err := iprot.ReadI32(); err != nil { return err } else { 
- p.FormatType = TFileFormatType(v) + _field = TFileFormatType(v) } + p.FormatType = _field return nil } - func (p *TBrokerRangeDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Splittable = v + _field = v } + p.Splittable = _field return nil } - func (p *TBrokerRangeDesc) ReadField4(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Path = v + _field = v } + p.Path = _field return nil } - func (p *TBrokerRangeDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.StartOffset = v + _field = v } + p.StartOffset = _field return nil } - func (p *TBrokerRangeDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Size = v + _field = v } + p.Size = _field return nil } - func (p *TBrokerRangeDesc) ReadField7(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { return err } + p.LoadId = _field return nil } - func (p *TBrokerRangeDesc) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FileSize = &v + _field = &v } + p.FileSize = _field return nil } - func (p *TBrokerRangeDesc) ReadField9(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumOfColumnsFromFile = &v + _field = &v } + p.NumOfColumnsFromFile = _field return nil } - func (p *TBrokerRangeDesc) ReadField10(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnsFromPath = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -3719,100 +3991,119 @@ func (p *TBrokerRangeDesc) ReadField10(iprot thrift.TProtocol) error { _elem = v } - p.ColumnsFromPath = append(p.ColumnsFromPath, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnsFromPath = _field return nil } - func (p *TBrokerRangeDesc) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.StripOuterArray = &v + _field = &v } + p.StripOuterArray = _field return nil } - func (p *TBrokerRangeDesc) ReadField12(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Jsonpaths = &v + _field = &v } + p.Jsonpaths = _field return nil } - func (p *TBrokerRangeDesc) ReadField13(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JsonRoot = &v + _field = &v } + p.JsonRoot = _field return nil } - func (p *TBrokerRangeDesc) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NumAsString = &v + _field = &v } + p.NumAsString = _field return nil } - func (p *TBrokerRangeDesc) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.FuzzyParse = &v + _field = &v } + p.FuzzyParse = _field return nil } - func (p 
*TBrokerRangeDesc) ReadField16(iprot thrift.TProtocol) error { - p.HdfsParams = NewTHdfsParams() - if err := p.HdfsParams.Read(iprot); err != nil { + _field := NewTHdfsParams() + if err := _field.Read(iprot); err != nil { return err } + p.HdfsParams = _field return nil } - func (p *TBrokerRangeDesc) ReadField17(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ReadJsonByLine = &v + _field = &v } + p.ReadJsonByLine = _field return nil } - func (p *TBrokerRangeDesc) ReadField18(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ReadByColumnDef = &v + _field = &v } + p.ReadByColumnDef = _field return nil } - func (p *TBrokerRangeDesc) ReadField19(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HeaderType = &v + _field = &v } + p.HeaderType = _field return nil } - func (p *TBrokerRangeDesc) ReadField20(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SkipLines = &v + _field = &v } + p.SkipLines = _field return nil } @@ -3902,7 +4193,6 @@ func (p *TBrokerRangeDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 20 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4302,6 +4592,7 @@ func (p *TBrokerRangeDesc) String() string { return "" } return fmt.Sprintf("TBrokerRangeDesc(%+v)", *p) + } func (p *TBrokerRangeDesc) DeepEqual(ano *TBrokerRangeDesc) bool { @@ -4602,11 +4893,8 @@ func NewTBrokerScanRangeParams() *TBrokerScanRangeParams { } func (p *TBrokerScanRangeParams) InitDefault() { - *p = TBrokerScanRangeParams{ - - ColumnSeparatorLength: 1, - LineDelimiterLength: 1, - } + p.ColumnSeparatorLength = 1 + p.LineDelimiterLength = 1 } func (p *TBrokerScanRangeParams) GetColumnSeparator() (v int8) { @@ -4852,10 +5140,8 @@ func (p *TBrokerScanRangeParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnSeparator = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BYTE { @@ -4863,10 +5149,8 @@ func (p *TBrokerScanRangeParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLineDelimiter = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -4874,10 +5158,8 @@ func (p *TBrokerScanRangeParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -4885,10 +5167,8 @@ func (p *TBrokerScanRangeParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcSlotIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { @@ -4896,117 +5176,94 @@ func (p *TBrokerScanRangeParams) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDestTupleId = true - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.MAP { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.MAP { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.MAP { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRING { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRING { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.BOOL { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5058,39 +5315,46 @@ RequiredFieldNotSetError: } func (p *TBrokerScanRangeParams) ReadField1(iprot thrift.TProtocol) error { + + var _field int8 if v, err := iprot.ReadByte(); err != nil { return err } else { - p.ColumnSeparator = v + _field = v } + p.ColumnSeparator = _field return nil } - func (p *TBrokerScanRangeParams) ReadField2(iprot thrift.TProtocol) error { + + var _field int8 if v, err := iprot.ReadByte(); err != nil { return err } else { - p.LineDelimiter = v + _field = v } + p.LineDelimiter = _field return nil } - func 
(p *TBrokerScanRangeParams) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SrcTupleId = v + _field = v } + p.SrcTupleId = _field return nil } - func (p *TBrokerScanRangeParams) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SrcSlotIds = make([]types.TSlotId, 0, size) + _field := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -5098,29 +5362,32 @@ func (p *TBrokerScanRangeParams) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.SrcSlotIds = append(p.SrcSlotIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SrcSlotIds = _field return nil } - func (p *TBrokerScanRangeParams) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DestTupleId = v + _field = v } + p.DestTupleId = _field return nil } - func (p *TBrokerScanRangeParams) ReadField6(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ExprOfDestSlot = make(map[types.TSlotId]*exprs.TExpr, size) + _field := make(map[types.TSlotId]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { var _key types.TSlotId if v, err := iprot.ReadI32(); err != nil { @@ -5128,25 +5395,27 @@ func (p *TBrokerScanRangeParams) ReadField6(iprot thrift.TProtocol) error { } else { _key = v } - _val := exprs.NewTExpr() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.ExprOfDestSlot[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ExprOfDestSlot = _field return nil } - func (p *TBrokerScanRangeParams) ReadField7(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -5162,21 +5431,22 @@ func (p *TBrokerScanRangeParams) ReadField7(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } - func (p *TBrokerScanRangeParams) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionIds = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -5184,20 +5454,20 @@ func (p *TBrokerScanRangeParams) ReadField8(iprot thrift.TProtocol) error { _elem = v } - p.PartitionIds = append(p.PartitionIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionIds = _field return nil } - func (p *TBrokerScanRangeParams) ReadField9(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.DestSidToSrcSidWithoutTrans = make(map[types.TSlotId]types.TSlotId, size) + _field := make(map[types.TSlotId]types.TSlotId, size) for i := 0; i < size; i++ { var _key types.TSlotId if v, err := iprot.ReadI32(); err != nil { @@ -5213,65 +5483,78 @@ func (p *TBrokerScanRangeParams) ReadField9(iprot 
thrift.TProtocol) error { _val = v } - p.DestSidToSrcSidWithoutTrans[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.DestSidToSrcSidWithoutTrans = _field return nil } - func (p *TBrokerScanRangeParams) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.StrictMode = &v + _field = &v } + p.StrictMode = _field return nil } - func (p *TBrokerScanRangeParams) ReadField11(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ColumnSeparatorLength = v + _field = v } + p.ColumnSeparatorLength = _field return nil } - func (p *TBrokerScanRangeParams) ReadField12(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.LineDelimiterLength = v + _field = v } + p.LineDelimiterLength = _field return nil } - func (p *TBrokerScanRangeParams) ReadField13(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparatorStr = &v + _field = &v } + p.ColumnSeparatorStr = _field return nil } - func (p *TBrokerScanRangeParams) ReadField14(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineDelimiterStr = &v + _field = &v } + p.LineDelimiterStr = _field return nil } - func (p *TBrokerScanRangeParams) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.TrimDoubleQuotes = &v + _field = &v } + p.TrimDoubleQuotes = _field return nil } @@ -5341,7 +5624,6 @@ func (p *TBrokerScanRangeParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 15 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5462,11 +5744,9 @@ func (p *TBrokerScanRangeParams) writeField6(oprot thrift.TProtocol) (err error) return err } for k, v := range p.ExprOfDestSlot { - if err := oprot.WriteI32(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -5494,11 +5774,9 @@ func (p *TBrokerScanRangeParams) writeField7(oprot thrift.TProtocol) (err error) return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -5553,11 +5831,9 @@ func (p *TBrokerScanRangeParams) writeField9(oprot thrift.TProtocol) (err error) return err } for k, v := range p.DestSidToSrcSidWithoutTrans { - if err := oprot.WriteI32(k); err != nil { return err } - if err := oprot.WriteI32(v); err != nil { return err } @@ -5695,6 +5971,7 @@ func (p *TBrokerScanRangeParams) String() string { return "" } return fmt.Sprintf("TBrokerScanRangeParams(%+v)", *p) + } func (p *TBrokerScanRangeParams) DeepEqual(ano *TBrokerScanRangeParams) bool { @@ -5918,7 +6195,6 @@ func NewTBrokerScanRange() *TBrokerScanRange { } func (p *TBrokerScanRange) InitDefault() { - *p = TBrokerScanRange{} } func (p *TBrokerScanRange) GetRanges() (v []*TBrokerRangeDesc) { @@ -5985,10 +6261,8 @@ func (p *TBrokerScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRanges = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -5996,10 +6270,8 @@ func (p 
*TBrokerScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetParams = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -6007,17 +6279,14 @@ func (p *TBrokerScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrokerAddresses = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6063,46 +6332,53 @@ func (p *TBrokerScanRange) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Ranges = make([]*TBrokerRangeDesc, 0, size) + _field := make([]*TBrokerRangeDesc, 0, size) + values := make([]TBrokerRangeDesc, size) for i := 0; i < size; i++ { - _elem := NewTBrokerRangeDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Ranges = append(p.Ranges, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Ranges = _field return nil } - func (p *TBrokerScanRange) ReadField2(iprot thrift.TProtocol) error { - p.Params = NewTBrokerScanRangeParams() - if err := p.Params.Read(iprot); err != nil { + _field := NewTBrokerScanRangeParams() + if err := _field.Read(iprot); err != nil { return err } + p.Params = _field return nil } - func (p *TBrokerScanRange) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.BrokerAddresses = append(p.BrokerAddresses, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.BrokerAddresses = _field return nil } @@ -6124,7 +6400,6 @@ func (p *TBrokerScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6215,6 +6490,7 @@ func (p *TBrokerScanRange) String() string { return "" } return fmt.Sprintf("TBrokerScanRange(%+v)", *p) + } func (p *TBrokerScanRange) DeepEqual(ano *TBrokerScanRange) bool { @@ -6281,7 +6557,6 @@ func NewTEsScanRange() *TEsScanRange { } func (p *TEsScanRange) InitDefault() { - *p = TEsScanRange{} } func (p *TEsScanRange) GetEsHosts() (v []*types.TNetworkAddress) { @@ -6356,10 +6631,8 @@ func (p *TEsScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetEsHosts = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -6367,20 +6640,16 @@ func (p *TEsScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndex = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId 
== thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -6388,17 +6657,14 @@ func (p *TEsScanRange) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetShardId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6444,45 +6710,55 @@ func (p *TEsScanRange) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.EsHosts = make([]*types.TNetworkAddress, 0, size) + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.EsHosts = append(p.EsHosts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.EsHosts = _field return nil } - func (p *TEsScanRange) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Index = v + _field = v } + p.Index = _field return nil } - func (p *TEsScanRange) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Type = &v + _field = &v } + p.Type = _field return nil } - func (p *TEsScanRange) ReadField4(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ShardId = v + _field = v } + p.ShardId = _field return nil } @@ -6508,7 +6784,6 @@ func (p *TEsScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6610,6 +6885,7 @@ func (p *TEsScanRange) String() string { return "" } return fmt.Sprintf("TEsScanRange(%+v)", *p) + } func (p *TEsScanRange) DeepEqual(ano *TEsScanRange) bool { @@ -6680,6 +6956,7 @@ type TFileTextScanRangeParams struct { MapkvDelimiter *string `thrift:"mapkv_delimiter,4,optional" frugal:"4,optional,string" json:"mapkv_delimiter,omitempty"` Enclose *int8 `thrift:"enclose,5,optional" frugal:"5,optional,i8" json:"enclose,omitempty"` Escape *int8 `thrift:"escape,6,optional" frugal:"6,optional,i8" json:"escape,omitempty"` + NullFormat *string `thrift:"null_format,7,optional" frugal:"7,optional,string" json:"null_format,omitempty"` } func NewTFileTextScanRangeParams() *TFileTextScanRangeParams { @@ -6687,7 +6964,6 @@ func NewTFileTextScanRangeParams() *TFileTextScanRangeParams { } func (p *TFileTextScanRangeParams) InitDefault() { - *p = TFileTextScanRangeParams{} } var TFileTextScanRangeParams_ColumnSeparator_DEFAULT string @@ -6743,6 +7019,15 @@ func (p *TFileTextScanRangeParams) GetEscape() (v int8) { } return *p.Escape } + +var TFileTextScanRangeParams_NullFormat_DEFAULT string + +func (p *TFileTextScanRangeParams) GetNullFormat() (v string) { + if !p.IsSetNullFormat() { + return TFileTextScanRangeParams_NullFormat_DEFAULT + } + return *p.NullFormat +} func (p *TFileTextScanRangeParams) SetColumnSeparator(val *string) 
{ p.ColumnSeparator = val } @@ -6761,6 +7046,9 @@ func (p *TFileTextScanRangeParams) SetEnclose(val *int8) { func (p *TFileTextScanRangeParams) SetEscape(val *int8) { p.Escape = val } +func (p *TFileTextScanRangeParams) SetNullFormat(val *string) { + p.NullFormat = val +} var fieldIDToName_TFileTextScanRangeParams = map[int16]string{ 1: "column_separator", @@ -6769,6 +7057,7 @@ var fieldIDToName_TFileTextScanRangeParams = map[int16]string{ 4: "mapkv_delimiter", 5: "enclose", 6: "escape", + 7: "null_format", } func (p *TFileTextScanRangeParams) IsSetColumnSeparator() bool { @@ -6795,6 +7084,10 @@ func (p *TFileTextScanRangeParams) IsSetEscape() bool { return p.Escape != nil } +func (p *TFileTextScanRangeParams) IsSetNullFormat() bool { + return p.NullFormat != nil +} + func (p *TFileTextScanRangeParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -6819,67 +7112,62 @@ func (p *TFileTextScanRangeParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BYTE { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BYTE { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6905,56 +7193,80 @@ ReadStructEndError: } func (p *TFileTextScanRangeParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparator = &v + _field = &v } + p.ColumnSeparator = _field return nil } - func (p *TFileTextScanRangeParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineDelimiter = &v + _field = &v } + p.LineDelimiter = _field return nil } - func (p *TFileTextScanRangeParams) ReadField3(iprot thrift.TProtocol) error { + + 
var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.CollectionDelimiter = &v + _field = &v } + p.CollectionDelimiter = _field return nil } - func (p *TFileTextScanRangeParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.MapkvDelimiter = &v + _field = &v } + p.MapkvDelimiter = _field return nil } - func (p *TFileTextScanRangeParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *int8 if v, err := iprot.ReadByte(); err != nil { return err } else { - p.Enclose = &v + _field = &v } + p.Enclose = _field return nil } - func (p *TFileTextScanRangeParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *int8 if v, err := iprot.ReadByte(); err != nil { return err } else { - p.Escape = &v + _field = &v + } + p.Escape = _field + return nil +} +func (p *TFileTextScanRangeParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } + p.NullFormat = _field return nil } @@ -6988,7 +7300,10 @@ func (p *TFileTextScanRangeParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7121,11 +7436,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } +func (p *TFileTextScanRangeParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetNullFormat() { + if err = oprot.WriteFieldBegin("null_format", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.NullFormat); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + func (p *TFileTextScanRangeParams) String() string { if p == nil { return "" } return fmt.Sprintf("TFileTextScanRangeParams(%+v)", *p) + } func (p *TFileTextScanRangeParams) DeepEqual(ano *TFileTextScanRangeParams) bool { @@ -7152,6 +7487,9 @@ func (p *TFileTextScanRangeParams) DeepEqual(ano *TFileTextScanRangeParams) bool if !p.Field6DeepEqual(ano.Escape) { return false } + if !p.Field7DeepEqual(ano.NullFormat) { + return false + } return true } @@ -7227,6 +7565,18 @@ func (p *TFileTextScanRangeParams) Field6DeepEqual(src *int8) bool { } return true } +func (p *TFileTextScanRangeParams) Field7DeepEqual(src *string) bool { + + if p.NullFormat == src { + return true + } else if p.NullFormat == nil || src == nil { + return false + } + if strings.Compare(*p.NullFormat, *src) != 0 { + return false + } + return true +} type TFileScanSlotInfo struct { SlotId *types.TSlotId `thrift:"slot_id,1,optional" frugal:"1,optional,i32" json:"slot_id,omitempty"` @@ -7238,7 +7588,6 @@ func NewTFileScanSlotInfo() *TFileScanSlotInfo { } func (p *TFileScanSlotInfo) InitDefault() { - *p = TFileScanSlotInfo{} } var TFileScanSlotInfo_SlotId_DEFAULT types.TSlotId @@ -7302,27 +7651,22 @@ func (p *TFileScanSlotInfo) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - 
} + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7348,20 +7692,25 @@ ReadStructEndError: } func (p *TFileScanSlotInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SlotId = &v + _field = &v } + p.SlotId = _field return nil } - func (p *TFileScanSlotInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsFileSlot = &v + _field = &v } + p.IsFileSlot = _field return nil } @@ -7379,7 +7728,6 @@ func (p *TFileScanSlotInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7441,6 +7789,7 @@ func (p *TFileScanSlotInfo) String() string { return "" } return fmt.Sprintf("TFileScanSlotInfo(%+v)", *p) + } func (p *TFileScanSlotInfo) DeepEqual(ano *TFileScanSlotInfo) bool { @@ -7484,17 +7833,18 @@ func (p *TFileScanSlotInfo) Field2DeepEqual(src *bool) bool { } type TFileAttributes struct { - TextParams *TFileTextScanRangeParams `thrift:"text_params,1,optional" frugal:"1,optional,TFileTextScanRangeParams" json:"text_params,omitempty"` - StripOuterArray *bool `thrift:"strip_outer_array,2,optional" frugal:"2,optional,bool" json:"strip_outer_array,omitempty"` - Jsonpaths *string `thrift:"jsonpaths,3,optional" frugal:"3,optional,string" json:"jsonpaths,omitempty"` - JsonRoot *string `thrift:"json_root,4,optional" frugal:"4,optional,string" json:"json_root,omitempty"` - NumAsString *bool `thrift:"num_as_string,5,optional" frugal:"5,optional,bool" json:"num_as_string,omitempty"` - FuzzyParse *bool `thrift:"fuzzy_parse,6,optional" frugal:"6,optional,bool" json:"fuzzy_parse,omitempty"` - ReadJsonByLine *bool `thrift:"read_json_by_line,7,optional" frugal:"7,optional,bool" json:"read_json_by_line,omitempty"` - ReadByColumnDef *bool `thrift:"read_by_column_def,8,optional" frugal:"8,optional,bool" json:"read_by_column_def,omitempty"` - HeaderType *string `thrift:"header_type,9,optional" frugal:"9,optional,string" json:"header_type,omitempty"` - TrimDoubleQuotes *bool `thrift:"trim_double_quotes,10,optional" frugal:"10,optional,bool" json:"trim_double_quotes,omitempty"` - SkipLines *int32 `thrift:"skip_lines,11,optional" frugal:"11,optional,i32" json:"skip_lines,omitempty"` + TextParams *TFileTextScanRangeParams `thrift:"text_params,1,optional" frugal:"1,optional,TFileTextScanRangeParams" json:"text_params,omitempty"` + StripOuterArray *bool `thrift:"strip_outer_array,2,optional" frugal:"2,optional,bool" json:"strip_outer_array,omitempty"` + Jsonpaths *string `thrift:"jsonpaths,3,optional" frugal:"3,optional,string" json:"jsonpaths,omitempty"` + JsonRoot *string `thrift:"json_root,4,optional" frugal:"4,optional,string" json:"json_root,omitempty"` + NumAsString *bool `thrift:"num_as_string,5,optional" frugal:"5,optional,bool" json:"num_as_string,omitempty"` + FuzzyParse *bool `thrift:"fuzzy_parse,6,optional" frugal:"6,optional,bool" json:"fuzzy_parse,omitempty"` + ReadJsonByLine *bool 
`thrift:"read_json_by_line,7,optional" frugal:"7,optional,bool" json:"read_json_by_line,omitempty"` + ReadByColumnDef *bool `thrift:"read_by_column_def,8,optional" frugal:"8,optional,bool" json:"read_by_column_def,omitempty"` + HeaderType *string `thrift:"header_type,9,optional" frugal:"9,optional,string" json:"header_type,omitempty"` + TrimDoubleQuotes *bool `thrift:"trim_double_quotes,10,optional" frugal:"10,optional,bool" json:"trim_double_quotes,omitempty"` + SkipLines *int32 `thrift:"skip_lines,11,optional" frugal:"11,optional,i32" json:"skip_lines,omitempty"` + IgnoreCsvRedundantCol *bool `thrift:"ignore_csv_redundant_col,1001,optional" frugal:"1001,optional,bool" json:"ignore_csv_redundant_col,omitempty"` } func NewTFileAttributes() *TFileAttributes { @@ -7502,7 +7852,6 @@ func NewTFileAttributes() *TFileAttributes { } func (p *TFileAttributes) InitDefault() { - *p = TFileAttributes{} } var TFileAttributes_TextParams_DEFAULT *TFileTextScanRangeParams @@ -7603,6 +7952,15 @@ func (p *TFileAttributes) GetSkipLines() (v int32) { } return *p.SkipLines } + +var TFileAttributes_IgnoreCsvRedundantCol_DEFAULT bool + +func (p *TFileAttributes) GetIgnoreCsvRedundantCol() (v bool) { + if !p.IsSetIgnoreCsvRedundantCol() { + return TFileAttributes_IgnoreCsvRedundantCol_DEFAULT + } + return *p.IgnoreCsvRedundantCol +} func (p *TFileAttributes) SetTextParams(val *TFileTextScanRangeParams) { p.TextParams = val } @@ -7636,19 +7994,23 @@ func (p *TFileAttributes) SetTrimDoubleQuotes(val *bool) { func (p *TFileAttributes) SetSkipLines(val *int32) { p.SkipLines = val } +func (p *TFileAttributes) SetIgnoreCsvRedundantCol(val *bool) { + p.IgnoreCsvRedundantCol = val +} var fieldIDToName_TFileAttributes = map[int16]string{ - 1: "text_params", - 2: "strip_outer_array", - 3: "jsonpaths", - 4: "json_root", - 5: "num_as_string", - 6: "fuzzy_parse", - 7: "read_json_by_line", - 8: "read_by_column_def", - 9: "header_type", - 10: "trim_double_quotes", - 11: "skip_lines", + 1: "text_params", + 2: "strip_outer_array", + 3: "jsonpaths", + 4: "json_root", + 5: "num_as_string", + 6: "fuzzy_parse", + 7: "read_json_by_line", + 8: "read_by_column_def", + 9: "header_type", + 10: "trim_double_quotes", + 11: "skip_lines", + 1001: "ignore_csv_redundant_col", } func (p *TFileAttributes) IsSetTextParams() bool { @@ -7695,6 +8057,10 @@ func (p *TFileAttributes) IsSetSkipLines() bool { return p.SkipLines != nil } +func (p *TFileAttributes) IsSetIgnoreCsvRedundantCol() bool { + return p.IgnoreCsvRedundantCol != nil +} + func (p *TFileAttributes) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -7719,117 +8085,102 @@ func (p *TFileAttributes) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = 
p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I32 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 1001: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1001(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7855,100 +8206,132 @@ ReadStructEndError: } func (p *TFileAttributes) ReadField1(iprot thrift.TProtocol) error { - p.TextParams = NewTFileTextScanRangeParams() - if err := p.TextParams.Read(iprot); err != nil { + _field := NewTFileTextScanRangeParams() + if err := _field.Read(iprot); err != nil { return err } + p.TextParams = _field return nil } - func (p *TFileAttributes) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.StripOuterArray = &v + _field = &v } + p.StripOuterArray = _field return nil } - func (p *TFileAttributes) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Jsonpaths = &v + _field = &v } + p.Jsonpaths = _field return nil } - func (p *TFileAttributes) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JsonRoot = &v + _field = &v } + p.JsonRoot = _field return nil } - func (p *TFileAttributes) 
ReadField5(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NumAsString = &v + _field = &v } + p.NumAsString = _field return nil } - func (p *TFileAttributes) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.FuzzyParse = &v + _field = &v } + p.FuzzyParse = _field return nil } - func (p *TFileAttributes) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ReadJsonByLine = &v + _field = &v } + p.ReadJsonByLine = _field return nil } - func (p *TFileAttributes) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ReadByColumnDef = &v + _field = &v } + p.ReadByColumnDef = _field return nil } - func (p *TFileAttributes) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HeaderType = &v + _field = &v } + p.HeaderType = _field return nil } - func (p *TFileAttributes) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.TrimDoubleQuotes = &v + _field = &v } + p.TrimDoubleQuotes = _field return nil } - func (p *TFileAttributes) ReadField11(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SkipLines = &v + _field = &v } + p.SkipLines = _field + return nil +} +func (p *TFileAttributes) ReadField1001(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IgnoreCsvRedundantCol = _field return nil } @@ -8002,7 +8385,10 @@ func (p *TFileAttributes) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - + if err = p.writeField1001(oprot); err != nil { + fieldId = 1001 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8230,11 +8616,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } +func (p *TFileAttributes) writeField1001(oprot thrift.TProtocol) (err error) { + if p.IsSetIgnoreCsvRedundantCol() { + if err = oprot.WriteFieldBegin("ignore_csv_redundant_col", thrift.BOOL, 1001); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IgnoreCsvRedundantCol); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err) +} + func (p *TFileAttributes) String() string { if p == nil { return "" } return fmt.Sprintf("TFileAttributes(%+v)", *p) + } func (p *TFileAttributes) DeepEqual(ano *TFileAttributes) bool { @@ -8276,6 +8682,9 @@ func (p *TFileAttributes) DeepEqual(ano *TFileAttributes) bool { if !p.Field11DeepEqual(ano.SkipLines) { return false } + if !p.Field1001DeepEqual(ano.IgnoreCsvRedundantCol) { + return false + } return true } @@ -8406,12 +8815,25 @@ func (p *TFileAttributes) Field11DeepEqual(src *int32) bool { } return true } +func (p *TFileAttributes) Field1001DeepEqual(src *bool) bool { + + if p.IgnoreCsvRedundantCol == src { + return true + } 
else if p.IgnoreCsvRedundantCol == nil || src == nil { + return false + } + if *p.IgnoreCsvRedundantCol != *src { + return false + } + return true +} type TIcebergDeleteFileDesc struct { Path *string `thrift:"path,1,optional" frugal:"1,optional,string" json:"path,omitempty"` PositionLowerBound *int64 `thrift:"position_lower_bound,2,optional" frugal:"2,optional,i64" json:"position_lower_bound,omitempty"` PositionUpperBound *int64 `thrift:"position_upper_bound,3,optional" frugal:"3,optional,i64" json:"position_upper_bound,omitempty"` FieldIds []int32 `thrift:"field_ids,4,optional" frugal:"4,optional,list" json:"field_ids,omitempty"` + Content *int32 `thrift:"content,5,optional" frugal:"5,optional,i32" json:"content,omitempty"` } func NewTIcebergDeleteFileDesc() *TIcebergDeleteFileDesc { @@ -8419,7 +8841,6 @@ func NewTIcebergDeleteFileDesc() *TIcebergDeleteFileDesc { } func (p *TIcebergDeleteFileDesc) InitDefault() { - *p = TIcebergDeleteFileDesc{} } var TIcebergDeleteFileDesc_Path_DEFAULT string @@ -8457,6 +8878,15 @@ func (p *TIcebergDeleteFileDesc) GetFieldIds() (v []int32) { } return p.FieldIds } + +var TIcebergDeleteFileDesc_Content_DEFAULT int32 + +func (p *TIcebergDeleteFileDesc) GetContent() (v int32) { + if !p.IsSetContent() { + return TIcebergDeleteFileDesc_Content_DEFAULT + } + return *p.Content +} func (p *TIcebergDeleteFileDesc) SetPath(val *string) { p.Path = val } @@ -8469,12 +8899,16 @@ func (p *TIcebergDeleteFileDesc) SetPositionUpperBound(val *int64) { func (p *TIcebergDeleteFileDesc) SetFieldIds(val []int32) { p.FieldIds = val } +func (p *TIcebergDeleteFileDesc) SetContent(val *int32) { + p.Content = val +} var fieldIDToName_TIcebergDeleteFileDesc = map[int16]string{ 1: "path", 2: "position_lower_bound", 3: "position_upper_bound", 4: "field_ids", + 5: "content", } func (p *TIcebergDeleteFileDesc) IsSetPath() bool { @@ -8493,6 +8927,10 @@ func (p *TIcebergDeleteFileDesc) IsSetFieldIds() bool { return p.FieldIds != nil } +func (p *TIcebergDeleteFileDesc) IsSetContent() bool { + return p.Content != nil +} + func (p *TIcebergDeleteFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -8517,47 +8955,46 @@ func (p *TIcebergDeleteFileDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I32 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto 
SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8583,39 +9020,46 @@ ReadStructEndError: } func (p *TIcebergDeleteFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Path = &v + _field = &v } + p.Path = _field return nil } - func (p *TIcebergDeleteFileDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PositionLowerBound = &v + _field = &v } + p.PositionLowerBound = _field return nil } - func (p *TIcebergDeleteFileDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PositionUpperBound = &v + _field = &v } + p.PositionUpperBound = _field return nil } - func (p *TIcebergDeleteFileDesc) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.FieldIds = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -8623,11 +9067,23 @@ func (p *TIcebergDeleteFileDesc) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.FieldIds = append(p.FieldIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.FieldIds = _field + return nil +} +func (p *TIcebergDeleteFileDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Content = _field return nil } @@ -8653,7 +9109,10 @@ func (p *TIcebergDeleteFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -8756,11 +9215,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } +func (p *TIcebergDeleteFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetContent() { + if err = oprot.WriteFieldBegin("content", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Content); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + func (p *TIcebergDeleteFileDesc) String() string { if p == nil { return "" } return fmt.Sprintf("TIcebergDeleteFileDesc(%+v)", *p) + } func (p *TIcebergDeleteFileDesc) DeepEqual(ano *TIcebergDeleteFileDesc) bool { @@ -8781,6 +9260,9 @@ func (p *TIcebergDeleteFileDesc) DeepEqual(ano *TIcebergDeleteFileDesc) bool { if !p.Field4DeepEqual(ano.FieldIds) { return false } + if !p.Field5DeepEqual(ano.Content) { + return false + } return true } @@ -8833,6 +9315,18 @@ func (p *TIcebergDeleteFileDesc) Field4DeepEqual(src []int32) bool { } return true } +func (p *TIcebergDeleteFileDesc) Field5DeepEqual(src *int32) bool { + + if p.Content == src { + return true + } else if p.Content == nil || src == nil { + return false + } + if *p.Content != *src { + return false + } + return true +} type TIcebergFileDesc struct { FormatVersion *int32 
`thrift:"format_version,1,optional" frugal:"1,optional,i32" json:"format_version,omitempty"` @@ -8840,6 +9334,8 @@ type TIcebergFileDesc struct { DeleteFiles []*TIcebergDeleteFileDesc `thrift:"delete_files,3,optional" frugal:"3,optional,list" json:"delete_files,omitempty"` DeleteTableTupleId *types.TTupleId `thrift:"delete_table_tuple_id,4,optional" frugal:"4,optional,i32" json:"delete_table_tuple_id,omitempty"` FileSelectConjunct *exprs.TExpr `thrift:"file_select_conjunct,5,optional" frugal:"5,optional,exprs.TExpr" json:"file_select_conjunct,omitempty"` + OriginalFilePath *string `thrift:"original_file_path,6,optional" frugal:"6,optional,string" json:"original_file_path,omitempty"` + RowCount *int64 `thrift:"row_count,7,optional" frugal:"7,optional,i64" json:"row_count,omitempty"` } func NewTIcebergFileDesc() *TIcebergFileDesc { @@ -8847,7 +9343,6 @@ func NewTIcebergFileDesc() *TIcebergFileDesc { } func (p *TIcebergFileDesc) InitDefault() { - *p = TIcebergFileDesc{} } var TIcebergFileDesc_FormatVersion_DEFAULT int32 @@ -8894,6 +9389,24 @@ func (p *TIcebergFileDesc) GetFileSelectConjunct() (v *exprs.TExpr) { } return p.FileSelectConjunct } + +var TIcebergFileDesc_OriginalFilePath_DEFAULT string + +func (p *TIcebergFileDesc) GetOriginalFilePath() (v string) { + if !p.IsSetOriginalFilePath() { + return TIcebergFileDesc_OriginalFilePath_DEFAULT + } + return *p.OriginalFilePath +} + +var TIcebergFileDesc_RowCount_DEFAULT int64 + +func (p *TIcebergFileDesc) GetRowCount() (v int64) { + if !p.IsSetRowCount() { + return TIcebergFileDesc_RowCount_DEFAULT + } + return *p.RowCount +} func (p *TIcebergFileDesc) SetFormatVersion(val *int32) { p.FormatVersion = val } @@ -8909,6 +9422,12 @@ func (p *TIcebergFileDesc) SetDeleteTableTupleId(val *types.TTupleId) { func (p *TIcebergFileDesc) SetFileSelectConjunct(val *exprs.TExpr) { p.FileSelectConjunct = val } +func (p *TIcebergFileDesc) SetOriginalFilePath(val *string) { + p.OriginalFilePath = val +} +func (p *TIcebergFileDesc) SetRowCount(val *int64) { + p.RowCount = val +} var fieldIDToName_TIcebergFileDesc = map[int16]string{ 1: "format_version", @@ -8916,6 +9435,8 @@ var fieldIDToName_TIcebergFileDesc = map[int16]string{ 3: "delete_files", 4: "delete_table_tuple_id", 5: "file_select_conjunct", + 6: "original_file_path", + 7: "row_count", } func (p *TIcebergFileDesc) IsSetFormatVersion() bool { @@ -8938,6 +9459,14 @@ func (p *TIcebergFileDesc) IsSetFileSelectConjunct() bool { return p.FileSelectConjunct != nil } +func (p *TIcebergFileDesc) IsSetOriginalFilePath() bool { + return p.OriginalFilePath != nil +} + +func (p *TIcebergFileDesc) IsSetRowCount() bool { + return p.RowCount != nil +} + func (p *TIcebergFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -8962,57 +9491,62 @@ func (p *TIcebergFileDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9038,57 +9572,89 @@ ReadStructEndError: } func (p *TIcebergFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FormatVersion = &v + _field = &v } + p.FormatVersion = _field return nil } - func (p *TIcebergFileDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Content = &v + _field = &v } + p.Content = _field return nil } - func (p *TIcebergFileDesc) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DeleteFiles = make([]*TIcebergDeleteFileDesc, 0, size) + _field := make([]*TIcebergDeleteFileDesc, 0, size) + values := make([]TIcebergDeleteFileDesc, size) for i := 0; i < size; i++ { - _elem := NewTIcebergDeleteFileDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.DeleteFiles = append(p.DeleteFiles, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DeleteFiles = _field return nil } - func (p *TIcebergFileDesc) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DeleteTableTupleId = &v + _field = &v } + p.DeleteTableTupleId = _field return nil } - func (p *TIcebergFileDesc) ReadField5(iprot thrift.TProtocol) error { - p.FileSelectConjunct = exprs.NewTExpr() - if err := p.FileSelectConjunct.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { + return err + } + p.FileSelectConjunct = _field + return nil +} +func (p *TIcebergFileDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.OriginalFilePath = _field + return nil +} +func (p *TIcebergFileDesc) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.RowCount = _field return nil } @@ -9118,7 +9684,14 @@ func (p *TIcebergFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + 
} + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9240,11 +9813,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *TIcebergFileDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetOriginalFilePath() { + if err = oprot.WriteFieldBegin("original_file_path", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.OriginalFilePath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TIcebergFileDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetRowCount() { + if err = oprot.WriteFieldBegin("row_count", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RowCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + func (p *TIcebergFileDesc) String() string { if p == nil { return "" } return fmt.Sprintf("TIcebergFileDesc(%+v)", *p) + } func (p *TIcebergFileDesc) DeepEqual(ano *TIcebergFileDesc) bool { @@ -9268,6 +9880,12 @@ func (p *TIcebergFileDesc) DeepEqual(ano *TIcebergFileDesc) bool { if !p.Field5DeepEqual(ano.FileSelectConjunct) { return false } + if !p.Field6DeepEqual(ano.OriginalFilePath) { + return false + } + if !p.Field7DeepEqual(ano.RowCount) { + return false + } return true } @@ -9327,130 +9945,99 @@ func (p *TIcebergFileDesc) Field5DeepEqual(src *exprs.TExpr) bool { } return true } +func (p *TIcebergFileDesc) Field6DeepEqual(src *string) bool { -type TPaimonFileDesc struct { - PaimonSplit *string `thrift:"paimon_split,1,optional" frugal:"1,optional,string" json:"paimon_split,omitempty"` - PaimonColumnNames *string `thrift:"paimon_column_names,2,optional" frugal:"2,optional,string" json:"paimon_column_names,omitempty"` - DbName *string `thrift:"db_name,3,optional" frugal:"3,optional,string" json:"db_name,omitempty"` - TableName *string `thrift:"table_name,4,optional" frugal:"4,optional,string" json:"table_name,omitempty"` - PaimonPredicate *string `thrift:"paimon_predicate,5,optional" frugal:"5,optional,string" json:"paimon_predicate,omitempty"` - PaimonOptions map[string]string `thrift:"paimon_options,6,optional" frugal:"6,optional,map" json:"paimon_options,omitempty"` -} - -func NewTPaimonFileDesc() *TPaimonFileDesc { - return &TPaimonFileDesc{} -} - -func (p *TPaimonFileDesc) InitDefault() { - *p = TPaimonFileDesc{} + if p.OriginalFilePath == src { + return true + } else if p.OriginalFilePath == nil || src == nil { + return false + } + if strings.Compare(*p.OriginalFilePath, *src) != 0 { + return false + } + return true } +func (p *TIcebergFileDesc) Field7DeepEqual(src *int64) bool { -var TPaimonFileDesc_PaimonSplit_DEFAULT string - -func (p *TPaimonFileDesc) GetPaimonSplit() (v string) { - if !p.IsSetPaimonSplit() { - return TPaimonFileDesc_PaimonSplit_DEFAULT + if p.RowCount == src { + return true + } else if 
p.RowCount == nil || src == nil { + return false } - return *p.PaimonSplit + if *p.RowCount != *src { + return false + } + return true } -var TPaimonFileDesc_PaimonColumnNames_DEFAULT string - -func (p *TPaimonFileDesc) GetPaimonColumnNames() (v string) { - if !p.IsSetPaimonColumnNames() { - return TPaimonFileDesc_PaimonColumnNames_DEFAULT - } - return *p.PaimonColumnNames +type TPaimonDeletionFileDesc struct { + Path *string `thrift:"path,1,optional" frugal:"1,optional,string" json:"path,omitempty"` + Offset *int64 `thrift:"offset,2,optional" frugal:"2,optional,i64" json:"offset,omitempty"` + Length *int64 `thrift:"length,3,optional" frugal:"3,optional,i64" json:"length,omitempty"` } -var TPaimonFileDesc_DbName_DEFAULT string +func NewTPaimonDeletionFileDesc() *TPaimonDeletionFileDesc { + return &TPaimonDeletionFileDesc{} +} -func (p *TPaimonFileDesc) GetDbName() (v string) { - if !p.IsSetDbName() { - return TPaimonFileDesc_DbName_DEFAULT - } - return *p.DbName +func (p *TPaimonDeletionFileDesc) InitDefault() { } -var TPaimonFileDesc_TableName_DEFAULT string +var TPaimonDeletionFileDesc_Path_DEFAULT string -func (p *TPaimonFileDesc) GetTableName() (v string) { - if !p.IsSetTableName() { - return TPaimonFileDesc_TableName_DEFAULT +func (p *TPaimonDeletionFileDesc) GetPath() (v string) { + if !p.IsSetPath() { + return TPaimonDeletionFileDesc_Path_DEFAULT } - return *p.TableName + return *p.Path } -var TPaimonFileDesc_PaimonPredicate_DEFAULT string +var TPaimonDeletionFileDesc_Offset_DEFAULT int64 -func (p *TPaimonFileDesc) GetPaimonPredicate() (v string) { - if !p.IsSetPaimonPredicate() { - return TPaimonFileDesc_PaimonPredicate_DEFAULT +func (p *TPaimonDeletionFileDesc) GetOffset() (v int64) { + if !p.IsSetOffset() { + return TPaimonDeletionFileDesc_Offset_DEFAULT } - return *p.PaimonPredicate + return *p.Offset } -var TPaimonFileDesc_PaimonOptions_DEFAULT map[string]string +var TPaimonDeletionFileDesc_Length_DEFAULT int64 -func (p *TPaimonFileDesc) GetPaimonOptions() (v map[string]string) { - if !p.IsSetPaimonOptions() { - return TPaimonFileDesc_PaimonOptions_DEFAULT +func (p *TPaimonDeletionFileDesc) GetLength() (v int64) { + if !p.IsSetLength() { + return TPaimonDeletionFileDesc_Length_DEFAULT } - return p.PaimonOptions -} -func (p *TPaimonFileDesc) SetPaimonSplit(val *string) { - p.PaimonSplit = val -} -func (p *TPaimonFileDesc) SetPaimonColumnNames(val *string) { - p.PaimonColumnNames = val -} -func (p *TPaimonFileDesc) SetDbName(val *string) { - p.DbName = val -} -func (p *TPaimonFileDesc) SetTableName(val *string) { - p.TableName = val + return *p.Length } -func (p *TPaimonFileDesc) SetPaimonPredicate(val *string) { - p.PaimonPredicate = val -} -func (p *TPaimonFileDesc) SetPaimonOptions(val map[string]string) { - p.PaimonOptions = val -} - -var fieldIDToName_TPaimonFileDesc = map[int16]string{ - 1: "paimon_split", - 2: "paimon_column_names", - 3: "db_name", - 4: "table_name", - 5: "paimon_predicate", - 6: "paimon_options", +func (p *TPaimonDeletionFileDesc) SetPath(val *string) { + p.Path = val } - -func (p *TPaimonFileDesc) IsSetPaimonSplit() bool { - return p.PaimonSplit != nil +func (p *TPaimonDeletionFileDesc) SetOffset(val *int64) { + p.Offset = val } - -func (p *TPaimonFileDesc) IsSetPaimonColumnNames() bool { - return p.PaimonColumnNames != nil +func (p *TPaimonDeletionFileDesc) SetLength(val *int64) { + p.Length = val } -func (p *TPaimonFileDesc) IsSetDbName() bool { - return p.DbName != nil +var fieldIDToName_TPaimonDeletionFileDesc = map[int16]string{ + 1: "path", + 2: 
"offset", + 3: "length", } -func (p *TPaimonFileDesc) IsSetTableName() bool { - return p.TableName != nil +func (p *TPaimonDeletionFileDesc) IsSetPath() bool { + return p.Path != nil } -func (p *TPaimonFileDesc) IsSetPaimonPredicate() bool { - return p.PaimonPredicate != nil +func (p *TPaimonDeletionFileDesc) IsSetOffset() bool { + return p.Offset != nil } -func (p *TPaimonFileDesc) IsSetPaimonOptions() bool { - return p.PaimonOptions != nil +func (p *TPaimonDeletionFileDesc) IsSetLength() bool { + return p.Length != nil } -func (p *TPaimonFileDesc) Read(iprot thrift.TProtocol) (err error) { +func (p *TPaimonDeletionFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -9474,67 +10061,30 @@ func (p *TPaimonFileDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.MAP { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9549,7 +10099,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonFileDesc[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonDeletionFileDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -9559,83 +10109,43 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPaimonFileDesc) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.PaimonSplit = &v - } - return nil -} - -func (p *TPaimonFileDesc) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.PaimonColumnNames = &v - } - return nil -} +func (p *TPaimonDeletionFileDesc) ReadField1(iprot 
thrift.TProtocol) error { -func (p *TPaimonFileDesc) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = &v + _field = &v } + p.Path = _field return nil } +func (p *TPaimonDeletionFileDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *TPaimonFileDesc) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.Offset = _field return nil } +func (p *TPaimonDeletionFileDesc) ReadField3(iprot thrift.TProtocol) error { -func (p *TPaimonFileDesc) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PaimonPredicate = &v - } - return nil -} - -func (p *TPaimonFileDesc) ReadField6(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.PaimonOptions = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - - var _val string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _val = v - } - - p.PaimonOptions[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err + _field = &v } + p.Length = _field return nil } -func (p *TPaimonFileDesc) Write(oprot thrift.TProtocol) (err error) { +func (p *TPaimonDeletionFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TPaimonFileDesc"); err != nil { + if err = oprot.WriteStructBegin("TPaimonDeletionFileDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -9651,19 +10161,6 @@ func (p *TPaimonFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9682,12 +10179,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TPaimonFileDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetPaimonSplit() { - if err = oprot.WriteFieldBegin("paimon_split", thrift.STRING, 1); err != nil { +func (p *TPaimonDeletionFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPath() { + if err = oprot.WriteFieldBegin("path", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.PaimonSplit); err != nil { + if err := oprot.WriteString(*p.Path); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9701,12 +10198,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TPaimonFileDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetPaimonColumnNames() { - if err = oprot.WriteFieldBegin("paimon_column_names", thrift.STRING, 2); err != nil { +func (p *TPaimonDeletionFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetOffset() { + if err = oprot.WriteFieldBegin("offset", thrift.I64, 2); err != nil { goto WriteFieldBeginError } - if 
err := oprot.WriteString(*p.PaimonColumnNames); err != nil { + if err := oprot.WriteI64(*p.Offset); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9720,12 +10217,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TPaimonFileDesc) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDbName() { - if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 3); err != nil { +func (p *TPaimonDeletionFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetLength() { + if err = oprot.WriteFieldBegin("length", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.DbName); err != nil { + if err := oprot.WriteI64(*p.Length); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -9739,379 +10236,335 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TPaimonFileDesc) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTableName() { - if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.TableName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TPaimonFileDesc) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetPaimonPredicate() { - if err = oprot.WriteFieldBegin("paimon_predicate", thrift.STRING, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.PaimonPredicate); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TPaimonFileDesc) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetPaimonOptions() { - if err = oprot.WriteFieldBegin("paimon_options", thrift.MAP, 6); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.PaimonOptions)); err != nil { - return err - } - for k, v := range p.PaimonOptions { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TPaimonFileDesc) String() string { +func (p *TPaimonDeletionFileDesc) String() string { if p == nil { return "" } - return fmt.Sprintf("TPaimonFileDesc(%+v)", *p) + return fmt.Sprintf("TPaimonDeletionFileDesc(%+v)", *p) + } -func (p *TPaimonFileDesc) DeepEqual(ano *TPaimonFileDesc) bool { +func (p *TPaimonDeletionFileDesc) DeepEqual(ano *TPaimonDeletionFileDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if 
!p.Field1DeepEqual(ano.PaimonSplit) { - return false - } - if !p.Field2DeepEqual(ano.PaimonColumnNames) { - return false - } - if !p.Field3DeepEqual(ano.DbName) { - return false - } - if !p.Field4DeepEqual(ano.TableName) { + if !p.Field1DeepEqual(ano.Path) { return false } - if !p.Field5DeepEqual(ano.PaimonPredicate) { + if !p.Field2DeepEqual(ano.Offset) { return false } - if !p.Field6DeepEqual(ano.PaimonOptions) { + if !p.Field3DeepEqual(ano.Length) { return false } return true } -func (p *TPaimonFileDesc) Field1DeepEqual(src *string) bool { +func (p *TPaimonDeletionFileDesc) Field1DeepEqual(src *string) bool { - if p.PaimonSplit == src { + if p.Path == src { return true - } else if p.PaimonSplit == nil || src == nil { + } else if p.Path == nil || src == nil { return false } - if strings.Compare(*p.PaimonSplit, *src) != 0 { + if strings.Compare(*p.Path, *src) != 0 { return false } return true } -func (p *TPaimonFileDesc) Field2DeepEqual(src *string) bool { +func (p *TPaimonDeletionFileDesc) Field2DeepEqual(src *int64) bool { - if p.PaimonColumnNames == src { + if p.Offset == src { return true - } else if p.PaimonColumnNames == nil || src == nil { + } else if p.Offset == nil || src == nil { return false } - if strings.Compare(*p.PaimonColumnNames, *src) != 0 { + if *p.Offset != *src { return false } return true } -func (p *TPaimonFileDesc) Field3DeepEqual(src *string) bool { +func (p *TPaimonDeletionFileDesc) Field3DeepEqual(src *int64) bool { - if p.DbName == src { + if p.Length == src { return true - } else if p.DbName == nil || src == nil { + } else if p.Length == nil || src == nil { return false } - if strings.Compare(*p.DbName, *src) != 0 { + if *p.Length != *src { return false } return true } -func (p *TPaimonFileDesc) Field4DeepEqual(src *string) bool { - if p.TableName == src { - return true - } else if p.TableName == nil || src == nil { - return false - } - if strings.Compare(*p.TableName, *src) != 0 { - return false - } - return true +type TPaimonFileDesc struct { + PaimonSplit *string `thrift:"paimon_split,1,optional" frugal:"1,optional,string" json:"paimon_split,omitempty"` + PaimonColumnNames *string `thrift:"paimon_column_names,2,optional" frugal:"2,optional,string" json:"paimon_column_names,omitempty"` + DbName *string `thrift:"db_name,3,optional" frugal:"3,optional,string" json:"db_name,omitempty"` + TableName *string `thrift:"table_name,4,optional" frugal:"4,optional,string" json:"table_name,omitempty"` + PaimonPredicate *string `thrift:"paimon_predicate,5,optional" frugal:"5,optional,string" json:"paimon_predicate,omitempty"` + PaimonOptions map[string]string `thrift:"paimon_options,6,optional" frugal:"6,optional,map" json:"paimon_options,omitempty"` + CtlId *int64 `thrift:"ctl_id,7,optional" frugal:"7,optional,i64" json:"ctl_id,omitempty"` + DbId *int64 `thrift:"db_id,8,optional" frugal:"8,optional,i64" json:"db_id,omitempty"` + TblId *int64 `thrift:"tbl_id,9,optional" frugal:"9,optional,i64" json:"tbl_id,omitempty"` + LastUpdateTime *int64 `thrift:"last_update_time,10,optional" frugal:"10,optional,i64" json:"last_update_time,omitempty"` + FileFormat *string `thrift:"file_format,11,optional" frugal:"11,optional,string" json:"file_format,omitempty"` + DeletionFile *TPaimonDeletionFileDesc `thrift:"deletion_file,12,optional" frugal:"12,optional,TPaimonDeletionFileDesc" json:"deletion_file,omitempty"` + HadoopConf map[string]string `thrift:"hadoop_conf,13,optional" frugal:"13,optional,map" json:"hadoop_conf,omitempty"` + PaimonTable *string 
`thrift:"paimon_table,14,optional" frugal:"14,optional,string" json:"paimon_table,omitempty"` } -func (p *TPaimonFileDesc) Field5DeepEqual(src *string) bool { - if p.PaimonPredicate == src { - return true - } else if p.PaimonPredicate == nil || src == nil { - return false - } - if strings.Compare(*p.PaimonPredicate, *src) != 0 { - return false - } - return true +func NewTPaimonFileDesc() *TPaimonFileDesc { + return &TPaimonFileDesc{} } -func (p *TPaimonFileDesc) Field6DeepEqual(src map[string]string) bool { - if len(p.PaimonOptions) != len(src) { - return false - } - for k, v := range p.PaimonOptions { - _src := src[k] - if strings.Compare(v, _src) != 0 { - return false - } - } - return true +func (p *TPaimonFileDesc) InitDefault() { } -type THudiFileDesc struct { - InstantTime *string `thrift:"instant_time,1,optional" frugal:"1,optional,string" json:"instant_time,omitempty"` - Serde *string `thrift:"serde,2,optional" frugal:"2,optional,string" json:"serde,omitempty"` - InputFormat *string `thrift:"input_format,3,optional" frugal:"3,optional,string" json:"input_format,omitempty"` - BasePath *string `thrift:"base_path,4,optional" frugal:"4,optional,string" json:"base_path,omitempty"` - DataFilePath *string `thrift:"data_file_path,5,optional" frugal:"5,optional,string" json:"data_file_path,omitempty"` - DataFileLength *int64 `thrift:"data_file_length,6,optional" frugal:"6,optional,i64" json:"data_file_length,omitempty"` - DeltaLogs []string `thrift:"delta_logs,7,optional" frugal:"7,optional,list" json:"delta_logs,omitempty"` - ColumnNames []string `thrift:"column_names,8,optional" frugal:"8,optional,list" json:"column_names,omitempty"` - ColumnTypes []string `thrift:"column_types,9,optional" frugal:"9,optional,list" json:"column_types,omitempty"` - NestedFields []string `thrift:"nested_fields,10,optional" frugal:"10,optional,list" json:"nested_fields,omitempty"` -} +var TPaimonFileDesc_PaimonSplit_DEFAULT string -func NewTHudiFileDesc() *THudiFileDesc { - return &THudiFileDesc{} +func (p *TPaimonFileDesc) GetPaimonSplit() (v string) { + if !p.IsSetPaimonSplit() { + return TPaimonFileDesc_PaimonSplit_DEFAULT + } + return *p.PaimonSplit } -func (p *THudiFileDesc) InitDefault() { - *p = THudiFileDesc{} +var TPaimonFileDesc_PaimonColumnNames_DEFAULT string + +func (p *TPaimonFileDesc) GetPaimonColumnNames() (v string) { + if !p.IsSetPaimonColumnNames() { + return TPaimonFileDesc_PaimonColumnNames_DEFAULT + } + return *p.PaimonColumnNames } -var THudiFileDesc_InstantTime_DEFAULT string +var TPaimonFileDesc_DbName_DEFAULT string -func (p *THudiFileDesc) GetInstantTime() (v string) { - if !p.IsSetInstantTime() { - return THudiFileDesc_InstantTime_DEFAULT +func (p *TPaimonFileDesc) GetDbName() (v string) { + if !p.IsSetDbName() { + return TPaimonFileDesc_DbName_DEFAULT } - return *p.InstantTime + return *p.DbName } -var THudiFileDesc_Serde_DEFAULT string +var TPaimonFileDesc_TableName_DEFAULT string -func (p *THudiFileDesc) GetSerde() (v string) { - if !p.IsSetSerde() { - return THudiFileDesc_Serde_DEFAULT +func (p *TPaimonFileDesc) GetTableName() (v string) { + if !p.IsSetTableName() { + return TPaimonFileDesc_TableName_DEFAULT } - return *p.Serde + return *p.TableName } -var THudiFileDesc_InputFormat_DEFAULT string +var TPaimonFileDesc_PaimonPredicate_DEFAULT string -func (p *THudiFileDesc) GetInputFormat() (v string) { - if !p.IsSetInputFormat() { - return THudiFileDesc_InputFormat_DEFAULT +func (p *TPaimonFileDesc) GetPaimonPredicate() (v string) { + if !p.IsSetPaimonPredicate() { + return 
TPaimonFileDesc_PaimonPredicate_DEFAULT } - return *p.InputFormat + return *p.PaimonPredicate } -var THudiFileDesc_BasePath_DEFAULT string +var TPaimonFileDesc_PaimonOptions_DEFAULT map[string]string -func (p *THudiFileDesc) GetBasePath() (v string) { - if !p.IsSetBasePath() { - return THudiFileDesc_BasePath_DEFAULT +func (p *TPaimonFileDesc) GetPaimonOptions() (v map[string]string) { + if !p.IsSetPaimonOptions() { + return TPaimonFileDesc_PaimonOptions_DEFAULT } - return *p.BasePath + return p.PaimonOptions } -var THudiFileDesc_DataFilePath_DEFAULT string +var TPaimonFileDesc_CtlId_DEFAULT int64 -func (p *THudiFileDesc) GetDataFilePath() (v string) { - if !p.IsSetDataFilePath() { - return THudiFileDesc_DataFilePath_DEFAULT +func (p *TPaimonFileDesc) GetCtlId() (v int64) { + if !p.IsSetCtlId() { + return TPaimonFileDesc_CtlId_DEFAULT } - return *p.DataFilePath + return *p.CtlId } -var THudiFileDesc_DataFileLength_DEFAULT int64 +var TPaimonFileDesc_DbId_DEFAULT int64 -func (p *THudiFileDesc) GetDataFileLength() (v int64) { - if !p.IsSetDataFileLength() { - return THudiFileDesc_DataFileLength_DEFAULT +func (p *TPaimonFileDesc) GetDbId() (v int64) { + if !p.IsSetDbId() { + return TPaimonFileDesc_DbId_DEFAULT } - return *p.DataFileLength + return *p.DbId } -var THudiFileDesc_DeltaLogs_DEFAULT []string +var TPaimonFileDesc_TblId_DEFAULT int64 -func (p *THudiFileDesc) GetDeltaLogs() (v []string) { - if !p.IsSetDeltaLogs() { - return THudiFileDesc_DeltaLogs_DEFAULT +func (p *TPaimonFileDesc) GetTblId() (v int64) { + if !p.IsSetTblId() { + return TPaimonFileDesc_TblId_DEFAULT } - return p.DeltaLogs + return *p.TblId } -var THudiFileDesc_ColumnNames_DEFAULT []string +var TPaimonFileDesc_LastUpdateTime_DEFAULT int64 -func (p *THudiFileDesc) GetColumnNames() (v []string) { - if !p.IsSetColumnNames() { - return THudiFileDesc_ColumnNames_DEFAULT +func (p *TPaimonFileDesc) GetLastUpdateTime() (v int64) { + if !p.IsSetLastUpdateTime() { + return TPaimonFileDesc_LastUpdateTime_DEFAULT } - return p.ColumnNames + return *p.LastUpdateTime } -var THudiFileDesc_ColumnTypes_DEFAULT []string +var TPaimonFileDesc_FileFormat_DEFAULT string -func (p *THudiFileDesc) GetColumnTypes() (v []string) { - if !p.IsSetColumnTypes() { - return THudiFileDesc_ColumnTypes_DEFAULT +func (p *TPaimonFileDesc) GetFileFormat() (v string) { + if !p.IsSetFileFormat() { + return TPaimonFileDesc_FileFormat_DEFAULT } - return p.ColumnTypes + return *p.FileFormat } -var THudiFileDesc_NestedFields_DEFAULT []string +var TPaimonFileDesc_DeletionFile_DEFAULT *TPaimonDeletionFileDesc -func (p *THudiFileDesc) GetNestedFields() (v []string) { - if !p.IsSetNestedFields() { - return THudiFileDesc_NestedFields_DEFAULT +func (p *TPaimonFileDesc) GetDeletionFile() (v *TPaimonDeletionFileDesc) { + if !p.IsSetDeletionFile() { + return TPaimonFileDesc_DeletionFile_DEFAULT } - return p.NestedFields + return p.DeletionFile } -func (p *THudiFileDesc) SetInstantTime(val *string) { - p.InstantTime = val + +var TPaimonFileDesc_HadoopConf_DEFAULT map[string]string + +func (p *TPaimonFileDesc) GetHadoopConf() (v map[string]string) { + if !p.IsSetHadoopConf() { + return TPaimonFileDesc_HadoopConf_DEFAULT + } + return p.HadoopConf } -func (p *THudiFileDesc) SetSerde(val *string) { - p.Serde = val + +var TPaimonFileDesc_PaimonTable_DEFAULT string + +func (p *TPaimonFileDesc) GetPaimonTable() (v string) { + if !p.IsSetPaimonTable() { + return TPaimonFileDesc_PaimonTable_DEFAULT + } + return *p.PaimonTable } -func (p *THudiFileDesc) SetInputFormat(val *string) { - 
p.InputFormat = val +func (p *TPaimonFileDesc) SetPaimonSplit(val *string) { + p.PaimonSplit = val } -func (p *THudiFileDesc) SetBasePath(val *string) { - p.BasePath = val +func (p *TPaimonFileDesc) SetPaimonColumnNames(val *string) { + p.PaimonColumnNames = val } -func (p *THudiFileDesc) SetDataFilePath(val *string) { - p.DataFilePath = val +func (p *TPaimonFileDesc) SetDbName(val *string) { + p.DbName = val } -func (p *THudiFileDesc) SetDataFileLength(val *int64) { - p.DataFileLength = val +func (p *TPaimonFileDesc) SetTableName(val *string) { + p.TableName = val } -func (p *THudiFileDesc) SetDeltaLogs(val []string) { - p.DeltaLogs = val +func (p *TPaimonFileDesc) SetPaimonPredicate(val *string) { + p.PaimonPredicate = val } -func (p *THudiFileDesc) SetColumnNames(val []string) { - p.ColumnNames = val +func (p *TPaimonFileDesc) SetPaimonOptions(val map[string]string) { + p.PaimonOptions = val } -func (p *THudiFileDesc) SetColumnTypes(val []string) { - p.ColumnTypes = val +func (p *TPaimonFileDesc) SetCtlId(val *int64) { + p.CtlId = val } -func (p *THudiFileDesc) SetNestedFields(val []string) { - p.NestedFields = val +func (p *TPaimonFileDesc) SetDbId(val *int64) { + p.DbId = val +} +func (p *TPaimonFileDesc) SetTblId(val *int64) { + p.TblId = val +} +func (p *TPaimonFileDesc) SetLastUpdateTime(val *int64) { + p.LastUpdateTime = val +} +func (p *TPaimonFileDesc) SetFileFormat(val *string) { + p.FileFormat = val +} +func (p *TPaimonFileDesc) SetDeletionFile(val *TPaimonDeletionFileDesc) { + p.DeletionFile = val +} +func (p *TPaimonFileDesc) SetHadoopConf(val map[string]string) { + p.HadoopConf = val +} +func (p *TPaimonFileDesc) SetPaimonTable(val *string) { + p.PaimonTable = val } -var fieldIDToName_THudiFileDesc = map[int16]string{ - 1: "instant_time", - 2: "serde", - 3: "input_format", - 4: "base_path", - 5: "data_file_path", - 6: "data_file_length", - 7: "delta_logs", - 8: "column_names", - 9: "column_types", - 10: "nested_fields", +var fieldIDToName_TPaimonFileDesc = map[int16]string{ + 1: "paimon_split", + 2: "paimon_column_names", + 3: "db_name", + 4: "table_name", + 5: "paimon_predicate", + 6: "paimon_options", + 7: "ctl_id", + 8: "db_id", + 9: "tbl_id", + 10: "last_update_time", + 11: "file_format", + 12: "deletion_file", + 13: "hadoop_conf", + 14: "paimon_table", } -func (p *THudiFileDesc) IsSetInstantTime() bool { - return p.InstantTime != nil +func (p *TPaimonFileDesc) IsSetPaimonSplit() bool { + return p.PaimonSplit != nil } -func (p *THudiFileDesc) IsSetSerde() bool { - return p.Serde != nil +func (p *TPaimonFileDesc) IsSetPaimonColumnNames() bool { + return p.PaimonColumnNames != nil } -func (p *THudiFileDesc) IsSetInputFormat() bool { - return p.InputFormat != nil +func (p *TPaimonFileDesc) IsSetDbName() bool { + return p.DbName != nil } -func (p *THudiFileDesc) IsSetBasePath() bool { - return p.BasePath != nil +func (p *TPaimonFileDesc) IsSetTableName() bool { + return p.TableName != nil } -func (p *THudiFileDesc) IsSetDataFilePath() bool { - return p.DataFilePath != nil +func (p *TPaimonFileDesc) IsSetPaimonPredicate() bool { + return p.PaimonPredicate != nil } -func (p *THudiFileDesc) IsSetDataFileLength() bool { - return p.DataFileLength != nil +func (p *TPaimonFileDesc) IsSetPaimonOptions() bool { + return p.PaimonOptions != nil } -func (p *THudiFileDesc) IsSetDeltaLogs() bool { - return p.DeltaLogs != nil +func (p *TPaimonFileDesc) IsSetCtlId() bool { + return p.CtlId != nil } -func (p *THudiFileDesc) IsSetColumnNames() bool { - return p.ColumnNames != nil +func (p 
*TPaimonFileDesc) IsSetDbId() bool { + return p.DbId != nil } -func (p *THudiFileDesc) IsSetColumnTypes() bool { - return p.ColumnTypes != nil +func (p *TPaimonFileDesc) IsSetTblId() bool { + return p.TblId != nil } -func (p *THudiFileDesc) IsSetNestedFields() bool { - return p.NestedFields != nil +func (p *TPaimonFileDesc) IsSetLastUpdateTime() bool { + return p.LastUpdateTime != nil } -func (p *THudiFileDesc) Read(iprot thrift.TProtocol) (err error) { +func (p *TPaimonFileDesc) IsSetFileFormat() bool { + return p.FileFormat != nil +} + +func (p *TPaimonFileDesc) IsSetDeletionFile() bool { + return p.DeletionFile != nil +} + +func (p *TPaimonFileDesc) IsSetHadoopConf() bool { + return p.HadoopConf != nil +} + +func (p *TPaimonFileDesc) IsSetPaimonTable() bool { + return p.PaimonTable != nil +} + +func (p *TPaimonFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10135,107 +10588,118 @@ func (p *THudiFileDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } case 10: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.MAP { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.STRING { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10250,7 +10714,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THudiFileDesc[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonFileDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10260,151 +10724,197 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *THudiFileDesc) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.InstantTime = &v - } - return nil -} +func (p *TPaimonFileDesc) ReadField1(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField2(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Serde = &v + _field = &v } + p.PaimonSplit = _field return nil } +func (p *TPaimonFileDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.InputFormat = &v + _field = &v } + p.PaimonColumnNames = _field return nil } +func (p *TPaimonFileDesc) ReadField3(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField4(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.BasePath = &v + _field = &v } + p.DbName = _field return nil } +func (p *TPaimonFileDesc) ReadField4(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField5(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DataFilePath = &v + _field = &v } + p.TableName = _field return nil } +func (p *TPaimonFileDesc) ReadField5(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { + var _field *string + if v, err := 
iprot.ReadString(); err != nil { return err } else { - p.DataFileLength = &v + _field = &v } + p.PaimonPredicate = _field return nil } - -func (p *THudiFileDesc) ReadField7(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *TPaimonFileDesc) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.DeltaLogs = make([]string, 0, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, err := iprot.ReadString(); err != nil { return err } else { - _elem = v + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v } - p.DeltaLogs = append(p.DeltaLogs, _elem) + _field[_key] = _val } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadMapEnd(); err != nil { return err } + p.PaimonOptions = _field return nil } +func (p *TPaimonFileDesc) ReadField7(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField8(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } - p.ColumnNames = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + p.CtlId = _field + return nil +} +func (p *TPaimonFileDesc) ReadField8(iprot thrift.TProtocol) error { - p.ColumnNames = append(p.ColumnNames, _elem) + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } - if err := iprot.ReadListEnd(); err != nil { + p.DbId = _field + return nil +} +func (p *TPaimonFileDesc) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } + p.TblId = _field return nil } +func (p *TPaimonFileDesc) ReadField10(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) ReadField9(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err + } else { + _field = &v } - p.ColumnTypes = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v - } + p.LastUpdateTime = _field + return nil +} +func (p *TPaimonFileDesc) ReadField11(iprot thrift.TProtocol) error { - p.ColumnTypes = append(p.ColumnTypes, _elem) + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - if err := iprot.ReadListEnd(); err != nil { + p.FileFormat = _field + return nil +} +func (p *TPaimonFileDesc) ReadField12(iprot thrift.TProtocol) error { + _field := NewTPaimonDeletionFileDesc() + if err := _field.Read(iprot); err != nil { return err } + p.DeletionFile = _field return nil } - -func (p *THudiFileDesc) ReadField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *TPaimonFileDesc) ReadField13(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.NestedFields = make([]string, 0, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, err := iprot.ReadString(); err != nil { return err } else { - _elem = v + _key = v + } + + var _val string + if v, err := 
iprot.ReadString(); err != nil { + return err + } else { + _val = v } - p.NestedFields = append(p.NestedFields, _elem) + _field[_key] = _val } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadMapEnd(); err != nil { return err } + p.HadoopConf = _field return nil } +func (p *TPaimonFileDesc) ReadField14(iprot thrift.TProtocol) error { -func (p *THudiFileDesc) Write(oprot thrift.TProtocol) (err error) { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PaimonTable = _field + return nil +} + +func (p *TPaimonFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("THudiFileDesc"); err != nil { + if err = oprot.WriteStructBegin("TPaimonFileDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -10448,7 +10958,22 @@ func (p *THudiFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10467,12 +10992,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *THudiFileDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetInstantTime() { - if err = oprot.WriteFieldBegin("instant_time", thrift.STRING, 1); err != nil { +func (p *TPaimonFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonSplit() { + if err = oprot.WriteFieldBegin("paimon_split", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.InstantTime); err != nil { + if err := oprot.WriteString(*p.PaimonSplit); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10486,12 +11011,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *THudiFileDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetSerde() { - if err = oprot.WriteFieldBegin("serde", thrift.STRING, 2); err != nil { +func (p *TPaimonFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonColumnNames() { + if err = oprot.WriteFieldBegin("paimon_column_names", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Serde); err != nil { + if err := oprot.WriteString(*p.PaimonColumnNames); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10505,12 +11030,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *THudiFileDesc) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetInputFormat() { - if err = oprot.WriteFieldBegin("input_format", thrift.STRING, 3); err != nil { +func (p *TPaimonFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.InputFormat); err != nil { + if err := oprot.WriteString(*p.DbName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10524,12 +11049,12 @@ WriteFieldEndError: return 
thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *THudiFileDesc) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetBasePath() { - if err = oprot.WriteFieldBegin("base_path", thrift.STRING, 4); err != nil { +func (p *TPaimonFileDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.BasePath); err != nil { + if err := oprot.WriteString(*p.TableName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10543,12 +11068,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *THudiFileDesc) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetDataFilePath() { - if err = oprot.WriteFieldBegin("data_file_path", thrift.STRING, 5); err != nil { +func (p *TPaimonFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonPredicate() { + if err = oprot.WriteFieldBegin("paimon_predicate", thrift.STRING, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.DataFilePath); err != nil { + if err := oprot.WriteString(*p.PaimonPredicate); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10562,12 +11087,23 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *THudiFileDesc) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetDataFileLength() { - if err = oprot.WriteFieldBegin("data_file_length", thrift.I64, 6); err != nil { +func (p *TPaimonFileDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonOptions() { + if err = oprot.WriteFieldBegin("paimon_options", thrift.MAP, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.DataFileLength); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.PaimonOptions)); err != nil { + return err + } + for k, v := range p.PaimonOptions { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10581,20 +11117,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *THudiFileDesc) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetDeltaLogs() { - if err = oprot.WriteFieldBegin("delta_logs", thrift.LIST, 7); err != nil { +func (p *TPaimonFileDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetCtlId() { + if err = oprot.WriteFieldBegin("ctl_id", thrift.I64, 7); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.DeltaLogs)); err != nil { + if err := oprot.WriteI64(*p.CtlId); err != nil { return err } - for _, v := range p.DeltaLogs { - if err := oprot.WriteString(v); err != nil { - return err - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err := oprot.WriteListEnd(); err != nil { + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TPaimonFileDesc) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetDbId() { + if err = 
oprot.WriteFieldBegin("db_id", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DbId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10603,25 +11150,36 @@ func (p *THudiFileDesc) writeField7(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *THudiFileDesc) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnNames() { - if err = oprot.WriteFieldBegin("column_names", thrift.LIST, 8); err != nil { +func (p *TPaimonFileDesc) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTblId() { + if err = oprot.WriteFieldBegin("tbl_id", thrift.I64, 9); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnNames)); err != nil { + if err := oprot.WriteI64(*p.TblId); err != nil { return err } - for _, v := range p.ColumnNames { - if err := oprot.WriteString(v); err != nil { - return err - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err := oprot.WriteListEnd(); err != nil { + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TPaimonFileDesc) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetLastUpdateTime() { + if err = oprot.WriteFieldBegin("last_update_time", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.LastUpdateTime); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10630,25 +11188,36 @@ func (p *THudiFileDesc) writeField8(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *THudiFileDesc) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnTypes() { - if err = oprot.WriteFieldBegin("column_types", thrift.LIST, 9); err != nil { +func (p *TPaimonFileDesc) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetFileFormat() { + if err = oprot.WriteFieldBegin("file_format", thrift.STRING, 11); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnTypes)); err != nil { + if err := oprot.WriteString(*p.FileFormat); err != nil { return err } - for _, v := range p.ColumnTypes { - if err := oprot.WriteString(v); err != nil { - return err - } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err := oprot.WriteListEnd(); err != nil { + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TPaimonFileDesc) writeField12(oprot 
thrift.TProtocol) (err error) { + if p.IsSetDeletionFile() { + if err = oprot.WriteFieldBegin("deletion_file", thrift.STRUCT, 12); err != nil { + goto WriteFieldBeginError + } + if err := p.DeletionFile.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10657,25 +11226,28 @@ func (p *THudiFileDesc) writeField9(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } -func (p *THudiFileDesc) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetNestedFields() { - if err = oprot.WriteFieldBegin("nested_fields", thrift.LIST, 10); err != nil { +func (p *TPaimonFileDesc) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetHadoopConf() { + if err = oprot.WriteFieldBegin("hadoop_conf", thrift.MAP, 13); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.NestedFields)); err != nil { + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.HadoopConf)); err != nil { return err } - for _, v := range p.NestedFields { + for k, v := range p.HadoopConf { + if err := oprot.WriteString(k); err != nil { + return err + } if err := oprot.WriteString(v); err != nil { return err } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteMapEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -10684,233 +11256,467 @@ func (p *THudiFileDesc) writeField10(oprot thrift.TProtocol) (err error) { } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } -func (p *THudiFileDesc) String() string { +func (p *TPaimonFileDesc) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonTable() { + if err = oprot.WriteFieldBegin("paimon_table", thrift.STRING, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PaimonTable); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TPaimonFileDesc) String() string { if p == nil { return "" } - return fmt.Sprintf("THudiFileDesc(%+v)", *p) + return fmt.Sprintf("TPaimonFileDesc(%+v)", *p) + } -func (p *THudiFileDesc) DeepEqual(ano *THudiFileDesc) bool { +func (p *TPaimonFileDesc) DeepEqual(ano *TPaimonFileDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.InstantTime) { + if !p.Field1DeepEqual(ano.PaimonSplit) { return false } - if !p.Field2DeepEqual(ano.Serde) { + if !p.Field2DeepEqual(ano.PaimonColumnNames) { return false } - if !p.Field3DeepEqual(ano.InputFormat) { + if !p.Field3DeepEqual(ano.DbName) { return false } - if 
!p.Field4DeepEqual(ano.BasePath) { + if !p.Field4DeepEqual(ano.TableName) { return false } - if !p.Field5DeepEqual(ano.DataFilePath) { + if !p.Field5DeepEqual(ano.PaimonPredicate) { return false } - if !p.Field6DeepEqual(ano.DataFileLength) { + if !p.Field6DeepEqual(ano.PaimonOptions) { return false } - if !p.Field7DeepEqual(ano.DeltaLogs) { + if !p.Field7DeepEqual(ano.CtlId) { return false } - if !p.Field8DeepEqual(ano.ColumnNames) { + if !p.Field8DeepEqual(ano.DbId) { return false } - if !p.Field9DeepEqual(ano.ColumnTypes) { + if !p.Field9DeepEqual(ano.TblId) { return false } - if !p.Field10DeepEqual(ano.NestedFields) { + if !p.Field10DeepEqual(ano.LastUpdateTime) { + return false + } + if !p.Field11DeepEqual(ano.FileFormat) { + return false + } + if !p.Field12DeepEqual(ano.DeletionFile) { + return false + } + if !p.Field13DeepEqual(ano.HadoopConf) { + return false + } + if !p.Field14DeepEqual(ano.PaimonTable) { return false } return true } -func (p *THudiFileDesc) Field1DeepEqual(src *string) bool { +func (p *TPaimonFileDesc) Field1DeepEqual(src *string) bool { - if p.InstantTime == src { + if p.PaimonSplit == src { return true - } else if p.InstantTime == nil || src == nil { + } else if p.PaimonSplit == nil || src == nil { return false } - if strings.Compare(*p.InstantTime, *src) != 0 { + if strings.Compare(*p.PaimonSplit, *src) != 0 { return false } return true } -func (p *THudiFileDesc) Field2DeepEqual(src *string) bool { +func (p *TPaimonFileDesc) Field2DeepEqual(src *string) bool { - if p.Serde == src { + if p.PaimonColumnNames == src { return true - } else if p.Serde == nil || src == nil { + } else if p.PaimonColumnNames == nil || src == nil { return false } - if strings.Compare(*p.Serde, *src) != 0 { + if strings.Compare(*p.PaimonColumnNames, *src) != 0 { return false } return true } -func (p *THudiFileDesc) Field3DeepEqual(src *string) bool { +func (p *TPaimonFileDesc) Field3DeepEqual(src *string) bool { - if p.InputFormat == src { + if p.DbName == src { return true - } else if p.InputFormat == nil || src == nil { + } else if p.DbName == nil || src == nil { return false } - if strings.Compare(*p.InputFormat, *src) != 0 { + if strings.Compare(*p.DbName, *src) != 0 { return false } return true } -func (p *THudiFileDesc) Field4DeepEqual(src *string) bool { +func (p *TPaimonFileDesc) Field4DeepEqual(src *string) bool { - if p.BasePath == src { + if p.TableName == src { return true - } else if p.BasePath == nil || src == nil { + } else if p.TableName == nil || src == nil { return false } - if strings.Compare(*p.BasePath, *src) != 0 { + if strings.Compare(*p.TableName, *src) != 0 { return false } return true } -func (p *THudiFileDesc) Field5DeepEqual(src *string) bool { +func (p *TPaimonFileDesc) Field5DeepEqual(src *string) bool { - if p.DataFilePath == src { + if p.PaimonPredicate == src { return true - } else if p.DataFilePath == nil || src == nil { + } else if p.PaimonPredicate == nil || src == nil { return false } - if strings.Compare(*p.DataFilePath, *src) != 0 { + if strings.Compare(*p.PaimonPredicate, *src) != 0 { return false } return true } -func (p *THudiFileDesc) Field6DeepEqual(src *int64) bool { +func (p *TPaimonFileDesc) Field6DeepEqual(src map[string]string) bool { - if p.DataFileLength == src { + if len(p.PaimonOptions) != len(src) { + return false + } + for k, v := range p.PaimonOptions { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TPaimonFileDesc) Field7DeepEqual(src *int64) bool { + + if p.CtlId == src { 
return true - } else if p.DataFileLength == nil || src == nil { + } else if p.CtlId == nil || src == nil { return false } - if *p.DataFileLength != *src { + if *p.CtlId != *src { return false } return true } -func (p *THudiFileDesc) Field7DeepEqual(src []string) bool { +func (p *TPaimonFileDesc) Field8DeepEqual(src *int64) bool { - if len(p.DeltaLogs) != len(src) { + if p.DbId == src { + return true + } else if p.DbId == nil || src == nil { return false } - for i, v := range p.DeltaLogs { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } + if *p.DbId != *src { + return false } return true } -func (p *THudiFileDesc) Field8DeepEqual(src []string) bool { +func (p *TPaimonFileDesc) Field9DeepEqual(src *int64) bool { - if len(p.ColumnNames) != len(src) { + if p.TblId == src { + return true + } else if p.TblId == nil || src == nil { return false } - for i, v := range p.ColumnNames { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } + if *p.TblId != *src { + return false } return true } -func (p *THudiFileDesc) Field9DeepEqual(src []string) bool { +func (p *TPaimonFileDesc) Field10DeepEqual(src *int64) bool { - if len(p.ColumnTypes) != len(src) { + if p.LastUpdateTime == src { + return true + } else if p.LastUpdateTime == nil || src == nil { return false } - for i, v := range p.ColumnTypes { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } + if *p.LastUpdateTime != *src { + return false } return true } -func (p *THudiFileDesc) Field10DeepEqual(src []string) bool { +func (p *TPaimonFileDesc) Field11DeepEqual(src *string) bool { - if len(p.NestedFields) != len(src) { + if p.FileFormat == src { + return true + } else if p.FileFormat == nil || src == nil { return false } - for i, v := range p.NestedFields { - _src := src[i] + if strings.Compare(*p.FileFormat, *src) != 0 { + return false + } + return true +} +func (p *TPaimonFileDesc) Field12DeepEqual(src *TPaimonDeletionFileDesc) bool { + + if !p.DeletionFile.DeepEqual(src) { + return false + } + return true +} +func (p *TPaimonFileDesc) Field13DeepEqual(src map[string]string) bool { + + if len(p.HadoopConf) != len(src) { + return false + } + for k, v := range p.HadoopConf { + _src := src[k] if strings.Compare(v, _src) != 0 { return false } } return true } +func (p *TPaimonFileDesc) Field14DeepEqual(src *string) bool { -type TTransactionalHiveDeleteDeltaDesc struct { - DirectoryLocation *string `thrift:"directory_location,1,optional" frugal:"1,optional,string" json:"directory_location,omitempty"` - FileNames []string `thrift:"file_names,2,optional" frugal:"2,optional,list" json:"file_names,omitempty"` + if p.PaimonTable == src { + return true + } else if p.PaimonTable == nil || src == nil { + return false + } + if strings.Compare(*p.PaimonTable, *src) != 0 { + return false + } + return true } -func NewTTransactionalHiveDeleteDeltaDesc() *TTransactionalHiveDeleteDeltaDesc { - return &TTransactionalHiveDeleteDeltaDesc{} +type TTrinoConnectorFileDesc struct { + CatalogName *string `thrift:"catalog_name,1,optional" frugal:"1,optional,string" json:"catalog_name,omitempty"` + DbName *string `thrift:"db_name,2,optional" frugal:"2,optional,string" json:"db_name,omitempty"` + TableName *string `thrift:"table_name,3,optional" frugal:"3,optional,string" json:"table_name,omitempty"` + TrinoConnectorOptions map[string]string `thrift:"trino_connector_options,4,optional" frugal:"4,optional,map" json:"trino_connector_options,omitempty"` + TrinoConnectorTableHandle *string 
`thrift:"trino_connector_table_handle,5,optional" frugal:"5,optional,string" json:"trino_connector_table_handle,omitempty"` + TrinoConnectorColumnHandles *string `thrift:"trino_connector_column_handles,6,optional" frugal:"6,optional,string" json:"trino_connector_column_handles,omitempty"` + TrinoConnectorColumnMetadata *string `thrift:"trino_connector_column_metadata,7,optional" frugal:"7,optional,string" json:"trino_connector_column_metadata,omitempty"` + TrinoConnectorColumnNames *string `thrift:"trino_connector_column_names,8,optional" frugal:"8,optional,string" json:"trino_connector_column_names,omitempty"` + TrinoConnectorSplit *string `thrift:"trino_connector_split,9,optional" frugal:"9,optional,string" json:"trino_connector_split,omitempty"` + TrinoConnectorPredicate *string `thrift:"trino_connector_predicate,10,optional" frugal:"10,optional,string" json:"trino_connector_predicate,omitempty"` + TrinoConnectorTrascationHandle *string `thrift:"trino_connector_trascation_handle,11,optional" frugal:"11,optional,string" json:"trino_connector_trascation_handle,omitempty"` } -func (p *TTransactionalHiveDeleteDeltaDesc) InitDefault() { - *p = TTransactionalHiveDeleteDeltaDesc{} +func NewTTrinoConnectorFileDesc() *TTrinoConnectorFileDesc { + return &TTrinoConnectorFileDesc{} } -var TTransactionalHiveDeleteDeltaDesc_DirectoryLocation_DEFAULT string +func (p *TTrinoConnectorFileDesc) InitDefault() { +} -func (p *TTransactionalHiveDeleteDeltaDesc) GetDirectoryLocation() (v string) { - if !p.IsSetDirectoryLocation() { - return TTransactionalHiveDeleteDeltaDesc_DirectoryLocation_DEFAULT +var TTrinoConnectorFileDesc_CatalogName_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetCatalogName() (v string) { + if !p.IsSetCatalogName() { + return TTrinoConnectorFileDesc_CatalogName_DEFAULT } - return *p.DirectoryLocation + return *p.CatalogName } -var TTransactionalHiveDeleteDeltaDesc_FileNames_DEFAULT []string +var TTrinoConnectorFileDesc_DbName_DEFAULT string -func (p *TTransactionalHiveDeleteDeltaDesc) GetFileNames() (v []string) { - if !p.IsSetFileNames() { - return TTransactionalHiveDeleteDeltaDesc_FileNames_DEFAULT +func (p *TTrinoConnectorFileDesc) GetDbName() (v string) { + if !p.IsSetDbName() { + return TTrinoConnectorFileDesc_DbName_DEFAULT } - return p.FileNames + return *p.DbName } -func (p *TTransactionalHiveDeleteDeltaDesc) SetDirectoryLocation(val *string) { - p.DirectoryLocation = val + +var TTrinoConnectorFileDesc_TableName_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTableName() (v string) { + if !p.IsSetTableName() { + return TTrinoConnectorFileDesc_TableName_DEFAULT + } + return *p.TableName } -func (p *TTransactionalHiveDeleteDeltaDesc) SetFileNames(val []string) { - p.FileNames = val + +var TTrinoConnectorFileDesc_TrinoConnectorOptions_DEFAULT map[string]string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorOptions() (v map[string]string) { + if !p.IsSetTrinoConnectorOptions() { + return TTrinoConnectorFileDesc_TrinoConnectorOptions_DEFAULT + } + return p.TrinoConnectorOptions } -var fieldIDToName_TTransactionalHiveDeleteDeltaDesc = map[int16]string{ - 1: "directory_location", - 2: "file_names", +var TTrinoConnectorFileDesc_TrinoConnectorTableHandle_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorTableHandle() (v string) { + if !p.IsSetTrinoConnectorTableHandle() { + return TTrinoConnectorFileDesc_TrinoConnectorTableHandle_DEFAULT + } + return *p.TrinoConnectorTableHandle } -func (p *TTransactionalHiveDeleteDeltaDesc) 
IsSetDirectoryLocation() bool { - return p.DirectoryLocation != nil +var TTrinoConnectorFileDesc_TrinoConnectorColumnHandles_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorColumnHandles() (v string) { + if !p.IsSetTrinoConnectorColumnHandles() { + return TTrinoConnectorFileDesc_TrinoConnectorColumnHandles_DEFAULT + } + return *p.TrinoConnectorColumnHandles } -func (p *TTransactionalHiveDeleteDeltaDesc) IsSetFileNames() bool { - return p.FileNames != nil +var TTrinoConnectorFileDesc_TrinoConnectorColumnMetadata_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorColumnMetadata() (v string) { + if !p.IsSetTrinoConnectorColumnMetadata() { + return TTrinoConnectorFileDesc_TrinoConnectorColumnMetadata_DEFAULT + } + return *p.TrinoConnectorColumnMetadata } -func (p *TTransactionalHiveDeleteDeltaDesc) Read(iprot thrift.TProtocol) (err error) { +var TTrinoConnectorFileDesc_TrinoConnectorColumnNames_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorColumnNames() (v string) { + if !p.IsSetTrinoConnectorColumnNames() { + return TTrinoConnectorFileDesc_TrinoConnectorColumnNames_DEFAULT + } + return *p.TrinoConnectorColumnNames +} + +var TTrinoConnectorFileDesc_TrinoConnectorSplit_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorSplit() (v string) { + if !p.IsSetTrinoConnectorSplit() { + return TTrinoConnectorFileDesc_TrinoConnectorSplit_DEFAULT + } + return *p.TrinoConnectorSplit +} + +var TTrinoConnectorFileDesc_TrinoConnectorPredicate_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorPredicate() (v string) { + if !p.IsSetTrinoConnectorPredicate() { + return TTrinoConnectorFileDesc_TrinoConnectorPredicate_DEFAULT + } + return *p.TrinoConnectorPredicate +} + +var TTrinoConnectorFileDesc_TrinoConnectorTrascationHandle_DEFAULT string + +func (p *TTrinoConnectorFileDesc) GetTrinoConnectorTrascationHandle() (v string) { + if !p.IsSetTrinoConnectorTrascationHandle() { + return TTrinoConnectorFileDesc_TrinoConnectorTrascationHandle_DEFAULT + } + return *p.TrinoConnectorTrascationHandle +} +func (p *TTrinoConnectorFileDesc) SetCatalogName(val *string) { + p.CatalogName = val +} +func (p *TTrinoConnectorFileDesc) SetDbName(val *string) { + p.DbName = val +} +func (p *TTrinoConnectorFileDesc) SetTableName(val *string) { + p.TableName = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorOptions(val map[string]string) { + p.TrinoConnectorOptions = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorTableHandle(val *string) { + p.TrinoConnectorTableHandle = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorColumnHandles(val *string) { + p.TrinoConnectorColumnHandles = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorColumnMetadata(val *string) { + p.TrinoConnectorColumnMetadata = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorColumnNames(val *string) { + p.TrinoConnectorColumnNames = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorSplit(val *string) { + p.TrinoConnectorSplit = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorPredicate(val *string) { + p.TrinoConnectorPredicate = val +} +func (p *TTrinoConnectorFileDesc) SetTrinoConnectorTrascationHandle(val *string) { + p.TrinoConnectorTrascationHandle = val +} + +var fieldIDToName_TTrinoConnectorFileDesc = map[int16]string{ + 1: "catalog_name", + 2: "db_name", + 3: "table_name", + 4: "trino_connector_options", + 5: "trino_connector_table_handle", + 6: "trino_connector_column_handles", + 7: 
"trino_connector_column_metadata", + 8: "trino_connector_column_names", + 9: "trino_connector_split", + 10: "trino_connector_predicate", + 11: "trino_connector_trascation_handle", +} + +func (p *TTrinoConnectorFileDesc) IsSetCatalogName() bool { + return p.CatalogName != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetDbName() bool { + return p.DbName != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorOptions() bool { + return p.TrinoConnectorOptions != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorTableHandle() bool { + return p.TrinoConnectorTableHandle != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorColumnHandles() bool { + return p.TrinoConnectorColumnHandles != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorColumnMetadata() bool { + return p.TrinoConnectorColumnMetadata != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorColumnNames() bool { + return p.TrinoConnectorColumnNames != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorSplit() bool { + return p.TrinoConnectorSplit != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorPredicate() bool { + return p.TrinoConnectorPredicate != nil +} + +func (p *TTrinoConnectorFileDesc) IsSetTrinoConnectorTrascationHandle() bool { + return p.TrinoConnectorTrascationHandle != nil +} + +func (p *TTrinoConnectorFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -10934,27 +11740,94 @@ func (p *TTransactionalHiveDeleteDeltaDesc) Read(iprot thrift.TProtocol) (err er if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRING { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRING { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRING { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRING { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10969,7 +11842,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDeleteDeltaDesc[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTrinoConnectorFileDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -10979,40 +11852,149 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) ReadField1(iprot thrift.TProtocol) error { +func (p *TTrinoConnectorFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DirectoryLocation = &v + _field = &v } + p.CatalogName = _field return nil } +func (p *TTrinoConnectorFileDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *TTransactionalHiveDeleteDeltaDesc) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DbName = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableName = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.FileNames = make([]string, 0, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, err := iprot.ReadString(); err != nil { return err } else { - _elem = v + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v } - p.FileNames = append(p.FileNames, _elem) + _field[_key] = _val } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadMapEnd(); err != nil { return err } + p.TrinoConnectorOptions = _field return nil } +func (p *TTrinoConnectorFileDesc) ReadField5(iprot thrift.TProtocol) error { -func (p *TTransactionalHiveDeleteDeltaDesc) Write(oprot thrift.TProtocol) (err error) { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorTableHandle = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorColumnHandles = _field + return nil +} +func (p 
*TTrinoConnectorFileDesc) ReadField7(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorColumnMetadata = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField8(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorColumnNames = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField9(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorSplit = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField10(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorPredicate = _field + return nil +} +func (p *TTrinoConnectorFileDesc) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TrinoConnectorTrascationHandle = _field + return nil +} + +func (p *TTrinoConnectorFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTransactionalHiveDeleteDeltaDesc"); err != nil { + if err = oprot.WriteStructBegin("TTrinoConnectorFileDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11024,7 +12006,42 @@ func (p *TTransactionalHiveDeleteDeltaDesc) Write(oprot thrift.TProtocol) (err e fieldId = 2 goto WriteFieldError } - + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11043,12 +12060,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDirectoryLocation() { - if err = oprot.WriteFieldBegin("directory_location", thrift.STRING, 1); err != nil { +func (p *TTrinoConnectorFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogName() { + if err = oprot.WriteFieldBegin("catalog_name", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.DirectoryLocation); err != nil { + if err := oprot.WriteString(*p.CatalogName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11062,20 +12079,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetFileNames() { - if err = 
oprot.WriteFieldBegin("file_names", thrift.LIST, 2); err != nil { +func (p *TTrinoConnectorFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDbName() { + if err = oprot.WriteFieldBegin("db_name", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRING, len(p.FileNames)); err != nil { - return err - } - for _, v := range p.FileNames { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.DbName); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11089,241 +12098,156 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) String() string { - if p == nil { - return "" +func (p *TTrinoConnectorFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err = oprot.WriteFieldBegin("table_name", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return fmt.Sprintf("TTransactionalHiveDeleteDeltaDesc(%+v)", *p) + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) DeepEqual(ano *TTransactionalHiveDeleteDeltaDesc) bool { - if p == ano { - return true - } else if p == nil || ano == nil { - return false - } - if !p.Field1DeepEqual(ano.DirectoryLocation) { - return false - } - if !p.Field2DeepEqual(ano.FileNames) { - return false +func (p *TTrinoConnectorFileDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorOptions() { + if err = oprot.WriteFieldBegin("trino_connector_options", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.TrinoConnectorOptions)); err != nil { + return err + } + for k, v := range p.TrinoConnectorOptions { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) Field1DeepEqual(src *string) bool { - - if p.DirectoryLocation == src { - return true - } else if p.DirectoryLocation == nil || src == nil { - return false - } - if strings.Compare(*p.DirectoryLocation, *src) != 0 { - return false +func (p *TTrinoConnectorFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorTableHandle() { + if err = oprot.WriteFieldBegin("trino_connector_table_handle", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TrinoConnectorTableHandle); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return true + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) Field2DeepEqual(src []string) bool { - if len(p.FileNames) != len(src) { - return false - } - for i, v := range p.FileNames { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false +func (p *TTrinoConnectorFileDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorColumnHandles() { + if err = oprot.WriteFieldBegin("trino_connector_column_handles", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TrinoConnectorColumnHandles); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - return true + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -type TTransactionalHiveDesc struct { - Partition *string `thrift:"partition,1,optional" frugal:"1,optional,string" json:"partition,omitempty"` - DeleteDeltas []*TTransactionalHiveDeleteDeltaDesc `thrift:"delete_deltas,2,optional" frugal:"2,optional,list" json:"delete_deltas,omitempty"` +func (p *TTrinoConnectorFileDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorColumnMetadata() { + if err = oprot.WriteFieldBegin("trino_connector_column_metadata", thrift.STRING, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TrinoConnectorColumnMetadata); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func NewTTransactionalHiveDesc() *TTransactionalHiveDesc { - return &TTransactionalHiveDesc{} -} - -func (p *TTransactionalHiveDesc) InitDefault() { - *p = TTransactionalHiveDesc{} -} - -var TTransactionalHiveDesc_Partition_DEFAULT string - -func (p *TTransactionalHiveDesc) GetPartition() (v string) { - if !p.IsSetPartition() { - return TTransactionalHiveDesc_Partition_DEFAULT - } - return *p.Partition -} - -var TTransactionalHiveDesc_DeleteDeltas_DEFAULT []*TTransactionalHiveDeleteDeltaDesc - -func (p *TTransactionalHiveDesc) GetDeleteDeltas() (v []*TTransactionalHiveDeleteDeltaDesc) { - if !p.IsSetDeleteDeltas() { - return TTransactionalHiveDesc_DeleteDeltas_DEFAULT - } - return p.DeleteDeltas -} -func (p *TTransactionalHiveDesc) SetPartition(val *string) { - p.Partition = val -} -func (p *TTransactionalHiveDesc) SetDeleteDeltas(val []*TTransactionalHiveDeleteDeltaDesc) { - p.DeleteDeltas = val -} - -var fieldIDToName_TTransactionalHiveDesc = map[int16]string{ - 1: "partition", - 2: "delete_deltas", -} - -func (p *TTransactionalHiveDesc) IsSetPartition() bool { - return p.Partition != nil -} - -func (p *TTransactionalHiveDesc) IsSetDeleteDeltas() bool { - return p.DeleteDeltas != nil -} - -func (p *TTransactionalHiveDesc) Read(iprot thrift.TProtocol) (err error) { - - var fieldTypeId thrift.TType - var fieldId int16 - - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - 
goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break +func (p *TTrinoConnectorFileDesc) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorColumnNames() { + if err = oprot.WriteFieldBegin("trino_connector_column_names", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err := oprot.WriteString(*p.TrinoConnectorColumnNames); err != nil { + return err } - - if err = iprot.ReadFieldEnd(); err != nil { - goto ReadFieldEndError + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } } - if err = iprot.ReadStructEnd(); err != nil { - goto ReadStructEndError - } - - return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDesc[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TTransactionalHiveDesc) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Partition = &v - } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TTransactionalHiveDesc) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.DeleteDeltas = make([]*TTransactionalHiveDeleteDeltaDesc, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTransactionalHiveDeleteDeltaDesc() - if err := _elem.Read(iprot); err != nil { - return err +func (p *TTrinoConnectorFileDesc) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorSplit() { + if err = oprot.WriteFieldBegin("trino_connector_split", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError } - - p.DeleteDeltas = append(p.DeleteDeltas, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TTransactionalHiveDesc) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TTransactionalHiveDesc"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError + if err := oprot.WriteString(*p.TrinoConnectorSplit); err != nil { + return err } - if err = p.writeField2(oprot); err != nil { - fieldId = 2 - goto WriteFieldError + if err = oprot.WriteFieldEnd(); err != nil { + goto 
WriteFieldEndError } - - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TTransactionalHiveDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetPartition() { - if err = oprot.WriteFieldBegin("partition", thrift.STRING, 1); err != nil { +func (p *TTrinoConnectorFileDesc) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorPredicate() { + if err = oprot.WriteFieldBegin("trino_connector_predicate", thrift.STRING, 10); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Partition); err != nil { + if err := oprot.WriteString(*p.TrinoConnectorPredicate); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11332,25 +12256,17 @@ func (p *TTransactionalHiveDesc) writeField1(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TTransactionalHiveDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDeleteDeltas() { - if err = oprot.WriteFieldBegin("delete_deltas", thrift.LIST, 2); err != nil { +func (p *TTrinoConnectorFileDesc) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorTrascationHandle() { + if err = oprot.WriteFieldBegin("trino_connector_trascation_handle", thrift.STRING, 11); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DeleteDeltas)); err != nil { - return err - } - for _, v := range p.DeleteDeltas { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.TrinoConnectorTrascationHandle); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11359,164 +12275,263 @@ func (p *TTransactionalHiveDesc) writeField2(oprot thrift.TProtocol) (err error) } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TTransactionalHiveDesc) String() string { +func (p *TTrinoConnectorFileDesc) String() string { if p == nil { return "" } - return fmt.Sprintf("TTransactionalHiveDesc(%+v)", *p) + return fmt.Sprintf("TTrinoConnectorFileDesc(%+v)", *p) + } -func (p 
*TTransactionalHiveDesc) DeepEqual(ano *TTransactionalHiveDesc) bool { +func (p *TTrinoConnectorFileDesc) DeepEqual(ano *TTrinoConnectorFileDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Partition) { + if !p.Field1DeepEqual(ano.CatalogName) { return false } - if !p.Field2DeepEqual(ano.DeleteDeltas) { + if !p.Field2DeepEqual(ano.DbName) { + return false + } + if !p.Field3DeepEqual(ano.TableName) { + return false + } + if !p.Field4DeepEqual(ano.TrinoConnectorOptions) { + return false + } + if !p.Field5DeepEqual(ano.TrinoConnectorTableHandle) { + return false + } + if !p.Field6DeepEqual(ano.TrinoConnectorColumnHandles) { + return false + } + if !p.Field7DeepEqual(ano.TrinoConnectorColumnMetadata) { + return false + } + if !p.Field8DeepEqual(ano.TrinoConnectorColumnNames) { + return false + } + if !p.Field9DeepEqual(ano.TrinoConnectorSplit) { + return false + } + if !p.Field10DeepEqual(ano.TrinoConnectorPredicate) { + return false + } + if !p.Field11DeepEqual(ano.TrinoConnectorTrascationHandle) { return false } return true } -func (p *TTransactionalHiveDesc) Field1DeepEqual(src *string) bool { +func (p *TTrinoConnectorFileDesc) Field1DeepEqual(src *string) bool { - if p.Partition == src { + if p.CatalogName == src { return true - } else if p.Partition == nil || src == nil { + } else if p.CatalogName == nil || src == nil { return false } - if strings.Compare(*p.Partition, *src) != 0 { + if strings.Compare(*p.CatalogName, *src) != 0 { return false } return true } -func (p *TTransactionalHiveDesc) Field2DeepEqual(src []*TTransactionalHiveDeleteDeltaDesc) bool { +func (p *TTrinoConnectorFileDesc) Field2DeepEqual(src *string) bool { - if len(p.DeleteDeltas) != len(src) { + if p.DbName == src { + return true + } else if p.DbName == nil || src == nil { return false } - for i, v := range p.DeleteDeltas { - _src := src[i] - if !v.DeepEqual(_src) { + if strings.Compare(*p.DbName, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorFileDesc) Field3DeepEqual(src *string) bool { + + if p.TableName == src { + return true + } else if p.TableName == nil || src == nil { + return false + } + if strings.Compare(*p.TableName, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorFileDesc) Field4DeepEqual(src map[string]string) bool { + + if len(p.TrinoConnectorOptions) != len(src) { + return false + } + for k, v := range p.TrinoConnectorOptions { + _src := src[k] + if strings.Compare(v, _src) != 0 { return false } } return true } +func (p *TTrinoConnectorFileDesc) Field5DeepEqual(src *string) bool { -type TTableFormatFileDesc struct { - TableFormatType *string `thrift:"table_format_type,1,optional" frugal:"1,optional,string" json:"table_format_type,omitempty"` - IcebergParams *TIcebergFileDesc `thrift:"iceberg_params,2,optional" frugal:"2,optional,TIcebergFileDesc" json:"iceberg_params,omitempty"` - HudiParams *THudiFileDesc `thrift:"hudi_params,3,optional" frugal:"3,optional,THudiFileDesc" json:"hudi_params,omitempty"` - PaimonParams *TPaimonFileDesc `thrift:"paimon_params,4,optional" frugal:"4,optional,TPaimonFileDesc" json:"paimon_params,omitempty"` - TransactionalHiveParams *TTransactionalHiveDesc `thrift:"transactional_hive_params,5,optional" frugal:"5,optional,TTransactionalHiveDesc" json:"transactional_hive_params,omitempty"` + if p.TrinoConnectorTableHandle == src { + return true + } else if p.TrinoConnectorTableHandle == nil || src == nil { + return false + } + if 
strings.Compare(*p.TrinoConnectorTableHandle, *src) != 0 { + return false + } + return true } +func (p *TTrinoConnectorFileDesc) Field6DeepEqual(src *string) bool { -func NewTTableFormatFileDesc() *TTableFormatFileDesc { - return &TTableFormatFileDesc{} + if p.TrinoConnectorColumnHandles == src { + return true + } else if p.TrinoConnectorColumnHandles == nil || src == nil { + return false + } + if strings.Compare(*p.TrinoConnectorColumnHandles, *src) != 0 { + return false + } + return true } +func (p *TTrinoConnectorFileDesc) Field7DeepEqual(src *string) bool { -func (p *TTableFormatFileDesc) InitDefault() { - *p = TTableFormatFileDesc{} + if p.TrinoConnectorColumnMetadata == src { + return true + } else if p.TrinoConnectorColumnMetadata == nil || src == nil { + return false + } + if strings.Compare(*p.TrinoConnectorColumnMetadata, *src) != 0 { + return false + } + return true } +func (p *TTrinoConnectorFileDesc) Field8DeepEqual(src *string) bool { -var TTableFormatFileDesc_TableFormatType_DEFAULT string + if p.TrinoConnectorColumnNames == src { + return true + } else if p.TrinoConnectorColumnNames == nil || src == nil { + return false + } + if strings.Compare(*p.TrinoConnectorColumnNames, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorFileDesc) Field9DeepEqual(src *string) bool { -func (p *TTableFormatFileDesc) GetTableFormatType() (v string) { - if !p.IsSetTableFormatType() { - return TTableFormatFileDesc_TableFormatType_DEFAULT + if p.TrinoConnectorSplit == src { + return true + } else if p.TrinoConnectorSplit == nil || src == nil { + return false } - return *p.TableFormatType + if strings.Compare(*p.TrinoConnectorSplit, *src) != 0 { + return false + } + return true } +func (p *TTrinoConnectorFileDesc) Field10DeepEqual(src *string) bool { -var TTableFormatFileDesc_IcebergParams_DEFAULT *TIcebergFileDesc + if p.TrinoConnectorPredicate == src { + return true + } else if p.TrinoConnectorPredicate == nil || src == nil { + return false + } + if strings.Compare(*p.TrinoConnectorPredicate, *src) != 0 { + return false + } + return true +} +func (p *TTrinoConnectorFileDesc) Field11DeepEqual(src *string) bool { -func (p *TTableFormatFileDesc) GetIcebergParams() (v *TIcebergFileDesc) { - if !p.IsSetIcebergParams() { - return TTableFormatFileDesc_IcebergParams_DEFAULT + if p.TrinoConnectorTrascationHandle == src { + return true + } else if p.TrinoConnectorTrascationHandle == nil || src == nil { + return false } - return p.IcebergParams + if strings.Compare(*p.TrinoConnectorTrascationHandle, *src) != 0 { + return false + } + return true } -var TTableFormatFileDesc_HudiParams_DEFAULT *THudiFileDesc +type TMaxComputeFileDesc struct { + PartitionSpec *string `thrift:"partition_spec,1,optional" frugal:"1,optional,string" json:"partition_spec,omitempty"` + SessionId *string `thrift:"session_id,2,optional" frugal:"2,optional,string" json:"session_id,omitempty"` + TableBatchReadSession *string `thrift:"table_batch_read_session,3,optional" frugal:"3,optional,string" json:"table_batch_read_session,omitempty"` +} -func (p *TTableFormatFileDesc) GetHudiParams() (v *THudiFileDesc) { - if !p.IsSetHudiParams() { - return TTableFormatFileDesc_HudiParams_DEFAULT - } - return p.HudiParams +func NewTMaxComputeFileDesc() *TMaxComputeFileDesc { + return &TMaxComputeFileDesc{} } -var TTableFormatFileDesc_PaimonParams_DEFAULT *TPaimonFileDesc +func (p *TMaxComputeFileDesc) InitDefault() { +} -func (p *TTableFormatFileDesc) GetPaimonParams() (v *TPaimonFileDesc) { - if !p.IsSetPaimonParams() { - 
return TTableFormatFileDesc_PaimonParams_DEFAULT +var TMaxComputeFileDesc_PartitionSpec_DEFAULT string + +func (p *TMaxComputeFileDesc) GetPartitionSpec() (v string) { + if !p.IsSetPartitionSpec() { + return TMaxComputeFileDesc_PartitionSpec_DEFAULT } - return p.PaimonParams + return *p.PartitionSpec } -var TTableFormatFileDesc_TransactionalHiveParams_DEFAULT *TTransactionalHiveDesc +var TMaxComputeFileDesc_SessionId_DEFAULT string -func (p *TTableFormatFileDesc) GetTransactionalHiveParams() (v *TTransactionalHiveDesc) { - if !p.IsSetTransactionalHiveParams() { - return TTableFormatFileDesc_TransactionalHiveParams_DEFAULT +func (p *TMaxComputeFileDesc) GetSessionId() (v string) { + if !p.IsSetSessionId() { + return TMaxComputeFileDesc_SessionId_DEFAULT } - return p.TransactionalHiveParams -} -func (p *TTableFormatFileDesc) SetTableFormatType(val *string) { - p.TableFormatType = val -} -func (p *TTableFormatFileDesc) SetIcebergParams(val *TIcebergFileDesc) { - p.IcebergParams = val -} -func (p *TTableFormatFileDesc) SetHudiParams(val *THudiFileDesc) { - p.HudiParams = val + return *p.SessionId } -func (p *TTableFormatFileDesc) SetPaimonParams(val *TPaimonFileDesc) { - p.PaimonParams = val + +var TMaxComputeFileDesc_TableBatchReadSession_DEFAULT string + +func (p *TMaxComputeFileDesc) GetTableBatchReadSession() (v string) { + if !p.IsSetTableBatchReadSession() { + return TMaxComputeFileDesc_TableBatchReadSession_DEFAULT + } + return *p.TableBatchReadSession } -func (p *TTableFormatFileDesc) SetTransactionalHiveParams(val *TTransactionalHiveDesc) { - p.TransactionalHiveParams = val +func (p *TMaxComputeFileDesc) SetPartitionSpec(val *string) { + p.PartitionSpec = val } - -var fieldIDToName_TTableFormatFileDesc = map[int16]string{ - 1: "table_format_type", - 2: "iceberg_params", - 3: "hudi_params", - 4: "paimon_params", - 5: "transactional_hive_params", +func (p *TMaxComputeFileDesc) SetSessionId(val *string) { + p.SessionId = val } - -func (p *TTableFormatFileDesc) IsSetTableFormatType() bool { - return p.TableFormatType != nil +func (p *TMaxComputeFileDesc) SetTableBatchReadSession(val *string) { + p.TableBatchReadSession = val } -func (p *TTableFormatFileDesc) IsSetIcebergParams() bool { - return p.IcebergParams != nil +var fieldIDToName_TMaxComputeFileDesc = map[int16]string{ + 1: "partition_spec", + 2: "session_id", + 3: "table_batch_read_session", } -func (p *TTableFormatFileDesc) IsSetHudiParams() bool { - return p.HudiParams != nil +func (p *TMaxComputeFileDesc) IsSetPartitionSpec() bool { + return p.PartitionSpec != nil } -func (p *TTableFormatFileDesc) IsSetPaimonParams() bool { - return p.PaimonParams != nil +func (p *TMaxComputeFileDesc) IsSetSessionId() bool { + return p.SessionId != nil } -func (p *TTableFormatFileDesc) IsSetTransactionalHiveParams() bool { - return p.TransactionalHiveParams != nil +func (p *TMaxComputeFileDesc) IsSetTableBatchReadSession() bool { + return p.TableBatchReadSession != nil } -func (p *TTableFormatFileDesc) Read(iprot thrift.TProtocol) (err error) { +func (p *TMaxComputeFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -11540,57 +12555,30 @@ func (p *TTableFormatFileDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRUCT { + if 
fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11605,7 +12593,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFormatFileDesc[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMaxComputeFileDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -11615,50 +12603,43 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableFormatFileDesc) ReadField1(iprot thrift.TProtocol) error { +func (p *TMaxComputeFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableFormatType = &v - } - return nil -} - -func (p *TTableFormatFileDesc) ReadField2(iprot thrift.TProtocol) error { - p.IcebergParams = NewTIcebergFileDesc() - if err := p.IcebergParams.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TTableFormatFileDesc) ReadField3(iprot thrift.TProtocol) error { - p.HudiParams = NewTHudiFileDesc() - if err := p.HudiParams.Read(iprot); err != nil { - return err + _field = &v } + p.PartitionSpec = _field return nil } +func (p *TMaxComputeFileDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *TTableFormatFileDesc) ReadField4(iprot thrift.TProtocol) error { - p.PaimonParams = NewTPaimonFileDesc() - if err := p.PaimonParams.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.SessionId = _field return nil } +func (p *TMaxComputeFileDesc) ReadField3(iprot thrift.TProtocol) error { -func (p *TTableFormatFileDesc) ReadField5(iprot thrift.TProtocol) error { - p.TransactionalHiveParams = NewTTransactionalHiveDesc() - if err := p.TransactionalHiveParams.Read(iprot); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.TableBatchReadSession = _field return nil } -func (p *TTableFormatFileDesc) Write(oprot thrift.TProtocol) (err error) { +func (p *TMaxComputeFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = 
oprot.WriteStructBegin("TTableFormatFileDesc"); err != nil { + if err = oprot.WriteStructBegin("TMaxComputeFileDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -11674,15 +12655,6 @@ func (p *TTableFormatFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11701,12 +12673,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTableFormatFileDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTableFormatType() { - if err = oprot.WriteFieldBegin("table_format_type", thrift.STRING, 1); err != nil { +func (p *TMaxComputeFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionSpec() { + if err = oprot.WriteFieldBegin("partition_spec", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.TableFormatType); err != nil { + if err := oprot.WriteString(*p.PartitionSpec); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11720,12 +12692,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTableFormatFileDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetIcebergParams() { - if err = oprot.WriteFieldBegin("iceberg_params", thrift.STRUCT, 2); err != nil { +func (p *TMaxComputeFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSessionId() { + if err = oprot.WriteFieldBegin("session_id", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := p.IcebergParams.Write(oprot); err != nil { + if err := oprot.WriteString(*p.SessionId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11739,12 +12711,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TTableFormatFileDesc) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetHudiParams() { - if err = oprot.WriteFieldBegin("hudi_params", thrift.STRUCT, 3); err != nil { +func (p *TMaxComputeFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableBatchReadSession() { + if err = oprot.WriteFieldBegin("table_batch_read_session", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := p.HudiParams.Write(oprot); err != nil { + if err := oprot.WriteString(*p.TableBatchReadSession); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -11758,527 +12730,263 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TTableFormatFileDesc) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetPaimonParams() { - if err = oprot.WriteFieldBegin("paimon_params", thrift.STRUCT, 4); err != nil { - goto WriteFieldBeginError - } - if err := p.PaimonParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TTableFormatFileDesc) writeField5(oprot thrift.TProtocol) (err error) { 
- if p.IsSetTransactionalHiveParams() { - if err = oprot.WriteFieldBegin("transactional_hive_params", thrift.STRUCT, 5); err != nil { - goto WriteFieldBeginError - } - if err := p.TransactionalHiveParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TTableFormatFileDesc) String() string { +func (p *TMaxComputeFileDesc) String() string { if p == nil { return "" } - return fmt.Sprintf("TTableFormatFileDesc(%+v)", *p) + return fmt.Sprintf("TMaxComputeFileDesc(%+v)", *p) + } -func (p *TTableFormatFileDesc) DeepEqual(ano *TTableFormatFileDesc) bool { +func (p *TMaxComputeFileDesc) DeepEqual(ano *TMaxComputeFileDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.TableFormatType) { - return false - } - if !p.Field2DeepEqual(ano.IcebergParams) { - return false - } - if !p.Field3DeepEqual(ano.HudiParams) { + if !p.Field1DeepEqual(ano.PartitionSpec) { return false } - if !p.Field4DeepEqual(ano.PaimonParams) { + if !p.Field2DeepEqual(ano.SessionId) { return false } - if !p.Field5DeepEqual(ano.TransactionalHiveParams) { + if !p.Field3DeepEqual(ano.TableBatchReadSession) { return false } return true } -func (p *TTableFormatFileDesc) Field1DeepEqual(src *string) bool { +func (p *TMaxComputeFileDesc) Field1DeepEqual(src *string) bool { - if p.TableFormatType == src { + if p.PartitionSpec == src { return true - } else if p.TableFormatType == nil || src == nil { + } else if p.PartitionSpec == nil || src == nil { return false } - if strings.Compare(*p.TableFormatType, *src) != 0 { + if strings.Compare(*p.PartitionSpec, *src) != 0 { return false } return true } -func (p *TTableFormatFileDesc) Field2DeepEqual(src *TIcebergFileDesc) bool { +func (p *TMaxComputeFileDesc) Field2DeepEqual(src *string) bool { - if !p.IcebergParams.DeepEqual(src) { + if p.SessionId == src { + return true + } else if p.SessionId == nil || src == nil { return false } - return true -} -func (p *TTableFormatFileDesc) Field3DeepEqual(src *THudiFileDesc) bool { - - if !p.HudiParams.DeepEqual(src) { + if strings.Compare(*p.SessionId, *src) != 0 { return false } return true } -func (p *TTableFormatFileDesc) Field4DeepEqual(src *TPaimonFileDesc) bool { +func (p *TMaxComputeFileDesc) Field3DeepEqual(src *string) bool { - if !p.PaimonParams.DeepEqual(src) { + if p.TableBatchReadSession == src { + return true + } else if p.TableBatchReadSession == nil || src == nil { return false } - return true -} -func (p *TTableFormatFileDesc) Field5DeepEqual(src *TTransactionalHiveDesc) bool { - - if !p.TransactionalHiveParams.DeepEqual(src) { + if strings.Compare(*p.TableBatchReadSession, *src) != 0 { return false } return true } -type TFileScanRangeParams struct { - FileType *types.TFileType `thrift:"file_type,1,optional" frugal:"1,optional,TFileType" json:"file_type,omitempty"` - FormatType *TFileFormatType `thrift:"format_type,2,optional" frugal:"2,optional,TFileFormatType" json:"format_type,omitempty"` - CompressType *TFileCompressType `thrift:"compress_type,3,optional" frugal:"3,optional,TFileCompressType" json:"compress_type,omitempty"` - SrcTupleId *types.TTupleId `thrift:"src_tuple_id,4,optional" frugal:"4,optional,i32" json:"src_tuple_id,omitempty"` - DestTupleId 
*types.TTupleId `thrift:"dest_tuple_id,5,optional" frugal:"5,optional,i32" json:"dest_tuple_id,omitempty"` - NumOfColumnsFromFile *int32 `thrift:"num_of_columns_from_file,6,optional" frugal:"6,optional,i32" json:"num_of_columns_from_file,omitempty"` - RequiredSlots []*TFileScanSlotInfo `thrift:"required_slots,7,optional" frugal:"7,optional,list" json:"required_slots,omitempty"` - HdfsParams *THdfsParams `thrift:"hdfs_params,8,optional" frugal:"8,optional,THdfsParams" json:"hdfs_params,omitempty"` - Properties map[string]string `thrift:"properties,9,optional" frugal:"9,optional,map" json:"properties,omitempty"` - ExprOfDestSlot map[types.TSlotId]*exprs.TExpr `thrift:"expr_of_dest_slot,10,optional" frugal:"10,optional,map" json:"expr_of_dest_slot,omitempty"` - DefaultValueOfSrcSlot map[types.TSlotId]*exprs.TExpr `thrift:"default_value_of_src_slot,11,optional" frugal:"11,optional,map" json:"default_value_of_src_slot,omitempty"` - DestSidToSrcSidWithoutTrans map[types.TSlotId]types.TSlotId `thrift:"dest_sid_to_src_sid_without_trans,12,optional" frugal:"12,optional,map" json:"dest_sid_to_src_sid_without_trans,omitempty"` - StrictMode *bool `thrift:"strict_mode,13,optional" frugal:"13,optional,bool" json:"strict_mode,omitempty"` - BrokerAddresses []*types.TNetworkAddress `thrift:"broker_addresses,14,optional" frugal:"14,optional,list" json:"broker_addresses,omitempty"` - FileAttributes *TFileAttributes `thrift:"file_attributes,15,optional" frugal:"15,optional,TFileAttributes" json:"file_attributes,omitempty"` - PreFilterExprs *exprs.TExpr `thrift:"pre_filter_exprs,16,optional" frugal:"16,optional,exprs.TExpr" json:"pre_filter_exprs,omitempty"` - TableFormatParams *TTableFormatFileDesc `thrift:"table_format_params,17,optional" frugal:"17,optional,TTableFormatFileDesc" json:"table_format_params,omitempty"` - ColumnIdxs []int32 `thrift:"column_idxs,18,optional" frugal:"18,optional,list" json:"column_idxs,omitempty"` - SlotNameToSchemaPos map[string]int32 `thrift:"slot_name_to_schema_pos,19,optional" frugal:"19,optional,map" json:"slot_name_to_schema_pos,omitempty"` - PreFilterExprsList []*exprs.TExpr `thrift:"pre_filter_exprs_list,20,optional" frugal:"20,optional,list" json:"pre_filter_exprs_list,omitempty"` - LoadId *types.TUniqueId `thrift:"load_id,21,optional" frugal:"21,optional,types.TUniqueId" json:"load_id,omitempty"` - TextSerdeType *TTextSerdeType `thrift:"text_serde_type,22,optional" frugal:"22,optional,TTextSerdeType" json:"text_serde_type,omitempty"` -} - -func NewTFileScanRangeParams() *TFileScanRangeParams { - return &TFileScanRangeParams{} -} - -func (p *TFileScanRangeParams) InitDefault() { - *p = TFileScanRangeParams{} -} - -var TFileScanRangeParams_FileType_DEFAULT types.TFileType - -func (p *TFileScanRangeParams) GetFileType() (v types.TFileType) { - if !p.IsSetFileType() { - return TFileScanRangeParams_FileType_DEFAULT - } - return *p.FileType -} - -var TFileScanRangeParams_FormatType_DEFAULT TFileFormatType - -func (p *TFileScanRangeParams) GetFormatType() (v TFileFormatType) { - if !p.IsSetFormatType() { - return TFileScanRangeParams_FormatType_DEFAULT - } - return *p.FormatType -} - -var TFileScanRangeParams_CompressType_DEFAULT TFileCompressType - -func (p *TFileScanRangeParams) GetCompressType() (v TFileCompressType) { - if !p.IsSetCompressType() { - return TFileScanRangeParams_CompressType_DEFAULT - } - return *p.CompressType -} - -var TFileScanRangeParams_SrcTupleId_DEFAULT types.TTupleId - -func (p *TFileScanRangeParams) GetSrcTupleId() (v types.TTupleId) { - if 
!p.IsSetSrcTupleId() { - return TFileScanRangeParams_SrcTupleId_DEFAULT - } - return *p.SrcTupleId -} - -var TFileScanRangeParams_DestTupleId_DEFAULT types.TTupleId - -func (p *TFileScanRangeParams) GetDestTupleId() (v types.TTupleId) { - if !p.IsSetDestTupleId() { - return TFileScanRangeParams_DestTupleId_DEFAULT - } - return *p.DestTupleId -} - -var TFileScanRangeParams_NumOfColumnsFromFile_DEFAULT int32 - -func (p *TFileScanRangeParams) GetNumOfColumnsFromFile() (v int32) { - if !p.IsSetNumOfColumnsFromFile() { - return TFileScanRangeParams_NumOfColumnsFromFile_DEFAULT - } - return *p.NumOfColumnsFromFile -} - -var TFileScanRangeParams_RequiredSlots_DEFAULT []*TFileScanSlotInfo - -func (p *TFileScanRangeParams) GetRequiredSlots() (v []*TFileScanSlotInfo) { - if !p.IsSetRequiredSlots() { - return TFileScanRangeParams_RequiredSlots_DEFAULT - } - return p.RequiredSlots -} - -var TFileScanRangeParams_HdfsParams_DEFAULT *THdfsParams - -func (p *TFileScanRangeParams) GetHdfsParams() (v *THdfsParams) { - if !p.IsSetHdfsParams() { - return TFileScanRangeParams_HdfsParams_DEFAULT - } - return p.HdfsParams -} - -var TFileScanRangeParams_Properties_DEFAULT map[string]string - -func (p *TFileScanRangeParams) GetProperties() (v map[string]string) { - if !p.IsSetProperties() { - return TFileScanRangeParams_Properties_DEFAULT - } - return p.Properties -} - -var TFileScanRangeParams_ExprOfDestSlot_DEFAULT map[types.TSlotId]*exprs.TExpr - -func (p *TFileScanRangeParams) GetExprOfDestSlot() (v map[types.TSlotId]*exprs.TExpr) { - if !p.IsSetExprOfDestSlot() { - return TFileScanRangeParams_ExprOfDestSlot_DEFAULT - } - return p.ExprOfDestSlot +type THudiFileDesc struct { + InstantTime *string `thrift:"instant_time,1,optional" frugal:"1,optional,string" json:"instant_time,omitempty"` + Serde *string `thrift:"serde,2,optional" frugal:"2,optional,string" json:"serde,omitempty"` + InputFormat *string `thrift:"input_format,3,optional" frugal:"3,optional,string" json:"input_format,omitempty"` + BasePath *string `thrift:"base_path,4,optional" frugal:"4,optional,string" json:"base_path,omitempty"` + DataFilePath *string `thrift:"data_file_path,5,optional" frugal:"5,optional,string" json:"data_file_path,omitempty"` + DataFileLength *int64 `thrift:"data_file_length,6,optional" frugal:"6,optional,i64" json:"data_file_length,omitempty"` + DeltaLogs []string `thrift:"delta_logs,7,optional" frugal:"7,optional,list" json:"delta_logs,omitempty"` + ColumnNames []string `thrift:"column_names,8,optional" frugal:"8,optional,list" json:"column_names,omitempty"` + ColumnTypes []string `thrift:"column_types,9,optional" frugal:"9,optional,list" json:"column_types,omitempty"` + NestedFields []string `thrift:"nested_fields,10,optional" frugal:"10,optional,list" json:"nested_fields,omitempty"` } -var TFileScanRangeParams_DefaultValueOfSrcSlot_DEFAULT map[types.TSlotId]*exprs.TExpr - -func (p *TFileScanRangeParams) GetDefaultValueOfSrcSlot() (v map[types.TSlotId]*exprs.TExpr) { - if !p.IsSetDefaultValueOfSrcSlot() { - return TFileScanRangeParams_DefaultValueOfSrcSlot_DEFAULT - } - return p.DefaultValueOfSrcSlot +func NewTHudiFileDesc() *THudiFileDesc { + return &THudiFileDesc{} } -var TFileScanRangeParams_DestSidToSrcSidWithoutTrans_DEFAULT map[types.TSlotId]types.TSlotId - -func (p *TFileScanRangeParams) GetDestSidToSrcSidWithoutTrans() (v map[types.TSlotId]types.TSlotId) { - if !p.IsSetDestSidToSrcSidWithoutTrans() { - return TFileScanRangeParams_DestSidToSrcSidWithoutTrans_DEFAULT - } - return p.DestSidToSrcSidWithoutTrans +func (p 
*THudiFileDesc) InitDefault() { } -var TFileScanRangeParams_StrictMode_DEFAULT bool +var THudiFileDesc_InstantTime_DEFAULT string -func (p *TFileScanRangeParams) GetStrictMode() (v bool) { - if !p.IsSetStrictMode() { - return TFileScanRangeParams_StrictMode_DEFAULT +func (p *THudiFileDesc) GetInstantTime() (v string) { + if !p.IsSetInstantTime() { + return THudiFileDesc_InstantTime_DEFAULT } - return *p.StrictMode + return *p.InstantTime } -var TFileScanRangeParams_BrokerAddresses_DEFAULT []*types.TNetworkAddress +var THudiFileDesc_Serde_DEFAULT string -func (p *TFileScanRangeParams) GetBrokerAddresses() (v []*types.TNetworkAddress) { - if !p.IsSetBrokerAddresses() { - return TFileScanRangeParams_BrokerAddresses_DEFAULT +func (p *THudiFileDesc) GetSerde() (v string) { + if !p.IsSetSerde() { + return THudiFileDesc_Serde_DEFAULT } - return p.BrokerAddresses + return *p.Serde } -var TFileScanRangeParams_FileAttributes_DEFAULT *TFileAttributes +var THudiFileDesc_InputFormat_DEFAULT string -func (p *TFileScanRangeParams) GetFileAttributes() (v *TFileAttributes) { - if !p.IsSetFileAttributes() { - return TFileScanRangeParams_FileAttributes_DEFAULT +func (p *THudiFileDesc) GetInputFormat() (v string) { + if !p.IsSetInputFormat() { + return THudiFileDesc_InputFormat_DEFAULT } - return p.FileAttributes + return *p.InputFormat } -var TFileScanRangeParams_PreFilterExprs_DEFAULT *exprs.TExpr +var THudiFileDesc_BasePath_DEFAULT string -func (p *TFileScanRangeParams) GetPreFilterExprs() (v *exprs.TExpr) { - if !p.IsSetPreFilterExprs() { - return TFileScanRangeParams_PreFilterExprs_DEFAULT +func (p *THudiFileDesc) GetBasePath() (v string) { + if !p.IsSetBasePath() { + return THudiFileDesc_BasePath_DEFAULT } - return p.PreFilterExprs + return *p.BasePath } -var TFileScanRangeParams_TableFormatParams_DEFAULT *TTableFormatFileDesc +var THudiFileDesc_DataFilePath_DEFAULT string -func (p *TFileScanRangeParams) GetTableFormatParams() (v *TTableFormatFileDesc) { - if !p.IsSetTableFormatParams() { - return TFileScanRangeParams_TableFormatParams_DEFAULT +func (p *THudiFileDesc) GetDataFilePath() (v string) { + if !p.IsSetDataFilePath() { + return THudiFileDesc_DataFilePath_DEFAULT } - return p.TableFormatParams + return *p.DataFilePath } -var TFileScanRangeParams_ColumnIdxs_DEFAULT []int32 +var THudiFileDesc_DataFileLength_DEFAULT int64 -func (p *TFileScanRangeParams) GetColumnIdxs() (v []int32) { - if !p.IsSetColumnIdxs() { - return TFileScanRangeParams_ColumnIdxs_DEFAULT +func (p *THudiFileDesc) GetDataFileLength() (v int64) { + if !p.IsSetDataFileLength() { + return THudiFileDesc_DataFileLength_DEFAULT } - return p.ColumnIdxs + return *p.DataFileLength } -var TFileScanRangeParams_SlotNameToSchemaPos_DEFAULT map[string]int32 +var THudiFileDesc_DeltaLogs_DEFAULT []string -func (p *TFileScanRangeParams) GetSlotNameToSchemaPos() (v map[string]int32) { - if !p.IsSetSlotNameToSchemaPos() { - return TFileScanRangeParams_SlotNameToSchemaPos_DEFAULT +func (p *THudiFileDesc) GetDeltaLogs() (v []string) { + if !p.IsSetDeltaLogs() { + return THudiFileDesc_DeltaLogs_DEFAULT } - return p.SlotNameToSchemaPos + return p.DeltaLogs } -var TFileScanRangeParams_PreFilterExprsList_DEFAULT []*exprs.TExpr +var THudiFileDesc_ColumnNames_DEFAULT []string -func (p *TFileScanRangeParams) GetPreFilterExprsList() (v []*exprs.TExpr) { - if !p.IsSetPreFilterExprsList() { - return TFileScanRangeParams_PreFilterExprsList_DEFAULT +func (p *THudiFileDesc) GetColumnNames() (v []string) { + if !p.IsSetColumnNames() { + return 
THudiFileDesc_ColumnNames_DEFAULT } - return p.PreFilterExprsList + return p.ColumnNames } -var TFileScanRangeParams_LoadId_DEFAULT *types.TUniqueId +var THudiFileDesc_ColumnTypes_DEFAULT []string -func (p *TFileScanRangeParams) GetLoadId() (v *types.TUniqueId) { - if !p.IsSetLoadId() { - return TFileScanRangeParams_LoadId_DEFAULT +func (p *THudiFileDesc) GetColumnTypes() (v []string) { + if !p.IsSetColumnTypes() { + return THudiFileDesc_ColumnTypes_DEFAULT } - return p.LoadId + return p.ColumnTypes } -var TFileScanRangeParams_TextSerdeType_DEFAULT TTextSerdeType +var THudiFileDesc_NestedFields_DEFAULT []string -func (p *TFileScanRangeParams) GetTextSerdeType() (v TTextSerdeType) { - if !p.IsSetTextSerdeType() { - return TFileScanRangeParams_TextSerdeType_DEFAULT +func (p *THudiFileDesc) GetNestedFields() (v []string) { + if !p.IsSetNestedFields() { + return THudiFileDesc_NestedFields_DEFAULT } - return *p.TextSerdeType -} -func (p *TFileScanRangeParams) SetFileType(val *types.TFileType) { - p.FileType = val -} -func (p *TFileScanRangeParams) SetFormatType(val *TFileFormatType) { - p.FormatType = val -} -func (p *TFileScanRangeParams) SetCompressType(val *TFileCompressType) { - p.CompressType = val -} -func (p *TFileScanRangeParams) SetSrcTupleId(val *types.TTupleId) { - p.SrcTupleId = val -} -func (p *TFileScanRangeParams) SetDestTupleId(val *types.TTupleId) { - p.DestTupleId = val -} -func (p *TFileScanRangeParams) SetNumOfColumnsFromFile(val *int32) { - p.NumOfColumnsFromFile = val -} -func (p *TFileScanRangeParams) SetRequiredSlots(val []*TFileScanSlotInfo) { - p.RequiredSlots = val -} -func (p *TFileScanRangeParams) SetHdfsParams(val *THdfsParams) { - p.HdfsParams = val -} -func (p *TFileScanRangeParams) SetProperties(val map[string]string) { - p.Properties = val -} -func (p *TFileScanRangeParams) SetExprOfDestSlot(val map[types.TSlotId]*exprs.TExpr) { - p.ExprOfDestSlot = val -} -func (p *TFileScanRangeParams) SetDefaultValueOfSrcSlot(val map[types.TSlotId]*exprs.TExpr) { - p.DefaultValueOfSrcSlot = val -} -func (p *TFileScanRangeParams) SetDestSidToSrcSidWithoutTrans(val map[types.TSlotId]types.TSlotId) { - p.DestSidToSrcSidWithoutTrans = val -} -func (p *TFileScanRangeParams) SetStrictMode(val *bool) { - p.StrictMode = val -} -func (p *TFileScanRangeParams) SetBrokerAddresses(val []*types.TNetworkAddress) { - p.BrokerAddresses = val -} -func (p *TFileScanRangeParams) SetFileAttributes(val *TFileAttributes) { - p.FileAttributes = val -} -func (p *TFileScanRangeParams) SetPreFilterExprs(val *exprs.TExpr) { - p.PreFilterExprs = val -} -func (p *TFileScanRangeParams) SetTableFormatParams(val *TTableFormatFileDesc) { - p.TableFormatParams = val -} -func (p *TFileScanRangeParams) SetColumnIdxs(val []int32) { - p.ColumnIdxs = val -} -func (p *TFileScanRangeParams) SetSlotNameToSchemaPos(val map[string]int32) { - p.SlotNameToSchemaPos = val -} -func (p *TFileScanRangeParams) SetPreFilterExprsList(val []*exprs.TExpr) { - p.PreFilterExprsList = val -} -func (p *TFileScanRangeParams) SetLoadId(val *types.TUniqueId) { - p.LoadId = val -} -func (p *TFileScanRangeParams) SetTextSerdeType(val *TTextSerdeType) { - p.TextSerdeType = val -} - -var fieldIDToName_TFileScanRangeParams = map[int16]string{ - 1: "file_type", - 2: "format_type", - 3: "compress_type", - 4: "src_tuple_id", - 5: "dest_tuple_id", - 6: "num_of_columns_from_file", - 7: "required_slots", - 8: "hdfs_params", - 9: "properties", - 10: "expr_of_dest_slot", - 11: "default_value_of_src_slot", - 12: "dest_sid_to_src_sid_without_trans", 
- 13: "strict_mode", - 14: "broker_addresses", - 15: "file_attributes", - 16: "pre_filter_exprs", - 17: "table_format_params", - 18: "column_idxs", - 19: "slot_name_to_schema_pos", - 20: "pre_filter_exprs_list", - 21: "load_id", - 22: "text_serde_type", -} - -func (p *TFileScanRangeParams) IsSetFileType() bool { - return p.FileType != nil + return p.NestedFields } - -func (p *TFileScanRangeParams) IsSetFormatType() bool { - return p.FormatType != nil +func (p *THudiFileDesc) SetInstantTime(val *string) { + p.InstantTime = val } - -func (p *TFileScanRangeParams) IsSetCompressType() bool { - return p.CompressType != nil +func (p *THudiFileDesc) SetSerde(val *string) { + p.Serde = val } - -func (p *TFileScanRangeParams) IsSetSrcTupleId() bool { - return p.SrcTupleId != nil +func (p *THudiFileDesc) SetInputFormat(val *string) { + p.InputFormat = val } - -func (p *TFileScanRangeParams) IsSetDestTupleId() bool { - return p.DestTupleId != nil +func (p *THudiFileDesc) SetBasePath(val *string) { + p.BasePath = val } - -func (p *TFileScanRangeParams) IsSetNumOfColumnsFromFile() bool { - return p.NumOfColumnsFromFile != nil +func (p *THudiFileDesc) SetDataFilePath(val *string) { + p.DataFilePath = val } - -func (p *TFileScanRangeParams) IsSetRequiredSlots() bool { - return p.RequiredSlots != nil +func (p *THudiFileDesc) SetDataFileLength(val *int64) { + p.DataFileLength = val } - -func (p *TFileScanRangeParams) IsSetHdfsParams() bool { - return p.HdfsParams != nil +func (p *THudiFileDesc) SetDeltaLogs(val []string) { + p.DeltaLogs = val } - -func (p *TFileScanRangeParams) IsSetProperties() bool { - return p.Properties != nil +func (p *THudiFileDesc) SetColumnNames(val []string) { + p.ColumnNames = val } - -func (p *TFileScanRangeParams) IsSetExprOfDestSlot() bool { - return p.ExprOfDestSlot != nil +func (p *THudiFileDesc) SetColumnTypes(val []string) { + p.ColumnTypes = val } - -func (p *TFileScanRangeParams) IsSetDefaultValueOfSrcSlot() bool { - return p.DefaultValueOfSrcSlot != nil +func (p *THudiFileDesc) SetNestedFields(val []string) { + p.NestedFields = val } -func (p *TFileScanRangeParams) IsSetDestSidToSrcSidWithoutTrans() bool { - return p.DestSidToSrcSidWithoutTrans != nil +var fieldIDToName_THudiFileDesc = map[int16]string{ + 1: "instant_time", + 2: "serde", + 3: "input_format", + 4: "base_path", + 5: "data_file_path", + 6: "data_file_length", + 7: "delta_logs", + 8: "column_names", + 9: "column_types", + 10: "nested_fields", } -func (p *TFileScanRangeParams) IsSetStrictMode() bool { - return p.StrictMode != nil +func (p *THudiFileDesc) IsSetInstantTime() bool { + return p.InstantTime != nil } -func (p *TFileScanRangeParams) IsSetBrokerAddresses() bool { - return p.BrokerAddresses != nil +func (p *THudiFileDesc) IsSetSerde() bool { + return p.Serde != nil } -func (p *TFileScanRangeParams) IsSetFileAttributes() bool { - return p.FileAttributes != nil +func (p *THudiFileDesc) IsSetInputFormat() bool { + return p.InputFormat != nil } -func (p *TFileScanRangeParams) IsSetPreFilterExprs() bool { - return p.PreFilterExprs != nil +func (p *THudiFileDesc) IsSetBasePath() bool { + return p.BasePath != nil } -func (p *TFileScanRangeParams) IsSetTableFormatParams() bool { - return p.TableFormatParams != nil +func (p *THudiFileDesc) IsSetDataFilePath() bool { + return p.DataFilePath != nil } -func (p *TFileScanRangeParams) IsSetColumnIdxs() bool { - return p.ColumnIdxs != nil +func (p *THudiFileDesc) IsSetDataFileLength() bool { + return p.DataFileLength != nil } -func (p *TFileScanRangeParams) 
IsSetSlotNameToSchemaPos() bool { - return p.SlotNameToSchemaPos != nil +func (p *THudiFileDesc) IsSetDeltaLogs() bool { + return p.DeltaLogs != nil } -func (p *TFileScanRangeParams) IsSetPreFilterExprsList() bool { - return p.PreFilterExprsList != nil +func (p *THudiFileDesc) IsSetColumnNames() bool { + return p.ColumnNames != nil } -func (p *TFileScanRangeParams) IsSetLoadId() bool { - return p.LoadId != nil +func (p *THudiFileDesc) IsSetColumnTypes() bool { + return p.ColumnTypes != nil } -func (p *TFileScanRangeParams) IsSetTextSerdeType() bool { - return p.TextSerdeType != nil +func (p *THudiFileDesc) IsSetNestedFields() bool { + return p.NestedFields != nil } -func (p *TFileScanRangeParams) Read(iprot thrift.TProtocol) (err error) { +func (p *THudiFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -12298,231 +13006,90 @@ func (p *TFileScanRangeParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } 
else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.MAP { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } - case 12: - if fieldTypeId == thrift.MAP { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.BOOL { - if err = p.ReadField13(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.LIST { - if err = p.ReadField14(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField15(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 16: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField16(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField17(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.LIST { - if err = p.ReadField18(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.MAP { - if err = p.ReadField19(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.LIST { - if err = p.ReadField20(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField21(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.I32 { - if err = p.ReadField22(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12537,7 +13104,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRangeParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T 
read field %d '%s' error: ", p, fieldId, fieldIDToName_THudiFileDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -12547,346 +13114,168 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *THudiFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - tmp := types.TFileType(v) - p.FileType = &tmp + _field = &v } + p.InstantTime = _field return nil } +func (p *THudiFileDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *TFileScanRangeParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - tmp := TFileFormatType(v) - p.FormatType = &tmp + _field = &v } + p.Serde = _field return nil } +func (p *THudiFileDesc) ReadField3(iprot thrift.TProtocol) error { -func (p *TFileScanRangeParams) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - tmp := TFileCompressType(v) - p.CompressType = &tmp + _field = &v } + p.InputFormat = _field return nil } +func (p *THudiFileDesc) ReadField4(iprot thrift.TProtocol) error { -func (p *TFileScanRangeParams) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.SrcTupleId = &v + _field = &v } + p.BasePath = _field return nil } +func (p *THudiFileDesc) ReadField5(iprot thrift.TProtocol) error { -func (p *TFileScanRangeParams) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.DestTupleId = &v + _field = &v } + p.DataFilePath = _field return nil } +func (p *THudiFileDesc) ReadField6(iprot thrift.TProtocol) error { -func (p *TFileScanRangeParams) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { return err } else { - p.NumOfColumnsFromFile = &v + _field = &v } + p.DataFileLength = _field return nil } - -func (p *TFileScanRangeParams) ReadField7(iprot thrift.TProtocol) error { +func (p *THudiFileDesc) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RequiredSlots = make([]*TFileScanSlotInfo, 0, size) - for i := 0; i < size; i++ { - _elem := NewTFileScanSlotInfo() - if err := _elem.Read(iprot); err != nil { - return err - } - - p.RequiredSlots = append(p.RequiredSlots, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField8(iprot thrift.TProtocol) error { - p.HdfsParams = NewTHdfsParams() - if err := p.HdfsParams.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField9(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.Properties = make(map[string]string, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, 
err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - var _val string + var _elem string if v, err := iprot.ReadString(); err != nil { return err } else { - _val = v - } - - p.Properties[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField10(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.ExprOfDestSlot = make(map[types.TSlotId]*exprs.TExpr, size) - for i := 0; i < size; i++ { - var _key types.TSlotId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := exprs.NewTExpr() - if err := _val.Read(iprot); err != nil { - return err - } - - p.ExprOfDestSlot[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField11(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return err - } - p.DefaultValueOfSrcSlot = make(map[types.TSlotId]*exprs.TExpr, size) - for i := 0; i < size; i++ { - var _key types.TSlotId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - _val := exprs.NewTExpr() - if err := _val.Read(iprot); err != nil { - return err + _elem = v } - p.DefaultValueOfSrcSlot[_key] = _val + _field = append(_field, _elem) } - if err := iprot.ReadMapEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } + p.DeltaLogs = _field return nil } - -func (p *TFileScanRangeParams) ReadField12(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() +func (p *THudiFileDesc) ReadField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DestSidToSrcSidWithoutTrans = make(map[types.TSlotId]types.TSlotId, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { - var _key types.TSlotId - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - _key = v - } - var _val types.TSlotId - if v, err := iprot.ReadI32(); err != nil { + var _elem string + if v, err := iprot.ReadString(); err != nil { return err } else { - _val = v - } - - p.DestSidToSrcSidWithoutTrans[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return err - } else { - p.StrictMode = &v - } - return nil -} - -func (p *TFileScanRangeParams) ReadField14(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() - if err := _elem.Read(iprot); err != nil { - return err + _elem = v } - p.BrokerAddresses = append(p.BrokerAddresses, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnNames = _field return nil } - -func (p *TFileScanRangeParams) ReadField15(iprot thrift.TProtocol) error { - p.FileAttributes = NewTFileAttributes() - if err := p.FileAttributes.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField16(iprot thrift.TProtocol) error { - p.PreFilterExprs = exprs.NewTExpr() - if err := p.PreFilterExprs.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField17(iprot 
thrift.TProtocol) error { - p.TableFormatParams = NewTTableFormatFileDesc() - if err := p.TableFormatParams.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField18(iprot thrift.TProtocol) error { +func (p *THudiFileDesc) ReadField9(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnIdxs = make([]int32, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { - var _elem int32 - if v, err := iprot.ReadI32(); err != nil { + + var _elem string + if v, err := iprot.ReadString(); err != nil { return err } else { _elem = v } - p.ColumnIdxs = append(p.ColumnIdxs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnTypes = _field return nil } - -func (p *TFileScanRangeParams) ReadField19(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() +func (p *THudiFileDesc) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SlotNameToSchemaPos = make(map[string]int32, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _key = v - } - var _val int32 - if v, err := iprot.ReadI32(); err != nil { + var _elem string + if v, err := iprot.ReadString(); err != nil { return err } else { - _val = v - } - - p.SlotNameToSchemaPos[_key] = _val - } - if err := iprot.ReadMapEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField20(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.PreFilterExprsList = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if err := _elem.Read(iprot); err != nil { - return err + _elem = v } - p.PreFilterExprsList = append(p.PreFilterExprsList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.NestedFields = _field return nil } -func (p *TFileScanRangeParams) ReadField21(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { - return err - } - return nil -} - -func (p *TFileScanRangeParams) ReadField22(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := TTextSerdeType(v) - p.TextSerdeType = &tmp - } - return nil -} - -func (p *TFileScanRangeParams) Write(oprot thrift.TProtocol) (err error) { +func (p *THudiFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFileScanRangeParams"); err != nil { + if err = oprot.WriteStructBegin("THudiFileDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -12930,55 +13319,6 @@ func (p *TFileScanRangeParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - if err = p.writeField13(oprot); err != nil { - fieldId = 13 - goto WriteFieldError - } - if err = p.writeField14(oprot); err != nil { - fieldId = 14 - goto WriteFieldError - } - if err = p.writeField15(oprot); err != nil { - fieldId = 15 - goto WriteFieldError - } - if err = p.writeField16(oprot); err != nil { - fieldId = 16 - goto WriteFieldError - } - if err = 
p.writeField17(oprot); err != nil { - fieldId = 17 - goto WriteFieldError - } - if err = p.writeField18(oprot); err != nil { - fieldId = 18 - goto WriteFieldError - } - if err = p.writeField19(oprot); err != nil { - fieldId = 19 - goto WriteFieldError - } - if err = p.writeField20(oprot); err != nil { - fieldId = 20 - goto WriteFieldError - } - if err = p.writeField21(oprot); err != nil { - fieldId = 21 - goto WriteFieldError - } - if err = p.writeField22(oprot); err != nil { - fieldId = 22 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12997,12 +13337,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFileScanRangeParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetFileType() { - if err = oprot.WriteFieldBegin("file_type", thrift.I32, 1); err != nil { +func (p *THudiFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetInstantTime() { + if err = oprot.WriteFieldBegin("instant_time", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.FileType)); err != nil { + if err := oprot.WriteString(*p.InstantTime); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13016,12 +13356,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetFormatType() { - if err = oprot.WriteFieldBegin("format_type", thrift.I32, 2); err != nil { +func (p *THudiFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSerde() { + if err = oprot.WriteFieldBegin("serde", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.FormatType)); err != nil { + if err := oprot.WriteString(*p.Serde); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13035,12 +13375,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetCompressType() { - if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 3); err != nil { +func (p *THudiFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetInputFormat() { + if err = oprot.WriteFieldBegin("input_format", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { + if err := oprot.WriteString(*p.InputFormat); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13054,12 +13394,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetSrcTupleId() { - if err = oprot.WriteFieldBegin("src_tuple_id", thrift.I32, 4); err != nil { +func (p *THudiFileDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBasePath() { + if err = oprot.WriteFieldBegin("base_path", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.SrcTupleId); err != nil { + if err := oprot.WriteString(*p.BasePath); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13073,12 +13413,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TFileScanRangeParams) 
writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetDestTupleId() { - if err = oprot.WriteFieldBegin("dest_tuple_id", thrift.I32, 5); err != nil { +func (p *THudiFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDataFilePath() { + if err = oprot.WriteFieldBegin("data_file_path", thrift.STRING, 5); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.DestTupleId); err != nil { + if err := oprot.WriteString(*p.DataFilePath); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13092,12 +13432,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetNumOfColumnsFromFile() { - if err = oprot.WriteFieldBegin("num_of_columns_from_file", thrift.I32, 6); err != nil { +func (p *THudiFileDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetDataFileLength() { + if err = oprot.WriteFieldBegin("data_file_length", thrift.I64, 6); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(*p.NumOfColumnsFromFile); err != nil { + if err := oprot.WriteI64(*p.DataFileLength); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13111,16 +13451,16 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetRequiredSlots() { - if err = oprot.WriteFieldBegin("required_slots", thrift.LIST, 7); err != nil { +func (p *THudiFileDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetDeltaLogs() { + if err = oprot.WriteFieldBegin("delta_logs", thrift.LIST, 7); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RequiredSlots)); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, len(p.DeltaLogs)); err != nil { return err } - for _, v := range p.RequiredSlots { - if err := v.Write(oprot); err != nil { + for _, v := range p.DeltaLogs { + if err := oprot.WriteString(v); err != nil { return err } } @@ -13138,12 +13478,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetHdfsParams() { - if err = oprot.WriteFieldBegin("hdfs_params", thrift.STRUCT, 8); err != nil { +func (p *THudiFileDesc) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnNames() { + if err = oprot.WriteFieldBegin("column_names", thrift.LIST, 8); err != nil { goto WriteFieldBeginError } - if err := p.HdfsParams.Write(oprot); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnNames)); err != nil { + return err + } + for _, v := range p.ColumnNames { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13157,25 +13505,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetProperties() { - if err = oprot.WriteFieldBegin("properties", thrift.MAP, 9); err != nil { +func (p *THudiFileDesc) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnTypes() { + if err = oprot.WriteFieldBegin("column_types", thrift.LIST, 9); err != nil { goto 
WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnTypes)); err != nil { return err } - for k, v := range p.Properties { - - if err := oprot.WriteString(k); err != nil { - return err - } - + for _, v := range p.ColumnTypes { if err := oprot.WriteString(v); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13189,25 +13532,20 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetExprOfDestSlot() { - if err = oprot.WriteFieldBegin("expr_of_dest_slot", thrift.MAP, 10); err != nil { +func (p *THudiFileDesc) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetNestedFields() { + if err = oprot.WriteFieldBegin("nested_fields", thrift.LIST, 10); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.ExprOfDestSlot)); err != nil { + if err := oprot.WriteListBegin(thrift.STRING, len(p.NestedFields)); err != nil { return err } - for k, v := range p.ExprOfDestSlot { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := v.Write(oprot); err != nil { + for _, v := range p.NestedFields { + if err := oprot.WriteString(v); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -13221,865 +13559,6171 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TFileScanRangeParams) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultValueOfSrcSlot() { - if err = oprot.WriteFieldBegin("default_value_of_src_slot", thrift.MAP, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.DefaultValueOfSrcSlot)); err != nil { - return err - } - for k, v := range p.DefaultValueOfSrcSlot { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *THudiFileDesc) String() string { + if p == nil { + return "" } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) + return fmt.Sprintf("THudiFileDesc(%+v)", *p) + } -func (p *TFileScanRangeParams) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDestSidToSrcSidWithoutTrans() { - if err = oprot.WriteFieldBegin("dest_sid_to_src_sid_without_trans", thrift.MAP, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.DestSidToSrcSidWithoutTrans)); err != nil { - return err - } - for k, v := range p.DestSidToSrcSidWithoutTrans { - - if err := oprot.WriteI32(k); err != nil { - return err - } - - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - 
} - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField13(oprot thrift.TProtocol) (err error) { - if p.IsSetStrictMode() { - if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 13); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteBool(*p.StrictMode); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetBrokerAddresses() { - if err = oprot.WriteFieldBegin("broker_addresses", thrift.LIST, 14); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BrokerAddresses)); err != nil { - return err - } - for _, v := range p.BrokerAddresses { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField15(oprot thrift.TProtocol) (err error) { - if p.IsSetFileAttributes() { - if err = oprot.WriteFieldBegin("file_attributes", thrift.STRUCT, 15); err != nil { - goto WriteFieldBeginError - } - if err := p.FileAttributes.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField16(oprot thrift.TProtocol) (err error) { - if p.IsSetPreFilterExprs() { - if err = oprot.WriteFieldBegin("pre_filter_exprs", thrift.STRUCT, 16); err != nil { - goto WriteFieldBeginError - } - if err := p.PreFilterExprs.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField17(oprot thrift.TProtocol) (err error) { - if p.IsSetTableFormatParams() { - if err = oprot.WriteFieldBegin("table_format_params", thrift.STRUCT, 17); err != nil { - goto WriteFieldBeginError - } - if err := p.TableFormatParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) -} - -func (p *TFileScanRangeParams) 
writeField18(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnIdxs() { - if err = oprot.WriteFieldBegin("column_idxs", thrift.LIST, 18); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.I32, len(p.ColumnIdxs)); err != nil { - return err - } - for _, v := range p.ColumnIdxs { - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField19(oprot thrift.TProtocol) (err error) { - if p.IsSetSlotNameToSchemaPos() { - if err = oprot.WriteFieldBegin("slot_name_to_schema_pos", thrift.MAP, 19); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.I32, len(p.SlotNameToSchemaPos)); err != nil { - return err - } - for k, v := range p.SlotNameToSchemaPos { - - if err := oprot.WriteString(k); err != nil { - return err - } - - if err := oprot.WriteI32(v); err != nil { - return err - } - } - if err := oprot.WriteMapEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField20(oprot thrift.TProtocol) (err error) { - if p.IsSetPreFilterExprsList() { - if err = oprot.WriteFieldBegin("pre_filter_exprs_list", thrift.LIST, 20); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PreFilterExprsList)); err != nil { - return err - } - for _, v := range p.PreFilterExprsList { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField21(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadId() { - if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 21); err != nil { - goto WriteFieldBeginError - } - if err := p.LoadId.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) -} - -func (p *TFileScanRangeParams) writeField22(oprot thrift.TProtocol) (err error) { - if p.IsSetTextSerdeType() { - if err = oprot.WriteFieldBegin("text_serde_type", thrift.I32, 22); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.TextSerdeType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) -} - -func (p *TFileScanRangeParams) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TFileScanRangeParams(%+v)", *p) -} - -func (p *TFileScanRangeParams) DeepEqual(ano *TFileScanRangeParams) bool { +func (p *THudiFileDesc) DeepEqual(ano *THudiFileDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.FileType) { - return false - } - if !p.Field2DeepEqual(ano.FormatType) { - return false - } - if !p.Field3DeepEqual(ano.CompressType) { - return false - } - if !p.Field4DeepEqual(ano.SrcTupleId) { - return false - } - if !p.Field5DeepEqual(ano.DestTupleId) { - return false - } - if !p.Field6DeepEqual(ano.NumOfColumnsFromFile) { - return false - } - if !p.Field7DeepEqual(ano.RequiredSlots) { - return false - } - if !p.Field8DeepEqual(ano.HdfsParams) { - return false - } - if !p.Field9DeepEqual(ano.Properties) { - return false - } - if !p.Field10DeepEqual(ano.ExprOfDestSlot) { - return false - } - if !p.Field11DeepEqual(ano.DefaultValueOfSrcSlot) { - return false - } - if !p.Field12DeepEqual(ano.DestSidToSrcSidWithoutTrans) { - return false - } - if !p.Field13DeepEqual(ano.StrictMode) { + if !p.Field1DeepEqual(ano.InstantTime) { return false } - if !p.Field14DeepEqual(ano.BrokerAddresses) { + if !p.Field2DeepEqual(ano.Serde) { return false } - if !p.Field15DeepEqual(ano.FileAttributes) { + if !p.Field3DeepEqual(ano.InputFormat) { return false } - if !p.Field16DeepEqual(ano.PreFilterExprs) { + if !p.Field4DeepEqual(ano.BasePath) { return false } - if !p.Field17DeepEqual(ano.TableFormatParams) { + if !p.Field5DeepEqual(ano.DataFilePath) { return false } - if !p.Field18DeepEqual(ano.ColumnIdxs) { + if !p.Field6DeepEqual(ano.DataFileLength) { return false } - if !p.Field19DeepEqual(ano.SlotNameToSchemaPos) { + if !p.Field7DeepEqual(ano.DeltaLogs) { return false } - if !p.Field20DeepEqual(ano.PreFilterExprsList) { + if !p.Field8DeepEqual(ano.ColumnNames) { return false } - if !p.Field21DeepEqual(ano.LoadId) { + if !p.Field9DeepEqual(ano.ColumnTypes) { return false } - if !p.Field22DeepEqual(ano.TextSerdeType) { + if !p.Field10DeepEqual(ano.NestedFields) { return false } return true } -func (p *TFileScanRangeParams) Field1DeepEqual(src *types.TFileType) bool { +func (p *THudiFileDesc) Field1DeepEqual(src *string) bool { - if p.FileType == src { + if p.InstantTime == src { return true - } else if p.FileType == nil || src == nil { + } else if p.InstantTime == nil || src == nil { return false } - if *p.FileType != *src { + if strings.Compare(*p.InstantTime, *src) != 0 { return false } return true } -func (p *TFileScanRangeParams) Field2DeepEqual(src *TFileFormatType) bool { +func (p *THudiFileDesc) Field2DeepEqual(src *string) bool { - if p.FormatType == src { + if p.Serde == src { return true - } else if p.FormatType == nil || src == nil { + } else if p.Serde == nil || src == nil { return false } - if *p.FormatType != *src { + if strings.Compare(*p.Serde, *src) != 0 { return false } return true } -func (p *TFileScanRangeParams) Field3DeepEqual(src *TFileCompressType) bool { +func (p *THudiFileDesc) Field3DeepEqual(src *string) bool { - if p.CompressType == src { + if p.InputFormat == src { return true - } else if p.CompressType == nil || src == nil { + } else if p.InputFormat == nil || src == nil { return false } - if *p.CompressType != *src 
{ + if strings.Compare(*p.InputFormat, *src) != 0 { return false } return true } -func (p *TFileScanRangeParams) Field4DeepEqual(src *types.TTupleId) bool { +func (p *THudiFileDesc) Field4DeepEqual(src *string) bool { - if p.SrcTupleId == src { + if p.BasePath == src { return true - } else if p.SrcTupleId == nil || src == nil { + } else if p.BasePath == nil || src == nil { return false } - if *p.SrcTupleId != *src { + if strings.Compare(*p.BasePath, *src) != 0 { return false } return true } -func (p *TFileScanRangeParams) Field5DeepEqual(src *types.TTupleId) bool { +func (p *THudiFileDesc) Field5DeepEqual(src *string) bool { - if p.DestTupleId == src { + if p.DataFilePath == src { return true - } else if p.DestTupleId == nil || src == nil { + } else if p.DataFilePath == nil || src == nil { return false } - if *p.DestTupleId != *src { + if strings.Compare(*p.DataFilePath, *src) != 0 { return false } return true } -func (p *TFileScanRangeParams) Field6DeepEqual(src *int32) bool { +func (p *THudiFileDesc) Field6DeepEqual(src *int64) bool { - if p.NumOfColumnsFromFile == src { + if p.DataFileLength == src { return true - } else if p.NumOfColumnsFromFile == nil || src == nil { + } else if p.DataFileLength == nil || src == nil { return false } - if *p.NumOfColumnsFromFile != *src { + if *p.DataFileLength != *src { return false } return true } -func (p *TFileScanRangeParams) Field7DeepEqual(src []*TFileScanSlotInfo) bool { +func (p *THudiFileDesc) Field7DeepEqual(src []string) bool { - if len(p.RequiredSlots) != len(src) { + if len(p.DeltaLogs) != len(src) { return false } - for i, v := range p.RequiredSlots { + for i, v := range p.DeltaLogs { _src := src[i] - if !v.DeepEqual(_src) { + if strings.Compare(v, _src) != 0 { return false } } return true } -func (p *TFileScanRangeParams) Field8DeepEqual(src *THdfsParams) bool { - - if !p.HdfsParams.DeepEqual(src) { - return false - } - return true -} -func (p *TFileScanRangeParams) Field9DeepEqual(src map[string]string) bool { +func (p *THudiFileDesc) Field8DeepEqual(src []string) bool { - if len(p.Properties) != len(src) { + if len(p.ColumnNames) != len(src) { return false } - for k, v := range p.Properties { - _src := src[k] + for i, v := range p.ColumnNames { + _src := src[i] if strings.Compare(v, _src) != 0 { return false } } return true } -func (p *TFileScanRangeParams) Field10DeepEqual(src map[types.TSlotId]*exprs.TExpr) bool { +func (p *THudiFileDesc) Field9DeepEqual(src []string) bool { - if len(p.ExprOfDestSlot) != len(src) { + if len(p.ColumnTypes) != len(src) { return false } - for k, v := range p.ExprOfDestSlot { - _src := src[k] - if !v.DeepEqual(_src) { + for i, v := range p.ColumnTypes { + _src := src[i] + if strings.Compare(v, _src) != 0 { return false } } return true } -func (p *TFileScanRangeParams) Field11DeepEqual(src map[types.TSlotId]*exprs.TExpr) bool { +func (p *THudiFileDesc) Field10DeepEqual(src []string) bool { - if len(p.DefaultValueOfSrcSlot) != len(src) { + if len(p.NestedFields) != len(src) { return false } - for k, v := range p.DefaultValueOfSrcSlot { - _src := src[k] - if !v.DeepEqual(_src) { + for i, v := range p.NestedFields { + _src := src[i] + if strings.Compare(v, _src) != 0 { return false } } return true } -func (p *TFileScanRangeParams) Field12DeepEqual(src map[types.TSlotId]types.TSlotId) bool { - if len(p.DestSidToSrcSidWithoutTrans) != len(src) { - return false - } - for k, v := range p.DestSidToSrcSidWithoutTrans { - _src := src[k] - if v != _src { - return false - } - } - return true +type TLakeSoulFileDesc 
struct { + FilePaths []string `thrift:"file_paths,1,optional" frugal:"1,optional,list" json:"file_paths,omitempty"` + PrimaryKeys []string `thrift:"primary_keys,2,optional" frugal:"2,optional,list" json:"primary_keys,omitempty"` + PartitionDescs []string `thrift:"partition_descs,3,optional" frugal:"3,optional,list" json:"partition_descs,omitempty"` + TableSchema *string `thrift:"table_schema,4,optional" frugal:"4,optional,string" json:"table_schema,omitempty"` + Options *string `thrift:"options,5,optional" frugal:"5,optional,string" json:"options,omitempty"` } -func (p *TFileScanRangeParams) Field13DeepEqual(src *bool) bool { - if p.StrictMode == src { - return true - } else if p.StrictMode == nil || src == nil { - return false - } - if *p.StrictMode != *src { - return false - } - return true +func NewTLakeSoulFileDesc() *TLakeSoulFileDesc { + return &TLakeSoulFileDesc{} } -func (p *TFileScanRangeParams) Field14DeepEqual(src []*types.TNetworkAddress) bool { - if len(p.BrokerAddresses) != len(src) { - return false - } - for i, v := range p.BrokerAddresses { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +func (p *TLakeSoulFileDesc) InitDefault() { } -func (p *TFileScanRangeParams) Field15DeepEqual(src *TFileAttributes) bool { - if !p.FileAttributes.DeepEqual(src) { - return false +var TLakeSoulFileDesc_FilePaths_DEFAULT []string + +func (p *TLakeSoulFileDesc) GetFilePaths() (v []string) { + if !p.IsSetFilePaths() { + return TLakeSoulFileDesc_FilePaths_DEFAULT } - return true + return p.FilePaths } -func (p *TFileScanRangeParams) Field16DeepEqual(src *exprs.TExpr) bool { - if !p.PreFilterExprs.DeepEqual(src) { - return false +var TLakeSoulFileDesc_PrimaryKeys_DEFAULT []string + +func (p *TLakeSoulFileDesc) GetPrimaryKeys() (v []string) { + if !p.IsSetPrimaryKeys() { + return TLakeSoulFileDesc_PrimaryKeys_DEFAULT } - return true + return p.PrimaryKeys } -func (p *TFileScanRangeParams) Field17DeepEqual(src *TTableFormatFileDesc) bool { - if !p.TableFormatParams.DeepEqual(src) { - return false +var TLakeSoulFileDesc_PartitionDescs_DEFAULT []string + +func (p *TLakeSoulFileDesc) GetPartitionDescs() (v []string) { + if !p.IsSetPartitionDescs() { + return TLakeSoulFileDesc_PartitionDescs_DEFAULT } - return true + return p.PartitionDescs } -func (p *TFileScanRangeParams) Field18DeepEqual(src []int32) bool { - if len(p.ColumnIdxs) != len(src) { - return false - } - for i, v := range p.ColumnIdxs { - _src := src[i] - if v != _src { - return false - } +var TLakeSoulFileDesc_TableSchema_DEFAULT string + +func (p *TLakeSoulFileDesc) GetTableSchema() (v string) { + if !p.IsSetTableSchema() { + return TLakeSoulFileDesc_TableSchema_DEFAULT } - return true + return *p.TableSchema } -func (p *TFileScanRangeParams) Field19DeepEqual(src map[string]int32) bool { - if len(p.SlotNameToSchemaPos) != len(src) { - return false - } - for k, v := range p.SlotNameToSchemaPos { - _src := src[k] - if v != _src { - return false - } +var TLakeSoulFileDesc_Options_DEFAULT string + +func (p *TLakeSoulFileDesc) GetOptions() (v string) { + if !p.IsSetOptions() { + return TLakeSoulFileDesc_Options_DEFAULT } - return true + return *p.Options +} +func (p *TLakeSoulFileDesc) SetFilePaths(val []string) { + p.FilePaths = val +} +func (p *TLakeSoulFileDesc) SetPrimaryKeys(val []string) { + p.PrimaryKeys = val +} +func (p *TLakeSoulFileDesc) SetPartitionDescs(val []string) { + p.PartitionDescs = val +} +func (p *TLakeSoulFileDesc) SetTableSchema(val *string) { + p.TableSchema = val +} +func (p 
*TLakeSoulFileDesc) SetOptions(val *string) { + p.Options = val } -func (p *TFileScanRangeParams) Field20DeepEqual(src []*exprs.TExpr) bool { - if len(p.PreFilterExprsList) != len(src) { - return false - } - for i, v := range p.PreFilterExprsList { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } - } - return true +var fieldIDToName_TLakeSoulFileDesc = map[int16]string{ + 1: "file_paths", + 2: "primary_keys", + 3: "partition_descs", + 4: "table_schema", + 5: "options", } -func (p *TFileScanRangeParams) Field21DeepEqual(src *types.TUniqueId) bool { - if !p.LoadId.DeepEqual(src) { - return false - } - return true +func (p *TLakeSoulFileDesc) IsSetFilePaths() bool { + return p.FilePaths != nil } -func (p *TFileScanRangeParams) Field22DeepEqual(src *TTextSerdeType) bool { - if p.TextSerdeType == src { - return true - } else if p.TextSerdeType == nil || src == nil { - return false - } - if *p.TextSerdeType != *src { - return false +func (p *TLakeSoulFileDesc) IsSetPrimaryKeys() bool { + return p.PrimaryKeys != nil +} + +func (p *TLakeSoulFileDesc) IsSetPartitionDescs() bool { + return p.PartitionDescs != nil +} + +func (p *TLakeSoulFileDesc) IsSetTableSchema() bool { + return p.TableSchema != nil +} + +func (p *TLakeSoulFileDesc) IsSetOptions() bool { + return p.Options != nil +} + +func (p *TLakeSoulFileDesc) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLakeSoulFileDesc[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) 
+ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FilePaths = _field + return nil +} +func (p *TLakeSoulFileDesc) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PrimaryKeys = _field + return nil +} +func (p *TLakeSoulFileDesc) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PartitionDescs = _field + return nil +} +func (p *TLakeSoulFileDesc) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableSchema = _field + return nil +} +func (p *TLakeSoulFileDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Options = _field + return nil +} + +func (p *TLakeSoulFileDesc) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TLakeSoulFileDesc"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFilePaths() { + if err = oprot.WriteFieldBegin("file_paths", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.FilePaths)); err != nil { + return err + } + for _, v := range p.FilePaths { + if err := 
oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPrimaryKeys() { + if err = oprot.WriteFieldBegin("primary_keys", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.PrimaryKeys)); err != nil { + return err + } + for _, v := range p.PrimaryKeys { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionDescs() { + if err = oprot.WriteFieldBegin("partition_descs", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.PartitionDescs)); err != nil { + return err + } + for _, v := range p.PartitionDescs { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTableSchema() { + if err = oprot.WriteFieldBegin("table_schema", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableSchema); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetOptions() { + if err = oprot.WriteFieldBegin("options", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Options); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TLakeSoulFileDesc(%+v)", *p) + +} + +func (p *TLakeSoulFileDesc) DeepEqual(ano *TLakeSoulFileDesc) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if 
!p.Field1DeepEqual(ano.FilePaths) { + return false + } + if !p.Field2DeepEqual(ano.PrimaryKeys) { + return false + } + if !p.Field3DeepEqual(ano.PartitionDescs) { + return false + } + if !p.Field4DeepEqual(ano.TableSchema) { + return false + } + if !p.Field5DeepEqual(ano.Options) { + return false + } + return true +} + +func (p *TLakeSoulFileDesc) Field1DeepEqual(src []string) bool { + + if len(p.FilePaths) != len(src) { + return false + } + for i, v := range p.FilePaths { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TLakeSoulFileDesc) Field2DeepEqual(src []string) bool { + + if len(p.PrimaryKeys) != len(src) { + return false + } + for i, v := range p.PrimaryKeys { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TLakeSoulFileDesc) Field3DeepEqual(src []string) bool { + + if len(p.PartitionDescs) != len(src) { + return false + } + for i, v := range p.PartitionDescs { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TLakeSoulFileDesc) Field4DeepEqual(src *string) bool { + + if p.TableSchema == src { + return true + } else if p.TableSchema == nil || src == nil { + return false + } + if strings.Compare(*p.TableSchema, *src) != 0 { + return false + } + return true +} +func (p *TLakeSoulFileDesc) Field5DeepEqual(src *string) bool { + + if p.Options == src { + return true + } else if p.Options == nil || src == nil { + return false + } + if strings.Compare(*p.Options, *src) != 0 { + return false + } + return true +} + +type TTransactionalHiveDeleteDeltaDesc struct { + DirectoryLocation *string `thrift:"directory_location,1,optional" frugal:"1,optional,string" json:"directory_location,omitempty"` + FileNames []string `thrift:"file_names,2,optional" frugal:"2,optional,list" json:"file_names,omitempty"` +} + +func NewTTransactionalHiveDeleteDeltaDesc() *TTransactionalHiveDeleteDeltaDesc { + return &TTransactionalHiveDeleteDeltaDesc{} +} + +func (p *TTransactionalHiveDeleteDeltaDesc) InitDefault() { +} + +var TTransactionalHiveDeleteDeltaDesc_DirectoryLocation_DEFAULT string + +func (p *TTransactionalHiveDeleteDeltaDesc) GetDirectoryLocation() (v string) { + if !p.IsSetDirectoryLocation() { + return TTransactionalHiveDeleteDeltaDesc_DirectoryLocation_DEFAULT + } + return *p.DirectoryLocation +} + +var TTransactionalHiveDeleteDeltaDesc_FileNames_DEFAULT []string + +func (p *TTransactionalHiveDeleteDeltaDesc) GetFileNames() (v []string) { + if !p.IsSetFileNames() { + return TTransactionalHiveDeleteDeltaDesc_FileNames_DEFAULT + } + return p.FileNames +} +func (p *TTransactionalHiveDeleteDeltaDesc) SetDirectoryLocation(val *string) { + p.DirectoryLocation = val +} +func (p *TTransactionalHiveDeleteDeltaDesc) SetFileNames(val []string) { + p.FileNames = val +} + +var fieldIDToName_TTransactionalHiveDeleteDeltaDesc = map[int16]string{ + 1: "directory_location", + 2: "file_names", +} + +func (p *TTransactionalHiveDeleteDeltaDesc) IsSetDirectoryLocation() bool { + return p.DirectoryLocation != nil +} + +func (p *TTransactionalHiveDeleteDeltaDesc) IsSetFileNames() bool { + return p.FileNames != nil +} + +func (p *TTransactionalHiveDeleteDeltaDesc) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if 
fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDeleteDeltaDesc[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTransactionalHiveDeleteDeltaDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DirectoryLocation = _field + return nil +} +func (p *TTransactionalHiveDeleteDeltaDesc) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FileNames = _field + return nil +} + +func (p *TTransactionalHiveDeleteDeltaDesc) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTransactionalHiveDeleteDeltaDesc"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTransactionalHiveDeleteDeltaDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDirectoryLocation() { + if err = oprot.WriteFieldBegin("directory_location", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DirectoryLocation); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != 
nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTransactionalHiveDeleteDeltaDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFileNames() { + if err = oprot.WriteFieldBegin("file_names", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.FileNames)); err != nil { + return err + } + for _, v := range p.FileNames { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTransactionalHiveDeleteDeltaDesc) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTransactionalHiveDeleteDeltaDesc(%+v)", *p) + +} + +func (p *TTransactionalHiveDeleteDeltaDesc) DeepEqual(ano *TTransactionalHiveDeleteDeltaDesc) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DirectoryLocation) { + return false + } + if !p.Field2DeepEqual(ano.FileNames) { + return false + } + return true +} + +func (p *TTransactionalHiveDeleteDeltaDesc) Field1DeepEqual(src *string) bool { + + if p.DirectoryLocation == src { + return true + } else if p.DirectoryLocation == nil || src == nil { + return false + } + if strings.Compare(*p.DirectoryLocation, *src) != 0 { + return false + } + return true +} +func (p *TTransactionalHiveDeleteDeltaDesc) Field2DeepEqual(src []string) bool { + + if len(p.FileNames) != len(src) { + return false + } + for i, v := range p.FileNames { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} + +type TTransactionalHiveDesc struct { + Partition *string `thrift:"partition,1,optional" frugal:"1,optional,string" json:"partition,omitempty"` + DeleteDeltas []*TTransactionalHiveDeleteDeltaDesc `thrift:"delete_deltas,2,optional" frugal:"2,optional,list" json:"delete_deltas,omitempty"` +} + +func NewTTransactionalHiveDesc() *TTransactionalHiveDesc { + return &TTransactionalHiveDesc{} +} + +func (p *TTransactionalHiveDesc) InitDefault() { +} + +var TTransactionalHiveDesc_Partition_DEFAULT string + +func (p *TTransactionalHiveDesc) GetPartition() (v string) { + if !p.IsSetPartition() { + return TTransactionalHiveDesc_Partition_DEFAULT + } + return *p.Partition +} + +var TTransactionalHiveDesc_DeleteDeltas_DEFAULT []*TTransactionalHiveDeleteDeltaDesc + +func (p *TTransactionalHiveDesc) GetDeleteDeltas() (v []*TTransactionalHiveDeleteDeltaDesc) { + if !p.IsSetDeleteDeltas() { + return TTransactionalHiveDesc_DeleteDeltas_DEFAULT + } + return p.DeleteDeltas +} +func (p *TTransactionalHiveDesc) SetPartition(val *string) { + p.Partition = val +} +func (p *TTransactionalHiveDesc) SetDeleteDeltas(val []*TTransactionalHiveDeleteDeltaDesc) { + p.DeleteDeltas = val +} + +var fieldIDToName_TTransactionalHiveDesc = map[int16]string{ + 1: "partition", + 2: "delete_deltas", +} + +func (p *TTransactionalHiveDesc) IsSetPartition() bool { + return p.Partition != nil +} + +func (p *TTransactionalHiveDesc) 
IsSetDeleteDeltas() bool { + return p.DeleteDeltas != nil +} + +func (p *TTransactionalHiveDesc) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDesc[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTransactionalHiveDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Partition = _field + return nil +} +func (p *TTransactionalHiveDesc) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TTransactionalHiveDeleteDeltaDesc, 0, size) + values := make([]TTransactionalHiveDeleteDeltaDesc, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DeleteDeltas = _field + return nil +} + +func (p *TTransactionalHiveDesc) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTransactionalHiveDesc"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTransactionalHiveDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPartition() { + if err = oprot.WriteFieldBegin("partition", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Partition); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTransactionalHiveDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDeleteDeltas() { + if err = oprot.WriteFieldBegin("delete_deltas", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DeleteDeltas)); err != nil { + return err + } + for _, v := range p.DeleteDeltas { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTransactionalHiveDesc) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTransactionalHiveDesc(%+v)", *p) + +} + +func (p *TTransactionalHiveDesc) DeepEqual(ano *TTransactionalHiveDesc) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Partition) { + return false + } + if !p.Field2DeepEqual(ano.DeleteDeltas) { + return false + } + return true +} + +func (p *TTransactionalHiveDesc) Field1DeepEqual(src *string) bool { + + if p.Partition == src { + return true + } else if p.Partition == nil || src == nil { + return false + } + if strings.Compare(*p.Partition, *src) != 0 { + return false + } + return true +} +func (p *TTransactionalHiveDesc) Field2DeepEqual(src []*TTransactionalHiveDeleteDeltaDesc) bool { + + if len(p.DeleteDeltas) != len(src) { + return false + } + for i, v := range p.DeleteDeltas { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TTableFormatFileDesc struct { + TableFormatType *string `thrift:"table_format_type,1,optional" frugal:"1,optional,string" json:"table_format_type,omitempty"` + IcebergParams *TIcebergFileDesc `thrift:"iceberg_params,2,optional" frugal:"2,optional,TIcebergFileDesc" json:"iceberg_params,omitempty"` + HudiParams *THudiFileDesc `thrift:"hudi_params,3,optional" frugal:"3,optional,THudiFileDesc" json:"hudi_params,omitempty"` + PaimonParams *TPaimonFileDesc `thrift:"paimon_params,4,optional" frugal:"4,optional,TPaimonFileDesc" json:"paimon_params,omitempty"` + TransactionalHiveParams *TTransactionalHiveDesc `thrift:"transactional_hive_params,5,optional" frugal:"5,optional,TTransactionalHiveDesc" json:"transactional_hive_params,omitempty"` + MaxComputeParams *TMaxComputeFileDesc `thrift:"max_compute_params,6,optional" frugal:"6,optional,TMaxComputeFileDesc" json:"max_compute_params,omitempty"` + TrinoConnectorParams *TTrinoConnectorFileDesc `thrift:"trino_connector_params,7,optional" frugal:"7,optional,TTrinoConnectorFileDesc" 
json:"trino_connector_params,omitempty"` + LakesoulParams *TLakeSoulFileDesc `thrift:"lakesoul_params,8,optional" frugal:"8,optional,TLakeSoulFileDesc" json:"lakesoul_params,omitempty"` +} + +func NewTTableFormatFileDesc() *TTableFormatFileDesc { + return &TTableFormatFileDesc{} +} + +func (p *TTableFormatFileDesc) InitDefault() { +} + +var TTableFormatFileDesc_TableFormatType_DEFAULT string + +func (p *TTableFormatFileDesc) GetTableFormatType() (v string) { + if !p.IsSetTableFormatType() { + return TTableFormatFileDesc_TableFormatType_DEFAULT + } + return *p.TableFormatType +} + +var TTableFormatFileDesc_IcebergParams_DEFAULT *TIcebergFileDesc + +func (p *TTableFormatFileDesc) GetIcebergParams() (v *TIcebergFileDesc) { + if !p.IsSetIcebergParams() { + return TTableFormatFileDesc_IcebergParams_DEFAULT + } + return p.IcebergParams +} + +var TTableFormatFileDesc_HudiParams_DEFAULT *THudiFileDesc + +func (p *TTableFormatFileDesc) GetHudiParams() (v *THudiFileDesc) { + if !p.IsSetHudiParams() { + return TTableFormatFileDesc_HudiParams_DEFAULT + } + return p.HudiParams +} + +var TTableFormatFileDesc_PaimonParams_DEFAULT *TPaimonFileDesc + +func (p *TTableFormatFileDesc) GetPaimonParams() (v *TPaimonFileDesc) { + if !p.IsSetPaimonParams() { + return TTableFormatFileDesc_PaimonParams_DEFAULT + } + return p.PaimonParams +} + +var TTableFormatFileDesc_TransactionalHiveParams_DEFAULT *TTransactionalHiveDesc + +func (p *TTableFormatFileDesc) GetTransactionalHiveParams() (v *TTransactionalHiveDesc) { + if !p.IsSetTransactionalHiveParams() { + return TTableFormatFileDesc_TransactionalHiveParams_DEFAULT + } + return p.TransactionalHiveParams +} + +var TTableFormatFileDesc_MaxComputeParams_DEFAULT *TMaxComputeFileDesc + +func (p *TTableFormatFileDesc) GetMaxComputeParams() (v *TMaxComputeFileDesc) { + if !p.IsSetMaxComputeParams() { + return TTableFormatFileDesc_MaxComputeParams_DEFAULT + } + return p.MaxComputeParams +} + +var TTableFormatFileDesc_TrinoConnectorParams_DEFAULT *TTrinoConnectorFileDesc + +func (p *TTableFormatFileDesc) GetTrinoConnectorParams() (v *TTrinoConnectorFileDesc) { + if !p.IsSetTrinoConnectorParams() { + return TTableFormatFileDesc_TrinoConnectorParams_DEFAULT + } + return p.TrinoConnectorParams +} + +var TTableFormatFileDesc_LakesoulParams_DEFAULT *TLakeSoulFileDesc + +func (p *TTableFormatFileDesc) GetLakesoulParams() (v *TLakeSoulFileDesc) { + if !p.IsSetLakesoulParams() { + return TTableFormatFileDesc_LakesoulParams_DEFAULT + } + return p.LakesoulParams +} +func (p *TTableFormatFileDesc) SetTableFormatType(val *string) { + p.TableFormatType = val +} +func (p *TTableFormatFileDesc) SetIcebergParams(val *TIcebergFileDesc) { + p.IcebergParams = val +} +func (p *TTableFormatFileDesc) SetHudiParams(val *THudiFileDesc) { + p.HudiParams = val +} +func (p *TTableFormatFileDesc) SetPaimonParams(val *TPaimonFileDesc) { + p.PaimonParams = val +} +func (p *TTableFormatFileDesc) SetTransactionalHiveParams(val *TTransactionalHiveDesc) { + p.TransactionalHiveParams = val +} +func (p *TTableFormatFileDesc) SetMaxComputeParams(val *TMaxComputeFileDesc) { + p.MaxComputeParams = val +} +func (p *TTableFormatFileDesc) SetTrinoConnectorParams(val *TTrinoConnectorFileDesc) { + p.TrinoConnectorParams = val +} +func (p *TTableFormatFileDesc) SetLakesoulParams(val *TLakeSoulFileDesc) { + p.LakesoulParams = val +} + +var fieldIDToName_TTableFormatFileDesc = map[int16]string{ + 1: "table_format_type", + 2: "iceberg_params", + 3: "hudi_params", + 4: "paimon_params", + 5: "transactional_hive_params", + 
6: "max_compute_params", + 7: "trino_connector_params", + 8: "lakesoul_params", +} + +func (p *TTableFormatFileDesc) IsSetTableFormatType() bool { + return p.TableFormatType != nil +} + +func (p *TTableFormatFileDesc) IsSetIcebergParams() bool { + return p.IcebergParams != nil +} + +func (p *TTableFormatFileDesc) IsSetHudiParams() bool { + return p.HudiParams != nil +} + +func (p *TTableFormatFileDesc) IsSetPaimonParams() bool { + return p.PaimonParams != nil +} + +func (p *TTableFormatFileDesc) IsSetTransactionalHiveParams() bool { + return p.TransactionalHiveParams != nil +} + +func (p *TTableFormatFileDesc) IsSetMaxComputeParams() bool { + return p.MaxComputeParams != nil +} + +func (p *TTableFormatFileDesc) IsSetTrinoConnectorParams() bool { + return p.TrinoConnectorParams != nil +} + +func (p *TTableFormatFileDesc) IsSetLakesoulParams() bool { + return p.LakesoulParams != nil +} + +func (p *TTableFormatFileDesc) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFormatFileDesc[fieldId]), err) +SkipFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableFormatFileDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableFormatType = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField2(iprot thrift.TProtocol) error { + _field := NewTIcebergFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.IcebergParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField3(iprot thrift.TProtocol) error { + _field := NewTHudiFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.HudiParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField4(iprot thrift.TProtocol) error { + _field := NewTPaimonFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.PaimonParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField5(iprot thrift.TProtocol) error { + _field := NewTTransactionalHiveDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.TransactionalHiveParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField6(iprot thrift.TProtocol) error { + _field := NewTMaxComputeFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.MaxComputeParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField7(iprot thrift.TProtocol) error { + _field := NewTTrinoConnectorFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.TrinoConnectorParams = _field + return nil +} +func (p *TTableFormatFileDesc) ReadField8(iprot thrift.TProtocol) error { + _field := NewTLakeSoulFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.LakesoulParams = _field + return nil +} + +func (p *TTableFormatFileDesc) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTableFormatFileDesc"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end 
error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTableFormatType() { + if err = oprot.WriteFieldBegin("table_format_type", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableFormatType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetIcebergParams() { + if err = oprot.WriteFieldBegin("iceberg_params", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.IcebergParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHudiParams() { + if err = oprot.WriteFieldBegin("hudi_params", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.HudiParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPaimonParams() { + if err = oprot.WriteFieldBegin("paimon_params", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.PaimonParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTransactionalHiveParams() { + if err = oprot.WriteFieldBegin("transactional_hive_params", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.TransactionalHiveParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxComputeParams() { + if err = oprot.WriteFieldBegin("max_compute_params", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.MaxComputeParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetTrinoConnectorParams() { + if err = oprot.WriteFieldBegin("trino_connector_params", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.TrinoConnectorParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetLakesoulParams() { + if err = oprot.WriteFieldBegin("lakesoul_params", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.LakesoulParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TTableFormatFileDesc) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTableFormatFileDesc(%+v)", *p) + +} + +func (p *TTableFormatFileDesc) DeepEqual(ano *TTableFormatFileDesc) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TableFormatType) { + return false + } + if !p.Field2DeepEqual(ano.IcebergParams) { + return false + } + if !p.Field3DeepEqual(ano.HudiParams) { + return false + } + if !p.Field4DeepEqual(ano.PaimonParams) { + return false + } + if !p.Field5DeepEqual(ano.TransactionalHiveParams) { + return false + } + if !p.Field6DeepEqual(ano.MaxComputeParams) { + return false + } + if !p.Field7DeepEqual(ano.TrinoConnectorParams) { + return false + } + if !p.Field8DeepEqual(ano.LakesoulParams) { + return false + } + return true +} + +func (p *TTableFormatFileDesc) Field1DeepEqual(src *string) bool { + + if p.TableFormatType == src { + return true + } else if p.TableFormatType == nil || src == nil { + return false + } + if strings.Compare(*p.TableFormatType, *src) != 0 { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field2DeepEqual(src *TIcebergFileDesc) bool { + + if !p.IcebergParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field3DeepEqual(src *THudiFileDesc) bool { + + if !p.HudiParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field4DeepEqual(src *TPaimonFileDesc) bool { + + if !p.PaimonParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field5DeepEqual(src *TTransactionalHiveDesc) bool { + + if !p.TransactionalHiveParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field6DeepEqual(src *TMaxComputeFileDesc) bool { + + if !p.MaxComputeParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) Field7DeepEqual(src *TTrinoConnectorFileDesc) bool { + + if !p.TrinoConnectorParams.DeepEqual(src) { + return false + } + return true +} +func (p *TTableFormatFileDesc) 
Field8DeepEqual(src *TLakeSoulFileDesc) bool { + + if !p.LakesoulParams.DeepEqual(src) { + return false + } + return true +} + +type TFileScanRangeParams struct { + FileType *types.TFileType `thrift:"file_type,1,optional" frugal:"1,optional,TFileType" json:"file_type,omitempty"` + FormatType *TFileFormatType `thrift:"format_type,2,optional" frugal:"2,optional,TFileFormatType" json:"format_type,omitempty"` + CompressType *TFileCompressType `thrift:"compress_type,3,optional" frugal:"3,optional,TFileCompressType" json:"compress_type,omitempty"` + SrcTupleId *types.TTupleId `thrift:"src_tuple_id,4,optional" frugal:"4,optional,i32" json:"src_tuple_id,omitempty"` + DestTupleId *types.TTupleId `thrift:"dest_tuple_id,5,optional" frugal:"5,optional,i32" json:"dest_tuple_id,omitempty"` + NumOfColumnsFromFile *int32 `thrift:"num_of_columns_from_file,6,optional" frugal:"6,optional,i32" json:"num_of_columns_from_file,omitempty"` + RequiredSlots []*TFileScanSlotInfo `thrift:"required_slots,7,optional" frugal:"7,optional,list" json:"required_slots,omitempty"` + HdfsParams *THdfsParams `thrift:"hdfs_params,8,optional" frugal:"8,optional,THdfsParams" json:"hdfs_params,omitempty"` + Properties map[string]string `thrift:"properties,9,optional" frugal:"9,optional,map" json:"properties,omitempty"` + ExprOfDestSlot map[types.TSlotId]*exprs.TExpr `thrift:"expr_of_dest_slot,10,optional" frugal:"10,optional,map" json:"expr_of_dest_slot,omitempty"` + DefaultValueOfSrcSlot map[types.TSlotId]*exprs.TExpr `thrift:"default_value_of_src_slot,11,optional" frugal:"11,optional,map" json:"default_value_of_src_slot,omitempty"` + DestSidToSrcSidWithoutTrans map[types.TSlotId]types.TSlotId `thrift:"dest_sid_to_src_sid_without_trans,12,optional" frugal:"12,optional,map" json:"dest_sid_to_src_sid_without_trans,omitempty"` + StrictMode *bool `thrift:"strict_mode,13,optional" frugal:"13,optional,bool" json:"strict_mode,omitempty"` + BrokerAddresses []*types.TNetworkAddress `thrift:"broker_addresses,14,optional" frugal:"14,optional,list" json:"broker_addresses,omitempty"` + FileAttributes *TFileAttributes `thrift:"file_attributes,15,optional" frugal:"15,optional,TFileAttributes" json:"file_attributes,omitempty"` + PreFilterExprs *exprs.TExpr `thrift:"pre_filter_exprs,16,optional" frugal:"16,optional,exprs.TExpr" json:"pre_filter_exprs,omitempty"` + TableFormatParams *TTableFormatFileDesc `thrift:"table_format_params,17,optional" frugal:"17,optional,TTableFormatFileDesc" json:"table_format_params,omitempty"` + ColumnIdxs []int32 `thrift:"column_idxs,18,optional" frugal:"18,optional,list" json:"column_idxs,omitempty"` + SlotNameToSchemaPos map[string]int32 `thrift:"slot_name_to_schema_pos,19,optional" frugal:"19,optional,map" json:"slot_name_to_schema_pos,omitempty"` + PreFilterExprsList []*exprs.TExpr `thrift:"pre_filter_exprs_list,20,optional" frugal:"20,optional,list" json:"pre_filter_exprs_list,omitempty"` + LoadId *types.TUniqueId `thrift:"load_id,21,optional" frugal:"21,optional,types.TUniqueId" json:"load_id,omitempty"` + TextSerdeType *TTextSerdeType `thrift:"text_serde_type,22,optional" frugal:"22,optional,TTextSerdeType" json:"text_serde_type,omitempty"` + SequenceMapCol *string `thrift:"sequence_map_col,23,optional" frugal:"23,optional,string" json:"sequence_map_col,omitempty"` + SerializedTable *string `thrift:"serialized_table,24,optional" frugal:"24,optional,string" json:"serialized_table,omitempty"` +} + +func NewTFileScanRangeParams() *TFileScanRangeParams { + return &TFileScanRangeParams{} +} + +func (p 
*TFileScanRangeParams) InitDefault() { +} + +var TFileScanRangeParams_FileType_DEFAULT types.TFileType + +func (p *TFileScanRangeParams) GetFileType() (v types.TFileType) { + if !p.IsSetFileType() { + return TFileScanRangeParams_FileType_DEFAULT + } + return *p.FileType +} + +var TFileScanRangeParams_FormatType_DEFAULT TFileFormatType + +func (p *TFileScanRangeParams) GetFormatType() (v TFileFormatType) { + if !p.IsSetFormatType() { + return TFileScanRangeParams_FormatType_DEFAULT + } + return *p.FormatType +} + +var TFileScanRangeParams_CompressType_DEFAULT TFileCompressType + +func (p *TFileScanRangeParams) GetCompressType() (v TFileCompressType) { + if !p.IsSetCompressType() { + return TFileScanRangeParams_CompressType_DEFAULT + } + return *p.CompressType +} + +var TFileScanRangeParams_SrcTupleId_DEFAULT types.TTupleId + +func (p *TFileScanRangeParams) GetSrcTupleId() (v types.TTupleId) { + if !p.IsSetSrcTupleId() { + return TFileScanRangeParams_SrcTupleId_DEFAULT + } + return *p.SrcTupleId +} + +var TFileScanRangeParams_DestTupleId_DEFAULT types.TTupleId + +func (p *TFileScanRangeParams) GetDestTupleId() (v types.TTupleId) { + if !p.IsSetDestTupleId() { + return TFileScanRangeParams_DestTupleId_DEFAULT + } + return *p.DestTupleId +} + +var TFileScanRangeParams_NumOfColumnsFromFile_DEFAULT int32 + +func (p *TFileScanRangeParams) GetNumOfColumnsFromFile() (v int32) { + if !p.IsSetNumOfColumnsFromFile() { + return TFileScanRangeParams_NumOfColumnsFromFile_DEFAULT + } + return *p.NumOfColumnsFromFile +} + +var TFileScanRangeParams_RequiredSlots_DEFAULT []*TFileScanSlotInfo + +func (p *TFileScanRangeParams) GetRequiredSlots() (v []*TFileScanSlotInfo) { + if !p.IsSetRequiredSlots() { + return TFileScanRangeParams_RequiredSlots_DEFAULT + } + return p.RequiredSlots +} + +var TFileScanRangeParams_HdfsParams_DEFAULT *THdfsParams + +func (p *TFileScanRangeParams) GetHdfsParams() (v *THdfsParams) { + if !p.IsSetHdfsParams() { + return TFileScanRangeParams_HdfsParams_DEFAULT + } + return p.HdfsParams +} + +var TFileScanRangeParams_Properties_DEFAULT map[string]string + +func (p *TFileScanRangeParams) GetProperties() (v map[string]string) { + if !p.IsSetProperties() { + return TFileScanRangeParams_Properties_DEFAULT + } + return p.Properties +} + +var TFileScanRangeParams_ExprOfDestSlot_DEFAULT map[types.TSlotId]*exprs.TExpr + +func (p *TFileScanRangeParams) GetExprOfDestSlot() (v map[types.TSlotId]*exprs.TExpr) { + if !p.IsSetExprOfDestSlot() { + return TFileScanRangeParams_ExprOfDestSlot_DEFAULT + } + return p.ExprOfDestSlot +} + +var TFileScanRangeParams_DefaultValueOfSrcSlot_DEFAULT map[types.TSlotId]*exprs.TExpr + +func (p *TFileScanRangeParams) GetDefaultValueOfSrcSlot() (v map[types.TSlotId]*exprs.TExpr) { + if !p.IsSetDefaultValueOfSrcSlot() { + return TFileScanRangeParams_DefaultValueOfSrcSlot_DEFAULT + } + return p.DefaultValueOfSrcSlot +} + +var TFileScanRangeParams_DestSidToSrcSidWithoutTrans_DEFAULT map[types.TSlotId]types.TSlotId + +func (p *TFileScanRangeParams) GetDestSidToSrcSidWithoutTrans() (v map[types.TSlotId]types.TSlotId) { + if !p.IsSetDestSidToSrcSidWithoutTrans() { + return TFileScanRangeParams_DestSidToSrcSidWithoutTrans_DEFAULT + } + return p.DestSidToSrcSidWithoutTrans +} + +var TFileScanRangeParams_StrictMode_DEFAULT bool + +func (p *TFileScanRangeParams) GetStrictMode() (v bool) { + if !p.IsSetStrictMode() { + return TFileScanRangeParams_StrictMode_DEFAULT + } + return *p.StrictMode +} + +var TFileScanRangeParams_BrokerAddresses_DEFAULT []*types.TNetworkAddress + +func 
(p *TFileScanRangeParams) GetBrokerAddresses() (v []*types.TNetworkAddress) { + if !p.IsSetBrokerAddresses() { + return TFileScanRangeParams_BrokerAddresses_DEFAULT + } + return p.BrokerAddresses +} + +var TFileScanRangeParams_FileAttributes_DEFAULT *TFileAttributes + +func (p *TFileScanRangeParams) GetFileAttributes() (v *TFileAttributes) { + if !p.IsSetFileAttributes() { + return TFileScanRangeParams_FileAttributes_DEFAULT + } + return p.FileAttributes +} + +var TFileScanRangeParams_PreFilterExprs_DEFAULT *exprs.TExpr + +func (p *TFileScanRangeParams) GetPreFilterExprs() (v *exprs.TExpr) { + if !p.IsSetPreFilterExprs() { + return TFileScanRangeParams_PreFilterExprs_DEFAULT + } + return p.PreFilterExprs +} + +var TFileScanRangeParams_TableFormatParams_DEFAULT *TTableFormatFileDesc + +func (p *TFileScanRangeParams) GetTableFormatParams() (v *TTableFormatFileDesc) { + if !p.IsSetTableFormatParams() { + return TFileScanRangeParams_TableFormatParams_DEFAULT + } + return p.TableFormatParams +} + +var TFileScanRangeParams_ColumnIdxs_DEFAULT []int32 + +func (p *TFileScanRangeParams) GetColumnIdxs() (v []int32) { + if !p.IsSetColumnIdxs() { + return TFileScanRangeParams_ColumnIdxs_DEFAULT + } + return p.ColumnIdxs +} + +var TFileScanRangeParams_SlotNameToSchemaPos_DEFAULT map[string]int32 + +func (p *TFileScanRangeParams) GetSlotNameToSchemaPos() (v map[string]int32) { + if !p.IsSetSlotNameToSchemaPos() { + return TFileScanRangeParams_SlotNameToSchemaPos_DEFAULT + } + return p.SlotNameToSchemaPos +} + +var TFileScanRangeParams_PreFilterExprsList_DEFAULT []*exprs.TExpr + +func (p *TFileScanRangeParams) GetPreFilterExprsList() (v []*exprs.TExpr) { + if !p.IsSetPreFilterExprsList() { + return TFileScanRangeParams_PreFilterExprsList_DEFAULT + } + return p.PreFilterExprsList +} + +var TFileScanRangeParams_LoadId_DEFAULT *types.TUniqueId + +func (p *TFileScanRangeParams) GetLoadId() (v *types.TUniqueId) { + if !p.IsSetLoadId() { + return TFileScanRangeParams_LoadId_DEFAULT + } + return p.LoadId +} + +var TFileScanRangeParams_TextSerdeType_DEFAULT TTextSerdeType + +func (p *TFileScanRangeParams) GetTextSerdeType() (v TTextSerdeType) { + if !p.IsSetTextSerdeType() { + return TFileScanRangeParams_TextSerdeType_DEFAULT + } + return *p.TextSerdeType +} + +var TFileScanRangeParams_SequenceMapCol_DEFAULT string + +func (p *TFileScanRangeParams) GetSequenceMapCol() (v string) { + if !p.IsSetSequenceMapCol() { + return TFileScanRangeParams_SequenceMapCol_DEFAULT + } + return *p.SequenceMapCol +} + +var TFileScanRangeParams_SerializedTable_DEFAULT string + +func (p *TFileScanRangeParams) GetSerializedTable() (v string) { + if !p.IsSetSerializedTable() { + return TFileScanRangeParams_SerializedTable_DEFAULT + } + return *p.SerializedTable +} +func (p *TFileScanRangeParams) SetFileType(val *types.TFileType) { + p.FileType = val +} +func (p *TFileScanRangeParams) SetFormatType(val *TFileFormatType) { + p.FormatType = val +} +func (p *TFileScanRangeParams) SetCompressType(val *TFileCompressType) { + p.CompressType = val +} +func (p *TFileScanRangeParams) SetSrcTupleId(val *types.TTupleId) { + p.SrcTupleId = val +} +func (p *TFileScanRangeParams) SetDestTupleId(val *types.TTupleId) { + p.DestTupleId = val +} +func (p *TFileScanRangeParams) SetNumOfColumnsFromFile(val *int32) { + p.NumOfColumnsFromFile = val +} +func (p *TFileScanRangeParams) SetRequiredSlots(val []*TFileScanSlotInfo) { + p.RequiredSlots = val +} +func (p *TFileScanRangeParams) SetHdfsParams(val *THdfsParams) { + p.HdfsParams = val +} +func (p 
*TFileScanRangeParams) SetProperties(val map[string]string) { + p.Properties = val +} +func (p *TFileScanRangeParams) SetExprOfDestSlot(val map[types.TSlotId]*exprs.TExpr) { + p.ExprOfDestSlot = val +} +func (p *TFileScanRangeParams) SetDefaultValueOfSrcSlot(val map[types.TSlotId]*exprs.TExpr) { + p.DefaultValueOfSrcSlot = val +} +func (p *TFileScanRangeParams) SetDestSidToSrcSidWithoutTrans(val map[types.TSlotId]types.TSlotId) { + p.DestSidToSrcSidWithoutTrans = val +} +func (p *TFileScanRangeParams) SetStrictMode(val *bool) { + p.StrictMode = val +} +func (p *TFileScanRangeParams) SetBrokerAddresses(val []*types.TNetworkAddress) { + p.BrokerAddresses = val +} +func (p *TFileScanRangeParams) SetFileAttributes(val *TFileAttributes) { + p.FileAttributes = val +} +func (p *TFileScanRangeParams) SetPreFilterExprs(val *exprs.TExpr) { + p.PreFilterExprs = val +} +func (p *TFileScanRangeParams) SetTableFormatParams(val *TTableFormatFileDesc) { + p.TableFormatParams = val +} +func (p *TFileScanRangeParams) SetColumnIdxs(val []int32) { + p.ColumnIdxs = val +} +func (p *TFileScanRangeParams) SetSlotNameToSchemaPos(val map[string]int32) { + p.SlotNameToSchemaPos = val +} +func (p *TFileScanRangeParams) SetPreFilterExprsList(val []*exprs.TExpr) { + p.PreFilterExprsList = val +} +func (p *TFileScanRangeParams) SetLoadId(val *types.TUniqueId) { + p.LoadId = val +} +func (p *TFileScanRangeParams) SetTextSerdeType(val *TTextSerdeType) { + p.TextSerdeType = val +} +func (p *TFileScanRangeParams) SetSequenceMapCol(val *string) { + p.SequenceMapCol = val +} +func (p *TFileScanRangeParams) SetSerializedTable(val *string) { + p.SerializedTable = val +} + +var fieldIDToName_TFileScanRangeParams = map[int16]string{ + 1: "file_type", + 2: "format_type", + 3: "compress_type", + 4: "src_tuple_id", + 5: "dest_tuple_id", + 6: "num_of_columns_from_file", + 7: "required_slots", + 8: "hdfs_params", + 9: "properties", + 10: "expr_of_dest_slot", + 11: "default_value_of_src_slot", + 12: "dest_sid_to_src_sid_without_trans", + 13: "strict_mode", + 14: "broker_addresses", + 15: "file_attributes", + 16: "pre_filter_exprs", + 17: "table_format_params", + 18: "column_idxs", + 19: "slot_name_to_schema_pos", + 20: "pre_filter_exprs_list", + 21: "load_id", + 22: "text_serde_type", + 23: "sequence_map_col", + 24: "serialized_table", +} + +func (p *TFileScanRangeParams) IsSetFileType() bool { + return p.FileType != nil +} + +func (p *TFileScanRangeParams) IsSetFormatType() bool { + return p.FormatType != nil +} + +func (p *TFileScanRangeParams) IsSetCompressType() bool { + return p.CompressType != nil +} + +func (p *TFileScanRangeParams) IsSetSrcTupleId() bool { + return p.SrcTupleId != nil +} + +func (p *TFileScanRangeParams) IsSetDestTupleId() bool { + return p.DestTupleId != nil +} + +func (p *TFileScanRangeParams) IsSetNumOfColumnsFromFile() bool { + return p.NumOfColumnsFromFile != nil +} + +func (p *TFileScanRangeParams) IsSetRequiredSlots() bool { + return p.RequiredSlots != nil +} + +func (p *TFileScanRangeParams) IsSetHdfsParams() bool { + return p.HdfsParams != nil +} + +func (p *TFileScanRangeParams) IsSetProperties() bool { + return p.Properties != nil +} + +func (p *TFileScanRangeParams) IsSetExprOfDestSlot() bool { + return p.ExprOfDestSlot != nil +} + +func (p *TFileScanRangeParams) IsSetDefaultValueOfSrcSlot() bool { + return p.DefaultValueOfSrcSlot != nil +} + +func (p *TFileScanRangeParams) IsSetDestSidToSrcSidWithoutTrans() bool { + return p.DestSidToSrcSidWithoutTrans != nil +} + +func (p *TFileScanRangeParams) 
IsSetStrictMode() bool { + return p.StrictMode != nil +} + +func (p *TFileScanRangeParams) IsSetBrokerAddresses() bool { + return p.BrokerAddresses != nil +} + +func (p *TFileScanRangeParams) IsSetFileAttributes() bool { + return p.FileAttributes != nil +} + +func (p *TFileScanRangeParams) IsSetPreFilterExprs() bool { + return p.PreFilterExprs != nil +} + +func (p *TFileScanRangeParams) IsSetTableFormatParams() bool { + return p.TableFormatParams != nil +} + +func (p *TFileScanRangeParams) IsSetColumnIdxs() bool { + return p.ColumnIdxs != nil +} + +func (p *TFileScanRangeParams) IsSetSlotNameToSchemaPos() bool { + return p.SlotNameToSchemaPos != nil +} + +func (p *TFileScanRangeParams) IsSetPreFilterExprsList() bool { + return p.PreFilterExprsList != nil +} + +func (p *TFileScanRangeParams) IsSetLoadId() bool { + return p.LoadId != nil +} + +func (p *TFileScanRangeParams) IsSetTextSerdeType() bool { + return p.TextSerdeType != nil +} + +func (p *TFileScanRangeParams) IsSetSequenceMapCol() bool { + return p.SequenceMapCol != nil +} + +func (p *TFileScanRangeParams) IsSetSerializedTable() bool { + return p.SerializedTable != nil +} + +func (p *TFileScanRangeParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I32 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.MAP { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.MAP { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.MAP 
{ + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.MAP { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.LIST { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 17: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField17(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.LIST { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.MAP { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.LIST { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 21: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField21(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 22: + if fieldTypeId == thrift.I32 { + if err = p.ReadField22(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 23: + if fieldTypeId == thrift.STRING { + if err = p.ReadField23(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 24: + if fieldTypeId == thrift.STRING { + if err = p.ReadField24(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRangeParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFileScanRangeParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TFileType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TFileType(v) + _field = &tmp + } + p.FileType = _field + return nil +} +func (p *TFileScanRangeParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *TFileFormatType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TFileFormatType(v) + _field = &tmp + } + p.FormatType = _field + return nil +} +func (p *TFileScanRangeParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TFileCompressType(v) + _field = &tmp + } + p.CompressType = _field + return nil +} +func (p *TFileScanRangeParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TTupleId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.SrcTupleId = _field + return nil +} +func (p *TFileScanRangeParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *types.TTupleId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.DestTupleId = _field + return nil +} +func (p *TFileScanRangeParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.NumOfColumnsFromFile = _field + return nil +} +func (p *TFileScanRangeParams) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TFileScanSlotInfo, 0, size) + values := make([]TFileScanSlotInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.RequiredSlots = _field + return nil +} +func (p *TFileScanRangeParams) ReadField8(iprot thrift.TProtocol) error { + _field := NewTHdfsParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.HdfsParams = _field + return nil +} +func (p *TFileScanRangeParams) ReadField9(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.Properties = _field + return nil +} +func (p *TFileScanRangeParams) ReadField10(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TSlotId]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + 
p.ExprOfDestSlot = _field + return nil +} +func (p *TFileScanRangeParams) ReadField11(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TSlotId]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.DefaultValueOfSrcSlot = _field + return nil +} +func (p *TFileScanRangeParams) ReadField12(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TSlotId]types.TSlotId, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val types.TSlotId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.DestSidToSrcSidWithoutTrans = _field + return nil +} +func (p *TFileScanRangeParams) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.StrictMode = _field + return nil +} +func (p *TFileScanRangeParams) ReadField14(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.BrokerAddresses = _field + return nil +} +func (p *TFileScanRangeParams) ReadField15(iprot thrift.TProtocol) error { + _field := NewTFileAttributes() + if err := _field.Read(iprot); err != nil { + return err + } + p.FileAttributes = _field + return nil +} +func (p *TFileScanRangeParams) ReadField16(iprot thrift.TProtocol) error { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { + return err + } + p.PreFilterExprs = _field + return nil +} +func (p *TFileScanRangeParams) ReadField17(iprot thrift.TProtocol) error { + _field := NewTTableFormatFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.TableFormatParams = _field + return nil +} +func (p *TFileScanRangeParams) ReadField18(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnIdxs = _field + return nil +} +func (p *TFileScanRangeParams) ReadField19(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[string]int32, size) + for i := 0; i < size; i++ { + var _key string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _key = v + } + + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return 
err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.SlotNameToSchemaPos = _field + return nil +} +func (p *TFileScanRangeParams) ReadField20(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.PreFilterExprsList = _field + return nil +} +func (p *TFileScanRangeParams) ReadField21(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.LoadId = _field + return nil +} +func (p *TFileScanRangeParams) ReadField22(iprot thrift.TProtocol) error { + + var _field *TTextSerdeType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TTextSerdeType(v) + _field = &tmp + } + p.TextSerdeType = _field + return nil +} +func (p *TFileScanRangeParams) ReadField23(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SequenceMapCol = _field + return nil +} +func (p *TFileScanRangeParams) ReadField24(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SerializedTable = _field + return nil +} + +func (p *TFileScanRangeParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFileScanRangeParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } + if err = p.writeField17(oprot); err != nil { + fieldId = 17 + goto WriteFieldError + } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); 
err != nil { + fieldId = 20 + goto WriteFieldError + } + if err = p.writeField21(oprot); err != nil { + fieldId = 21 + goto WriteFieldError + } + if err = p.writeField22(oprot); err != nil { + fieldId = 22 + goto WriteFieldError + } + if err = p.writeField23(oprot); err != nil { + fieldId = 23 + goto WriteFieldError + } + if err = p.writeField24(oprot); err != nil { + fieldId = 24 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFileType() { + if err = oprot.WriteFieldBegin("file_type", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFormatType() { + if err = oprot.WriteFieldBegin("format_type", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FormatType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressType() { + if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSrcTupleId() { + if err = oprot.WriteFieldBegin("src_tuple_id", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.SrcTupleId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetDestTupleId() { + if err = 
oprot.WriteFieldBegin("dest_tuple_id", thrift.I32, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.DestTupleId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetNumOfColumnsFromFile() { + if err = oprot.WriteFieldBegin("num_of_columns_from_file", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumOfColumnsFromFile); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetRequiredSlots() { + if err = oprot.WriteFieldBegin("required_slots", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RequiredSlots)); err != nil { + return err + } + for _, v := range p.RequiredSlots { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHdfsParams() { + if err = oprot.WriteFieldBegin("hdfs_params", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.HdfsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err = oprot.WriteFieldBegin("properties", thrift.MAP, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return err + } + for k, v := range p.Properties { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetExprOfDestSlot() { + if err = oprot.WriteFieldBegin("expr_of_dest_slot", 
thrift.MAP, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.ExprOfDestSlot)); err != nil { + return err + } + for k, v := range p.ExprOfDestSlot { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultValueOfSrcSlot() { + if err = oprot.WriteFieldBegin("default_value_of_src_slot", thrift.MAP, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.DefaultValueOfSrcSlot)); err != nil { + return err + } + for k, v := range p.DefaultValueOfSrcSlot { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDestSidToSrcSidWithoutTrans() { + if err = oprot.WriteFieldBegin("dest_sid_to_src_sid_without_trans", thrift.MAP, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.DestSidToSrcSidWithoutTrans)); err != nil { + return err + } + for k, v := range p.DestSidToSrcSidWithoutTrans { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetStrictMode() { + if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.StrictMode); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetBrokerAddresses() { + if err = oprot.WriteFieldBegin("broker_addresses", thrift.LIST, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BrokerAddresses)); err != nil { + return err + } + for _, v := range p.BrokerAddresses { + if err := 
v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetFileAttributes() { + if err = oprot.WriteFieldBegin("file_attributes", thrift.STRUCT, 15); err != nil { + goto WriteFieldBeginError + } + if err := p.FileAttributes.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetPreFilterExprs() { + if err = oprot.WriteFieldBegin("pre_filter_exprs", thrift.STRUCT, 16); err != nil { + goto WriteFieldBeginError + } + if err := p.PreFilterExprs.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField17(oprot thrift.TProtocol) (err error) { + if p.IsSetTableFormatParams() { + if err = oprot.WriteFieldBegin("table_format_params", thrift.STRUCT, 17); err != nil { + goto WriteFieldBeginError + } + if err := p.TableFormatParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnIdxs() { + if err = oprot.WriteFieldBegin("column_idxs", thrift.LIST, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.ColumnIdxs)); err != nil { + return err + } + for _, v := range p.ColumnIdxs { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetSlotNameToSchemaPos() { + if err = oprot.WriteFieldBegin("slot_name_to_schema_pos", thrift.MAP, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.I32, len(p.SlotNameToSchemaPos)); err != nil { + return err + } + for k, v := range p.SlotNameToSchemaPos { + if err := oprot.WriteString(k); err != nil { + return err + } + if err := 
oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetPreFilterExprsList() { + if err = oprot.WriteFieldBegin("pre_filter_exprs_list", thrift.LIST, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PreFilterExprsList)); err != nil { + return err + } + for _, v := range p.PreFilterExprsList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField21(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadId() { + if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 21); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField22(oprot thrift.TProtocol) (err error) { + if p.IsSetTextSerdeType() { + if err = oprot.WriteFieldBegin("text_serde_type", thrift.I32, 22); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.TextSerdeType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField23(oprot thrift.TProtocol) (err error) { + if p.IsSetSequenceMapCol() { + if err = oprot.WriteFieldBegin("sequence_map_col", thrift.STRING, 23); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SequenceMapCol); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err) +} + +func (p *TFileScanRangeParams) writeField24(oprot thrift.TProtocol) (err error) { + if p.IsSetSerializedTable() { + if err = oprot.WriteFieldBegin("serialized_table", thrift.STRING, 24); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SerializedTable); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err) +} + +func (p *TFileScanRangeParams) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFileScanRangeParams(%+v)", *p) + +} + +func (p *TFileScanRangeParams) DeepEqual(ano *TFileScanRangeParams) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FileType) { + return false + } + if !p.Field2DeepEqual(ano.FormatType) { + return false + } + if !p.Field3DeepEqual(ano.CompressType) { + return false + } + if !p.Field4DeepEqual(ano.SrcTupleId) { + return false + } + if !p.Field5DeepEqual(ano.DestTupleId) { + return false + } + if !p.Field6DeepEqual(ano.NumOfColumnsFromFile) { + return false + } + if !p.Field7DeepEqual(ano.RequiredSlots) { + return false + } + if !p.Field8DeepEqual(ano.HdfsParams) { + return false + } + if !p.Field9DeepEqual(ano.Properties) { + return false + } + if !p.Field10DeepEqual(ano.ExprOfDestSlot) { + return false + } + if !p.Field11DeepEqual(ano.DefaultValueOfSrcSlot) { + return false + } + if !p.Field12DeepEqual(ano.DestSidToSrcSidWithoutTrans) { + return false + } + if !p.Field13DeepEqual(ano.StrictMode) { + return false + } + if !p.Field14DeepEqual(ano.BrokerAddresses) { + return false + } + if !p.Field15DeepEqual(ano.FileAttributes) { + return false + } + if !p.Field16DeepEqual(ano.PreFilterExprs) { + return false + } + if !p.Field17DeepEqual(ano.TableFormatParams) { + return false + } + if !p.Field18DeepEqual(ano.ColumnIdxs) { + return false + } + if !p.Field19DeepEqual(ano.SlotNameToSchemaPos) { + return false + } + if !p.Field20DeepEqual(ano.PreFilterExprsList) { + return false + } + if !p.Field21DeepEqual(ano.LoadId) { + return false + } + if !p.Field22DeepEqual(ano.TextSerdeType) { + return false + } + if !p.Field23DeepEqual(ano.SequenceMapCol) { + return false + } + if !p.Field24DeepEqual(ano.SerializedTable) { + return false + } + return true +} + +func (p *TFileScanRangeParams) Field1DeepEqual(src *types.TFileType) bool { + + if p.FileType == src { + return true + } else if p.FileType == nil || src == nil { + return false + } + if *p.FileType != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field2DeepEqual(src *TFileFormatType) bool { + + if p.FormatType == src { + return true + } else if p.FormatType == nil || src == nil { + return false + } + if *p.FormatType != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field3DeepEqual(src *TFileCompressType) bool { + + if p.CompressType == src { + return true + } else if p.CompressType == nil || src == nil { + return false + } + if *p.CompressType != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field4DeepEqual(src *types.TTupleId) bool { + + if p.SrcTupleId == src { + return true + } else if p.SrcTupleId == nil || src == nil { + return false + } + if *p.SrcTupleId != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field5DeepEqual(src *types.TTupleId) bool { + + if p.DestTupleId == src { + return true + } else if p.DestTupleId == nil || src == nil { + return false + } + if *p.DestTupleId != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field6DeepEqual(src *int32) bool { + + if p.NumOfColumnsFromFile == src { + return true + } else if p.NumOfColumnsFromFile == nil || src == nil { + return false + } + if 
*p.NumOfColumnsFromFile != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field7DeepEqual(src []*TFileScanSlotInfo) bool { + + if len(p.RequiredSlots) != len(src) { + return false + } + for i, v := range p.RequiredSlots { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field8DeepEqual(src *THdfsParams) bool { + + if !p.HdfsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRangeParams) Field9DeepEqual(src map[string]string) bool { + + if len(p.Properties) != len(src) { + return false + } + for k, v := range p.Properties { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field10DeepEqual(src map[types.TSlotId]*exprs.TExpr) bool { + + if len(p.ExprOfDestSlot) != len(src) { + return false + } + for k, v := range p.ExprOfDestSlot { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field11DeepEqual(src map[types.TSlotId]*exprs.TExpr) bool { + + if len(p.DefaultValueOfSrcSlot) != len(src) { + return false + } + for k, v := range p.DefaultValueOfSrcSlot { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field12DeepEqual(src map[types.TSlotId]types.TSlotId) bool { + + if len(p.DestSidToSrcSidWithoutTrans) != len(src) { + return false + } + for k, v := range p.DestSidToSrcSidWithoutTrans { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field13DeepEqual(src *bool) bool { + + if p.StrictMode == src { + return true + } else if p.StrictMode == nil || src == nil { + return false + } + if *p.StrictMode != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field14DeepEqual(src []*types.TNetworkAddress) bool { + + if len(p.BrokerAddresses) != len(src) { + return false + } + for i, v := range p.BrokerAddresses { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field15DeepEqual(src *TFileAttributes) bool { + + if !p.FileAttributes.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRangeParams) Field16DeepEqual(src *exprs.TExpr) bool { + + if !p.PreFilterExprs.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRangeParams) Field17DeepEqual(src *TTableFormatFileDesc) bool { + + if !p.TableFormatParams.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRangeParams) Field18DeepEqual(src []int32) bool { + + if len(p.ColumnIdxs) != len(src) { + return false + } + for i, v := range p.ColumnIdxs { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field19DeepEqual(src map[string]int32) bool { + + if len(p.SlotNameToSchemaPos) != len(src) { + return false + } + for k, v := range p.SlotNameToSchemaPos { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field20DeepEqual(src []*exprs.TExpr) bool { + + if len(p.PreFilterExprsList) != len(src) { + return false + } + for i, v := range p.PreFilterExprsList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TFileScanRangeParams) Field21DeepEqual(src *types.TUniqueId) bool { + + if !p.LoadId.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRangeParams) 
Field22DeepEqual(src *TTextSerdeType) bool { + + if p.TextSerdeType == src { + return true + } else if p.TextSerdeType == nil || src == nil { + return false + } + if *p.TextSerdeType != *src { + return false + } + return true +} +func (p *TFileScanRangeParams) Field23DeepEqual(src *string) bool { + + if p.SequenceMapCol == src { + return true + } else if p.SequenceMapCol == nil || src == nil { + return false + } + if strings.Compare(*p.SequenceMapCol, *src) != 0 { + return false + } + return true +} +func (p *TFileScanRangeParams) Field24DeepEqual(src *string) bool { + + if p.SerializedTable == src { + return true + } else if p.SerializedTable == nil || src == nil { + return false + } + if strings.Compare(*p.SerializedTable, *src) != 0 { + return false + } + return true +} + +type TFileRangeDesc struct { + LoadId *types.TUniqueId `thrift:"load_id,1,optional" frugal:"1,optional,types.TUniqueId" json:"load_id,omitempty"` + Path *string `thrift:"path,2,optional" frugal:"2,optional,string" json:"path,omitempty"` + StartOffset *int64 `thrift:"start_offset,3,optional" frugal:"3,optional,i64" json:"start_offset,omitempty"` + Size *int64 `thrift:"size,4,optional" frugal:"4,optional,i64" json:"size,omitempty"` + FileSize int64 `thrift:"file_size,5,optional" frugal:"5,optional,i64" json:"file_size,omitempty"` + ColumnsFromPath []string `thrift:"columns_from_path,6,optional" frugal:"6,optional,list" json:"columns_from_path,omitempty"` + ColumnsFromPathKeys []string `thrift:"columns_from_path_keys,7,optional" frugal:"7,optional,list" json:"columns_from_path_keys,omitempty"` + TableFormatParams *TTableFormatFileDesc `thrift:"table_format_params,8,optional" frugal:"8,optional,TTableFormatFileDesc" json:"table_format_params,omitempty"` + ModificationTime *int64 `thrift:"modification_time,9,optional" frugal:"9,optional,i64" json:"modification_time,omitempty"` + FileType *types.TFileType `thrift:"file_type,10,optional" frugal:"10,optional,TFileType" json:"file_type,omitempty"` + CompressType *TFileCompressType `thrift:"compress_type,11,optional" frugal:"11,optional,TFileCompressType" json:"compress_type,omitempty"` + FsName *string `thrift:"fs_name,12,optional" frugal:"12,optional,string" json:"fs_name,omitempty"` +} + +func NewTFileRangeDesc() *TFileRangeDesc { + return &TFileRangeDesc{ + + FileSize: -1, + } +} + +func (p *TFileRangeDesc) InitDefault() { + p.FileSize = -1 +} + +var TFileRangeDesc_LoadId_DEFAULT *types.TUniqueId + +func (p *TFileRangeDesc) GetLoadId() (v *types.TUniqueId) { + if !p.IsSetLoadId() { + return TFileRangeDesc_LoadId_DEFAULT + } + return p.LoadId +} + +var TFileRangeDesc_Path_DEFAULT string + +func (p *TFileRangeDesc) GetPath() (v string) { + if !p.IsSetPath() { + return TFileRangeDesc_Path_DEFAULT + } + return *p.Path +} + +var TFileRangeDesc_StartOffset_DEFAULT int64 + +func (p *TFileRangeDesc) GetStartOffset() (v int64) { + if !p.IsSetStartOffset() { + return TFileRangeDesc_StartOffset_DEFAULT + } + return *p.StartOffset +} + +var TFileRangeDesc_Size_DEFAULT int64 + +func (p *TFileRangeDesc) GetSize() (v int64) { + if !p.IsSetSize() { + return TFileRangeDesc_Size_DEFAULT + } + return *p.Size +} + +var TFileRangeDesc_FileSize_DEFAULT int64 = -1 + +func (p *TFileRangeDesc) GetFileSize() (v int64) { + if !p.IsSetFileSize() { + return TFileRangeDesc_FileSize_DEFAULT + } + return p.FileSize +} + +var TFileRangeDesc_ColumnsFromPath_DEFAULT []string + +func (p *TFileRangeDesc) GetColumnsFromPath() (v []string) { + if !p.IsSetColumnsFromPath() { + return 
TFileRangeDesc_ColumnsFromPath_DEFAULT + } + return p.ColumnsFromPath +} + +var TFileRangeDesc_ColumnsFromPathKeys_DEFAULT []string + +func (p *TFileRangeDesc) GetColumnsFromPathKeys() (v []string) { + if !p.IsSetColumnsFromPathKeys() { + return TFileRangeDesc_ColumnsFromPathKeys_DEFAULT + } + return p.ColumnsFromPathKeys +} + +var TFileRangeDesc_TableFormatParams_DEFAULT *TTableFormatFileDesc + +func (p *TFileRangeDesc) GetTableFormatParams() (v *TTableFormatFileDesc) { + if !p.IsSetTableFormatParams() { + return TFileRangeDesc_TableFormatParams_DEFAULT + } + return p.TableFormatParams +} + +var TFileRangeDesc_ModificationTime_DEFAULT int64 + +func (p *TFileRangeDesc) GetModificationTime() (v int64) { + if !p.IsSetModificationTime() { + return TFileRangeDesc_ModificationTime_DEFAULT + } + return *p.ModificationTime +} + +var TFileRangeDesc_FileType_DEFAULT types.TFileType + +func (p *TFileRangeDesc) GetFileType() (v types.TFileType) { + if !p.IsSetFileType() { + return TFileRangeDesc_FileType_DEFAULT + } + return *p.FileType +} + +var TFileRangeDesc_CompressType_DEFAULT TFileCompressType + +func (p *TFileRangeDesc) GetCompressType() (v TFileCompressType) { + if !p.IsSetCompressType() { + return TFileRangeDesc_CompressType_DEFAULT + } + return *p.CompressType +} + +var TFileRangeDesc_FsName_DEFAULT string + +func (p *TFileRangeDesc) GetFsName() (v string) { + if !p.IsSetFsName() { + return TFileRangeDesc_FsName_DEFAULT + } + return *p.FsName +} +func (p *TFileRangeDesc) SetLoadId(val *types.TUniqueId) { + p.LoadId = val +} +func (p *TFileRangeDesc) SetPath(val *string) { + p.Path = val +} +func (p *TFileRangeDesc) SetStartOffset(val *int64) { + p.StartOffset = val +} +func (p *TFileRangeDesc) SetSize(val *int64) { + p.Size = val +} +func (p *TFileRangeDesc) SetFileSize(val int64) { + p.FileSize = val +} +func (p *TFileRangeDesc) SetColumnsFromPath(val []string) { + p.ColumnsFromPath = val +} +func (p *TFileRangeDesc) SetColumnsFromPathKeys(val []string) { + p.ColumnsFromPathKeys = val +} +func (p *TFileRangeDesc) SetTableFormatParams(val *TTableFormatFileDesc) { + p.TableFormatParams = val +} +func (p *TFileRangeDesc) SetModificationTime(val *int64) { + p.ModificationTime = val +} +func (p *TFileRangeDesc) SetFileType(val *types.TFileType) { + p.FileType = val +} +func (p *TFileRangeDesc) SetCompressType(val *TFileCompressType) { + p.CompressType = val +} +func (p *TFileRangeDesc) SetFsName(val *string) { + p.FsName = val +} + +var fieldIDToName_TFileRangeDesc = map[int16]string{ + 1: "load_id", + 2: "path", + 3: "start_offset", + 4: "size", + 5: "file_size", + 6: "columns_from_path", + 7: "columns_from_path_keys", + 8: "table_format_params", + 9: "modification_time", + 10: "file_type", + 11: "compress_type", + 12: "fs_name", +} + +func (p *TFileRangeDesc) IsSetLoadId() bool { + return p.LoadId != nil +} + +func (p *TFileRangeDesc) IsSetPath() bool { + return p.Path != nil +} + +func (p *TFileRangeDesc) IsSetStartOffset() bool { + return p.StartOffset != nil +} + +func (p *TFileRangeDesc) IsSetSize() bool { + return p.Size != nil +} + +func (p *TFileRangeDesc) IsSetFileSize() bool { + return p.FileSize != TFileRangeDesc_FileSize_DEFAULT +} + +func (p *TFileRangeDesc) IsSetColumnsFromPath() bool { + return p.ColumnsFromPath != nil +} + +func (p *TFileRangeDesc) IsSetColumnsFromPathKeys() bool { + return p.ColumnsFromPathKeys != nil +} + +func (p *TFileRangeDesc) IsSetTableFormatParams() bool { + return p.TableFormatParams != nil +} + +func (p *TFileRangeDesc) IsSetModificationTime() bool { 
+ return p.ModificationTime != nil +} + +func (p *TFileRangeDesc) IsSetFileType() bool { + return p.FileType != nil +} + +func (p *TFileRangeDesc) IsSetCompressType() bool { + return p.CompressType != nil +} + +func (p *TFileRangeDesc) IsSetFsName() bool { + return p.FsName != nil +} + +func (p *TFileRangeDesc) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.STRING { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + 
return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileRangeDesc[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFileRangeDesc) ReadField1(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return err + } + p.LoadId = _field + return nil +} +func (p *TFileRangeDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Path = _field + return nil +} +func (p *TFileRangeDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.StartOffset = _field + return nil +} +func (p *TFileRangeDesc) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Size = _field + return nil +} +func (p *TFileRangeDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.FileSize = _field + return nil +} +func (p *TFileRangeDesc) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnsFromPath = _field + return nil +} +func (p *TFileRangeDesc) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ColumnsFromPathKeys = _field + return nil +} +func (p *TFileRangeDesc) ReadField8(iprot thrift.TProtocol) error { + _field := NewTTableFormatFileDesc() + if err := _field.Read(iprot); err != nil { + return err + } + p.TableFormatParams = _field + return nil +} +func (p *TFileRangeDesc) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ModificationTime = _field + return nil +} +func (p *TFileRangeDesc) ReadField10(iprot thrift.TProtocol) error { + + var _field *types.TFileType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := types.TFileType(v) + _field = &tmp + } + p.FileType = _field + return nil +} +func (p *TFileRangeDesc) ReadField11(iprot thrift.TProtocol) error { + + var _field *TFileCompressType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TFileCompressType(v) + _field = &tmp + } + p.CompressType = _field + return nil +} +func (p *TFileRangeDesc) ReadField12(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != 
nil { + return err + } else { + _field = &v + } + p.FsName = _field + return nil +} + +func (p *TFileRangeDesc) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFileRangeDesc"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetLoadId() { + if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.LoadId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPath() { + if err = oprot.WriteFieldBegin("path", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Path); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetStartOffset() { + if err = oprot.WriteFieldBegin("start_offset", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.StartOffset); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T 
write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetSize() { + if err = oprot.WriteFieldBegin("size", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Size); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetFileSize() { + if err = oprot.WriteFieldBegin("file_size", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.FileSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnsFromPath() { + if err = oprot.WriteFieldBegin("columns_from_path", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsFromPath)); err != nil { + return err + } + for _, v := range p.ColumnsFromPath { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetColumnsFromPathKeys() { + if err = oprot.WriteFieldBegin("columns_from_path_keys", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsFromPathKeys)); err != nil { + return err + } + for _, v := range p.ColumnsFromPathKeys { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTableFormatParams() { + if err = oprot.WriteFieldBegin("table_format_params", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.TableFormatParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} 
+ +func (p *TFileRangeDesc) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetModificationTime() { + if err = oprot.WriteFieldBegin("modification_time", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ModificationTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetFileType() { + if err = oprot.WriteFieldBegin("file_type", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.FileType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetCompressType() { + if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TFileRangeDesc) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetFsName() { + if err = oprot.WriteFieldBegin("fs_name", thrift.STRING, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FsName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TFileRangeDesc) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFileRangeDesc(%+v)", *p) + +} + +func (p *TFileRangeDesc) DeepEqual(ano *TFileRangeDesc) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.LoadId) { + return false + } + if !p.Field2DeepEqual(ano.Path) { + return false + } + if !p.Field3DeepEqual(ano.StartOffset) { + return false + } + if !p.Field4DeepEqual(ano.Size) { + return false + } + if !p.Field5DeepEqual(ano.FileSize) { + return false + } + if !p.Field6DeepEqual(ano.ColumnsFromPath) { + return false + } + if !p.Field7DeepEqual(ano.ColumnsFromPathKeys) { + return false + } + if !p.Field8DeepEqual(ano.TableFormatParams) { + return false + } + if !p.Field9DeepEqual(ano.ModificationTime) { + return false + } + if !p.Field10DeepEqual(ano.FileType) { + return false + } + if !p.Field11DeepEqual(ano.CompressType) { + return false + } + if !p.Field12DeepEqual(ano.FsName) { + return false + } + return true +} + +func (p *TFileRangeDesc) Field1DeepEqual(src *types.TUniqueId) bool { + + if 
!p.LoadId.DeepEqual(src) { + return false + } + return true +} +func (p *TFileRangeDesc) Field2DeepEqual(src *string) bool { + + if p.Path == src { + return true + } else if p.Path == nil || src == nil { + return false + } + if strings.Compare(*p.Path, *src) != 0 { + return false + } + return true +} +func (p *TFileRangeDesc) Field3DeepEqual(src *int64) bool { + + if p.StartOffset == src { + return true + } else if p.StartOffset == nil || src == nil { + return false + } + if *p.StartOffset != *src { + return false + } + return true +} +func (p *TFileRangeDesc) Field4DeepEqual(src *int64) bool { + + if p.Size == src { + return true + } else if p.Size == nil || src == nil { + return false + } + if *p.Size != *src { + return false + } + return true +} +func (p *TFileRangeDesc) Field5DeepEqual(src int64) bool { + + if p.FileSize != src { + return false + } + return true +} +func (p *TFileRangeDesc) Field6DeepEqual(src []string) bool { + + if len(p.ColumnsFromPath) != len(src) { + return false + } + for i, v := range p.ColumnsFromPath { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TFileRangeDesc) Field7DeepEqual(src []string) bool { + + if len(p.ColumnsFromPathKeys) != len(src) { + return false + } + for i, v := range p.ColumnsFromPathKeys { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TFileRangeDesc) Field8DeepEqual(src *TTableFormatFileDesc) bool { + + if !p.TableFormatParams.DeepEqual(src) { + return false + } + return true +} +func (p *TFileRangeDesc) Field9DeepEqual(src *int64) bool { + + if p.ModificationTime == src { + return true + } else if p.ModificationTime == nil || src == nil { + return false + } + if *p.ModificationTime != *src { + return false + } + return true +} +func (p *TFileRangeDesc) Field10DeepEqual(src *types.TFileType) bool { + + if p.FileType == src { + return true + } else if p.FileType == nil || src == nil { + return false + } + if *p.FileType != *src { + return false + } + return true +} +func (p *TFileRangeDesc) Field11DeepEqual(src *TFileCompressType) bool { + + if p.CompressType == src { + return true + } else if p.CompressType == nil || src == nil { + return false + } + if *p.CompressType != *src { + return false + } + return true +} +func (p *TFileRangeDesc) Field12DeepEqual(src *string) bool { + + if p.FsName == src { + return true + } else if p.FsName == nil || src == nil { + return false + } + if strings.Compare(*p.FsName, *src) != 0 { + return false + } + return true +} + +type TSplitSource struct { + SplitSourceId *int64 `thrift:"split_source_id,1,optional" frugal:"1,optional,i64" json:"split_source_id,omitempty"` + NumSplits *int32 `thrift:"num_splits,2,optional" frugal:"2,optional,i32" json:"num_splits,omitempty"` +} + +func NewTSplitSource() *TSplitSource { + return &TSplitSource{} +} + +func (p *TSplitSource) InitDefault() { +} + +var TSplitSource_SplitSourceId_DEFAULT int64 + +func (p *TSplitSource) GetSplitSourceId() (v int64) { + if !p.IsSetSplitSourceId() { + return TSplitSource_SplitSourceId_DEFAULT + } + return *p.SplitSourceId +} + +var TSplitSource_NumSplits_DEFAULT int32 + +func (p *TSplitSource) GetNumSplits() (v int32) { + if !p.IsSetNumSplits() { + return TSplitSource_NumSplits_DEFAULT + } + return *p.NumSplits +} +func (p *TSplitSource) SetSplitSourceId(val *int64) { + p.SplitSourceId = val +} +func (p *TSplitSource) SetNumSplits(val *int32) { + p.NumSplits = val +} + +var fieldIDToName_TSplitSource = map[int16]string{ + 1: 
"split_source_id", + 2: "num_splits", +} + +func (p *TSplitSource) IsSetSplitSourceId() bool { + return p.SplitSourceId != nil +} + +func (p *TSplitSource) IsSetNumSplits() bool { + return p.NumSplits != nil +} + +func (p *TSplitSource) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I32 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSplitSource[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TSplitSource) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.SplitSourceId = _field + return nil +} +func (p *TSplitSource) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.NumSplits = _field + return nil +} + +func (p *TSplitSource) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TSplitSource"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TSplitSource) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSplitSourceId() { + if err = oprot.WriteFieldBegin("split_source_id", thrift.I64, 1); err != nil { + goto 
WriteFieldBeginError + } + if err := oprot.WriteI64(*p.SplitSourceId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TSplitSource) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetNumSplits() { + if err = oprot.WriteFieldBegin("num_splits", thrift.I32, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NumSplits); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TSplitSource) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSplitSource(%+v)", *p) + +} + +func (p *TSplitSource) DeepEqual(ano *TSplitSource) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SplitSourceId) { + return false + } + if !p.Field2DeepEqual(ano.NumSplits) { + return false + } + return true +} + +func (p *TSplitSource) Field1DeepEqual(src *int64) bool { + + if p.SplitSourceId == src { + return true + } else if p.SplitSourceId == nil || src == nil { + return false + } + if *p.SplitSourceId != *src { + return false + } + return true +} +func (p *TSplitSource) Field2DeepEqual(src *int32) bool { + + if p.NumSplits == src { + return true + } else if p.NumSplits == nil || src == nil { + return false + } + if *p.NumSplits != *src { + return false + } + return true +} + +type TFileScanRange struct { + Ranges []*TFileRangeDesc `thrift:"ranges,1,optional" frugal:"1,optional,list" json:"ranges,omitempty"` + Params *TFileScanRangeParams `thrift:"params,2,optional" frugal:"2,optional,TFileScanRangeParams" json:"params,omitempty"` + SplitSource *TSplitSource `thrift:"split_source,3,optional" frugal:"3,optional,TSplitSource" json:"split_source,omitempty"` +} + +func NewTFileScanRange() *TFileScanRange { + return &TFileScanRange{} +} + +func (p *TFileScanRange) InitDefault() { +} + +var TFileScanRange_Ranges_DEFAULT []*TFileRangeDesc + +func (p *TFileScanRange) GetRanges() (v []*TFileRangeDesc) { + if !p.IsSetRanges() { + return TFileScanRange_Ranges_DEFAULT + } + return p.Ranges +} + +var TFileScanRange_Params_DEFAULT *TFileScanRangeParams + +func (p *TFileScanRange) GetParams() (v *TFileScanRangeParams) { + if !p.IsSetParams() { + return TFileScanRange_Params_DEFAULT + } + return p.Params +} + +var TFileScanRange_SplitSource_DEFAULT *TSplitSource + +func (p *TFileScanRange) GetSplitSource() (v *TSplitSource) { + if !p.IsSetSplitSource() { + return TFileScanRange_SplitSource_DEFAULT + } + return p.SplitSource +} +func (p *TFileScanRange) SetRanges(val []*TFileRangeDesc) { + p.Ranges = val +} +func (p *TFileScanRange) SetParams(val *TFileScanRangeParams) { + p.Params = val +} +func (p *TFileScanRange) SetSplitSource(val *TSplitSource) { + p.SplitSource = val +} + +var fieldIDToName_TFileScanRange = map[int16]string{ + 1: "ranges", + 2: "params", + 3: "split_source", +} + +func (p *TFileScanRange) IsSetRanges() bool { + return p.Ranges != nil +} + +func (p *TFileScanRange) IsSetParams() bool { + return 
p.Params != nil +} + +func (p *TFileScanRange) IsSetSplitSource() bool { + return p.SplitSource != nil +} + +func (p *TFileScanRange) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRange[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFileScanRange) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TFileRangeDesc, 0, size) + values := make([]TFileRangeDesc, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Ranges = _field + return nil +} +func (p *TFileScanRange) ReadField2(iprot thrift.TProtocol) error { + _field := NewTFileScanRangeParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.Params = _field + return nil +} +func (p *TFileScanRange) ReadField3(iprot thrift.TProtocol) error { + _field := NewTSplitSource() + if err := _field.Read(iprot); err != nil { + return err + } + p.SplitSource = _field + return nil +} + +func (p *TFileScanRange) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFileScanRange"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err 
!= nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TFileScanRange) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetRanges() { + if err = oprot.WriteFieldBegin("ranges", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Ranges)); err != nil { + return err + } + for _, v := range p.Ranges { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFileScanRange) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetParams() { + if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Params.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TFileScanRange) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetSplitSource() { + if err = oprot.WriteFieldBegin("split_source", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.SplitSource.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TFileScanRange) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFileScanRange(%+v)", *p) + +} + +func (p *TFileScanRange) DeepEqual(ano *TFileScanRange) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Ranges) { + return false + } + if !p.Field2DeepEqual(ano.Params) { + return false + } + if !p.Field3DeepEqual(ano.SplitSource) { + return false } return true } -type TFileRangeDesc struct { - LoadId *types.TUniqueId `thrift:"load_id,1,optional" frugal:"1,optional,types.TUniqueId" json:"load_id,omitempty"` - Path *string `thrift:"path,2,optional" frugal:"2,optional,string" json:"path,omitempty"` - StartOffset *int64 `thrift:"start_offset,3,optional" frugal:"3,optional,i64" json:"start_offset,omitempty"` - Size *int64 `thrift:"size,4,optional" frugal:"4,optional,i64" json:"size,omitempty"` - FileSize int64 `thrift:"file_size,5,optional" frugal:"5,optional,i64" json:"file_size,omitempty"` - ColumnsFromPath []string `thrift:"columns_from_path,6,optional" frugal:"6,optional,list" json:"columns_from_path,omitempty"` - 
ColumnsFromPathKeys []string `thrift:"columns_from_path_keys,7,optional" frugal:"7,optional,list" json:"columns_from_path_keys,omitempty"` - TableFormatParams *TTableFormatFileDesc `thrift:"table_format_params,8,optional" frugal:"8,optional,TTableFormatFileDesc" json:"table_format_params,omitempty"` - ModificationTime *int64 `thrift:"modification_time,9,optional" frugal:"9,optional,i64" json:"modification_time,omitempty"` - FileType *types.TFileType `thrift:"file_type,10,optional" frugal:"10,optional,TFileType" json:"file_type,omitempty"` - CompressType *TFileCompressType `thrift:"compress_type,11,optional" frugal:"11,optional,TFileCompressType" json:"compress_type,omitempty"` - FsName *string `thrift:"fs_name,12,optional" frugal:"12,optional,string" json:"fs_name,omitempty"` +func (p *TFileScanRange) Field1DeepEqual(src []*TFileRangeDesc) bool { + + if len(p.Ranges) != len(src) { + return false + } + for i, v := range p.Ranges { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true } +func (p *TFileScanRange) Field2DeepEqual(src *TFileScanRangeParams) bool { -func NewTFileRangeDesc() *TFileRangeDesc { - return &TFileRangeDesc{ + if !p.Params.DeepEqual(src) { + return false + } + return true +} +func (p *TFileScanRange) Field3DeepEqual(src *TSplitSource) bool { - FileSize: -1, + if !p.SplitSource.DeepEqual(src) { + return false } + return true } -func (p *TFileRangeDesc) InitDefault() { - *p = TFileRangeDesc{ +type TExternalScanRange struct { + FileScanRange *TFileScanRange `thrift:"file_scan_range,1,optional" frugal:"1,optional,TFileScanRange" json:"file_scan_range,omitempty"` +} - FileSize: -1, +func NewTExternalScanRange() *TExternalScanRange { + return &TExternalScanRange{} +} + +func (p *TExternalScanRange) InitDefault() { +} + +var TExternalScanRange_FileScanRange_DEFAULT *TFileScanRange + +func (p *TExternalScanRange) GetFileScanRange() (v *TFileScanRange) { + if !p.IsSetFileScanRange() { + return TExternalScanRange_FileScanRange_DEFAULT } + return p.FileScanRange +} +func (p *TExternalScanRange) SetFileScanRange(val *TFileScanRange) { + p.FileScanRange = val } -var TFileRangeDesc_LoadId_DEFAULT *types.TUniqueId +var fieldIDToName_TExternalScanRange = map[int16]string{ + 1: "file_scan_range", +} -func (p *TFileRangeDesc) GetLoadId() (v *types.TUniqueId) { - if !p.IsSetLoadId() { - return TFileRangeDesc_LoadId_DEFAULT +func (p *TExternalScanRange) IsSetFileScanRange() bool { + return p.FileScanRange != nil +} + +func (p *TExternalScanRange) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - return p.LoadId + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin 
error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExternalScanRange[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -var TFileRangeDesc_Path_DEFAULT string +func (p *TExternalScanRange) ReadField1(iprot thrift.TProtocol) error { + _field := NewTFileScanRange() + if err := _field.Read(iprot); err != nil { + return err + } + p.FileScanRange = _field + return nil +} -func (p *TFileRangeDesc) GetPath() (v string) { - if !p.IsSetPath() { - return TFileRangeDesc_Path_DEFAULT +func (p *TExternalScanRange) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TExternalScanRange"); err != nil { + goto WriteStructBeginError } - return *p.Path + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -var TFileRangeDesc_StartOffset_DEFAULT int64 +func (p *TExternalScanRange) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFileScanRange() { + if err = oprot.WriteFieldBegin("file_scan_range", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.FileScanRange.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} -func (p *TFileRangeDesc) GetStartOffset() (v int64) { - if !p.IsSetStartOffset() { - return TFileRangeDesc_StartOffset_DEFAULT +func (p *TExternalScanRange) String() string { + if p == nil { + return "" } - return *p.StartOffset + return fmt.Sprintf("TExternalScanRange(%+v)", *p) + } -var TFileRangeDesc_Size_DEFAULT int64 +func (p *TExternalScanRange) DeepEqual(ano *TExternalScanRange) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FileScanRange) { + return false + } + return true +} -func (p *TFileRangeDesc) GetSize() (v int64) { - if !p.IsSetSize() { - return TFileRangeDesc_Size_DEFAULT +func (p *TExternalScanRange) Field1DeepEqual(src *TFileScanRange) bool { + + if !p.FileScanRange.DeepEqual(src) { + return false } - return *p.Size + return true } -var TFileRangeDesc_FileSize_DEFAULT int64 = -1 +type TTVFNumbersScanRange struct { + TotalNumbers *int64 `thrift:"totalNumbers,1,optional" frugal:"1,optional,i64" json:"totalNumbers,omitempty"` + UseConst *bool `thrift:"useConst,2,optional" frugal:"2,optional,bool" 
json:"useConst,omitempty"` + ConstValue *int64 `thrift:"constValue,3,optional" frugal:"3,optional,i64" json:"constValue,omitempty"` +} -func (p *TFileRangeDesc) GetFileSize() (v int64) { - if !p.IsSetFileSize() { - return TFileRangeDesc_FileSize_DEFAULT +func NewTTVFNumbersScanRange() *TTVFNumbersScanRange { + return &TTVFNumbersScanRange{} +} + +func (p *TTVFNumbersScanRange) InitDefault() { +} + +var TTVFNumbersScanRange_TotalNumbers_DEFAULT int64 + +func (p *TTVFNumbersScanRange) GetTotalNumbers() (v int64) { + if !p.IsSetTotalNumbers() { + return TTVFNumbersScanRange_TotalNumbers_DEFAULT } - return p.FileSize + return *p.TotalNumbers } -var TFileRangeDesc_ColumnsFromPath_DEFAULT []string +var TTVFNumbersScanRange_UseConst_DEFAULT bool -func (p *TFileRangeDesc) GetColumnsFromPath() (v []string) { - if !p.IsSetColumnsFromPath() { - return TFileRangeDesc_ColumnsFromPath_DEFAULT +func (p *TTVFNumbersScanRange) GetUseConst() (v bool) { + if !p.IsSetUseConst() { + return TTVFNumbersScanRange_UseConst_DEFAULT } - return p.ColumnsFromPath + return *p.UseConst } -var TFileRangeDesc_ColumnsFromPathKeys_DEFAULT []string +var TTVFNumbersScanRange_ConstValue_DEFAULT int64 -func (p *TFileRangeDesc) GetColumnsFromPathKeys() (v []string) { - if !p.IsSetColumnsFromPathKeys() { - return TFileRangeDesc_ColumnsFromPathKeys_DEFAULT +func (p *TTVFNumbersScanRange) GetConstValue() (v int64) { + if !p.IsSetConstValue() { + return TTVFNumbersScanRange_ConstValue_DEFAULT + } + return *p.ConstValue +} +func (p *TTVFNumbersScanRange) SetTotalNumbers(val *int64) { + p.TotalNumbers = val +} +func (p *TTVFNumbersScanRange) SetUseConst(val *bool) { + p.UseConst = val +} +func (p *TTVFNumbersScanRange) SetConstValue(val *int64) { + p.ConstValue = val +} + +var fieldIDToName_TTVFNumbersScanRange = map[int16]string{ + 1: "totalNumbers", + 2: "useConst", + 3: "constValue", +} + +func (p *TTVFNumbersScanRange) IsSetTotalNumbers() bool { + return p.TotalNumbers != nil +} + +func (p *TTVFNumbersScanRange) IsSetUseConst() bool { + return p.UseConst != nil +} + +func (p *TTVFNumbersScanRange) IsSetConstValue() bool { + return p.ConstValue != nil +} + +func (p *TTVFNumbersScanRange) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTVFNumbersScanRange[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTVFNumbersScanRange) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TotalNumbers = _field + return nil +} +func (p *TTVFNumbersScanRange) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.UseConst = _field + return nil +} +func (p *TTVFNumbersScanRange) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ConstValue = _field + return nil +} + +func (p *TTVFNumbersScanRange) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TTVFNumbersScanRange"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TTVFNumbersScanRange) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalNumbers() { + if err = oprot.WriteFieldBegin("totalNumbers", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TotalNumbers); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return p.ColumnsFromPathKeys + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -var TFileRangeDesc_TableFormatParams_DEFAULT *TTableFormatFileDesc - -func (p *TFileRangeDesc) GetTableFormatParams() (v *TTableFormatFileDesc) { - if !p.IsSetTableFormatParams() { - return TFileRangeDesc_TableFormatParams_DEFAULT +func (p *TTVFNumbersScanRange) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetUseConst() { + if err = oprot.WriteFieldBegin("useConst", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.UseConst); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - 
return p.TableFormatParams + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -var TFileRangeDesc_ModificationTime_DEFAULT int64 - -func (p *TFileRangeDesc) GetModificationTime() (v int64) { - if !p.IsSetModificationTime() { - return TFileRangeDesc_ModificationTime_DEFAULT +func (p *TTVFNumbersScanRange) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetConstValue() { + if err = oprot.WriteFieldBegin("constValue", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ConstValue); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - return *p.ModificationTime + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -var TFileRangeDesc_FileType_DEFAULT types.TFileType - -func (p *TFileRangeDesc) GetFileType() (v types.TFileType) { - if !p.IsSetFileType() { - return TFileRangeDesc_FileType_DEFAULT +func (p *TTVFNumbersScanRange) String() string { + if p == nil { + return "" } - return *p.FileType -} - -var TFileRangeDesc_CompressType_DEFAULT TFileCompressType + return fmt.Sprintf("TTVFNumbersScanRange(%+v)", *p) -func (p *TFileRangeDesc) GetCompressType() (v TFileCompressType) { - if !p.IsSetCompressType() { - return TFileRangeDesc_CompressType_DEFAULT - } - return *p.CompressType } -var TFileRangeDesc_FsName_DEFAULT string - -func (p *TFileRangeDesc) GetFsName() (v string) { - if !p.IsSetFsName() { - return TFileRangeDesc_FsName_DEFAULT +func (p *TTVFNumbersScanRange) DeepEqual(ano *TTVFNumbersScanRange) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return *p.FsName -} -func (p *TFileRangeDesc) SetLoadId(val *types.TUniqueId) { - p.LoadId = val -} -func (p *TFileRangeDesc) SetPath(val *string) { - p.Path = val -} -func (p *TFileRangeDesc) SetStartOffset(val *int64) { - p.StartOffset = val -} -func (p *TFileRangeDesc) SetSize(val *int64) { - p.Size = val -} -func (p *TFileRangeDesc) SetFileSize(val int64) { - p.FileSize = val -} -func (p *TFileRangeDesc) SetColumnsFromPath(val []string) { - p.ColumnsFromPath = val -} -func (p *TFileRangeDesc) SetColumnsFromPathKeys(val []string) { - p.ColumnsFromPathKeys = val -} -func (p *TFileRangeDesc) SetTableFormatParams(val *TTableFormatFileDesc) { - p.TableFormatParams = val -} -func (p *TFileRangeDesc) SetModificationTime(val *int64) { - p.ModificationTime = val -} -func (p *TFileRangeDesc) SetFileType(val *types.TFileType) { - p.FileType = val -} -func (p *TFileRangeDesc) SetCompressType(val *TFileCompressType) { - p.CompressType = val -} -func (p *TFileRangeDesc) SetFsName(val *string) { - p.FsName = val -} - -var fieldIDToName_TFileRangeDesc = map[int16]string{ - 1: "load_id", - 2: "path", - 3: "start_offset", - 4: "size", - 5: "file_size", - 6: "columns_from_path", - 7: "columns_from_path_keys", - 8: "table_format_params", - 9: "modification_time", - 10: "file_type", - 11: "compress_type", - 12: "fs_name", + if !p.Field1DeepEqual(ano.TotalNumbers) { + return false + } + if !p.Field2DeepEqual(ano.UseConst) { + return false + } + if !p.Field3DeepEqual(ano.ConstValue) { + return false + } + return true } -func (p *TFileRangeDesc) IsSetLoadId() bool { - return 
p.LoadId != nil -} +func (p *TTVFNumbersScanRange) Field1DeepEqual(src *int64) bool { -func (p *TFileRangeDesc) IsSetPath() bool { - return p.Path != nil + if p.TotalNumbers == src { + return true + } else if p.TotalNumbers == nil || src == nil { + return false + } + if *p.TotalNumbers != *src { + return false + } + return true } +func (p *TTVFNumbersScanRange) Field2DeepEqual(src *bool) bool { -func (p *TFileRangeDesc) IsSetStartOffset() bool { - return p.StartOffset != nil + if p.UseConst == src { + return true + } else if p.UseConst == nil || src == nil { + return false + } + if *p.UseConst != *src { + return false + } + return true } +func (p *TTVFNumbersScanRange) Field3DeepEqual(src *int64) bool { -func (p *TFileRangeDesc) IsSetSize() bool { - return p.Size != nil + if p.ConstValue == src { + return true + } else if p.ConstValue == nil || src == nil { + return false + } + if *p.ConstValue != *src { + return false + } + return true } -func (p *TFileRangeDesc) IsSetFileSize() bool { - return p.FileSize != TFileRangeDesc_FileSize_DEFAULT +type TDataGenScanRange struct { + NumbersParams *TTVFNumbersScanRange `thrift:"numbers_params,1,optional" frugal:"1,optional,TTVFNumbersScanRange" json:"numbers_params,omitempty"` } -func (p *TFileRangeDesc) IsSetColumnsFromPath() bool { - return p.ColumnsFromPath != nil +func NewTDataGenScanRange() *TDataGenScanRange { + return &TDataGenScanRange{} } -func (p *TFileRangeDesc) IsSetColumnsFromPathKeys() bool { - return p.ColumnsFromPathKeys != nil +func (p *TDataGenScanRange) InitDefault() { } -func (p *TFileRangeDesc) IsSetTableFormatParams() bool { - return p.TableFormatParams != nil -} +var TDataGenScanRange_NumbersParams_DEFAULT *TTVFNumbersScanRange -func (p *TFileRangeDesc) IsSetModificationTime() bool { - return p.ModificationTime != nil +func (p *TDataGenScanRange) GetNumbersParams() (v *TTVFNumbersScanRange) { + if !p.IsSetNumbersParams() { + return TDataGenScanRange_NumbersParams_DEFAULT + } + return p.NumbersParams } - -func (p *TFileRangeDesc) IsSetFileType() bool { - return p.FileType != nil +func (p *TDataGenScanRange) SetNumbersParams(val *TTVFNumbersScanRange) { + p.NumbersParams = val } -func (p *TFileRangeDesc) IsSetCompressType() bool { - return p.CompressType != nil +var fieldIDToName_TDataGenScanRange = map[int16]string{ + 1: "numbers_params", } -func (p *TFileRangeDesc) IsSetFsName() bool { - return p.FsName != nil +func (p *TDataGenScanRange) IsSetNumbersParams() bool { + return p.NumbersParams != nil } -func (p *TFileRangeDesc) Read(iprot thrift.TProtocol) (err error) { +func (p *TDataGenScanRange) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14103,127 +19747,14 @@ func (p *TFileRangeDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err = p.ReadField2(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - 
} - } - case 5: - if fieldTypeId == thrift.I64 { - if err = p.ReadField5(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err = p.ReadField6(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.LIST { - if err = p.ReadField7(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err = p.ReadField8(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err = p.ReadField9(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 10: - if fieldTypeId == thrift.I32 { - if err = p.ReadField10(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 11: - if fieldTypeId == thrift.I32 { - if err = p.ReadField11(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.STRING { - if err = p.ReadField12(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14238,7 +19769,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileRangeDesc[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDataGenScanRange[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -14248,143 +19779,304 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) ReadField1(iprot thrift.TProtocol) error { - p.LoadId = types.NewTUniqueId() - if err := p.LoadId.Read(iprot); err != nil { +func (p *TDataGenScanRange) ReadField1(iprot thrift.TProtocol) error { + _field := NewTTVFNumbersScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.NumbersParams = _field return nil } -func (p *TFileRangeDesc) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Path = &v +func (p *TDataGenScanRange) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TDataGenScanRange"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError } return nil 
+WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFileRangeDesc) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.StartOffset = &v +func (p *TDataGenScanRange) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetNumbersParams() { + if err = oprot.WriteFieldBegin("numbers_params", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.NumbersParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFileRangeDesc) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.Size = &v +func (p *TDataGenScanRange) String() string { + if p == nil { + return "" } - return nil + return fmt.Sprintf("TDataGenScanRange(%+v)", *p) + } -func (p *TFileRangeDesc) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return err - } else { - p.FileSize = v +func (p *TDataGenScanRange) DeepEqual(ano *TDataGenScanRange) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - return nil + if !p.Field1DeepEqual(ano.NumbersParams) { + return false + } + return true } -func (p *TFileRangeDesc) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *TDataGenScanRange) Field1DeepEqual(src *TTVFNumbersScanRange) bool { + + if !p.NumbersParams.DeepEqual(src) { + return false } - p.ColumnsFromPath = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v + return true +} + +type TIcebergMetadataParams struct { + IcebergQueryType *types.TIcebergQueryType `thrift:"iceberg_query_type,1,optional" frugal:"1,optional,TIcebergQueryType" json:"iceberg_query_type,omitempty"` + Catalog *string `thrift:"catalog,2,optional" frugal:"2,optional,string" json:"catalog,omitempty"` + Database *string `thrift:"database,3,optional" frugal:"3,optional,string" json:"database,omitempty"` + Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` +} + +func NewTIcebergMetadataParams() *TIcebergMetadataParams { + return &TIcebergMetadataParams{} +} + +func (p *TIcebergMetadataParams) InitDefault() { +} + +var TIcebergMetadataParams_IcebergQueryType_DEFAULT types.TIcebergQueryType + +func (p *TIcebergMetadataParams) GetIcebergQueryType() (v types.TIcebergQueryType) { + if !p.IsSetIcebergQueryType() { + return TIcebergMetadataParams_IcebergQueryType_DEFAULT + } + return *p.IcebergQueryType +} + +var TIcebergMetadataParams_Catalog_DEFAULT string + +func (p *TIcebergMetadataParams) GetCatalog() (v string) { + if !p.IsSetCatalog() { + return TIcebergMetadataParams_Catalog_DEFAULT + } + return *p.Catalog +} + +var 
TIcebergMetadataParams_Database_DEFAULT string + +func (p *TIcebergMetadataParams) GetDatabase() (v string) { + if !p.IsSetDatabase() { + return TIcebergMetadataParams_Database_DEFAULT + } + return *p.Database +} + +var TIcebergMetadataParams_Table_DEFAULT string + +func (p *TIcebergMetadataParams) GetTable() (v string) { + if !p.IsSetTable() { + return TIcebergMetadataParams_Table_DEFAULT + } + return *p.Table +} +func (p *TIcebergMetadataParams) SetIcebergQueryType(val *types.TIcebergQueryType) { + p.IcebergQueryType = val +} +func (p *TIcebergMetadataParams) SetCatalog(val *string) { + p.Catalog = val +} +func (p *TIcebergMetadataParams) SetDatabase(val *string) { + p.Database = val +} +func (p *TIcebergMetadataParams) SetTable(val *string) { + p.Table = val +} + +var fieldIDToName_TIcebergMetadataParams = map[int16]string{ + 1: "iceberg_query_type", + 2: "catalog", + 3: "database", + 4: "table", +} + +func (p *TIcebergMetadataParams) IsSetIcebergQueryType() bool { + return p.IcebergQueryType != nil +} + +func (p *TIcebergMetadataParams) IsSetCatalog() bool { + return p.Catalog != nil +} + +func (p *TIcebergMetadataParams) IsSetDatabase() bool { + return p.Database != nil +} + +func (p *TIcebergMetadataParams) IsSetTable() bool { + return p.Table != nil +} + +func (p *TIcebergMetadataParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } } - - p.ColumnsFromPath = append(p.ColumnsFromPath, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err - } - return nil -} - -func (p *TFileRangeDesc) ReadField7(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.ColumnsFromPathKeys = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, err := iprot.ReadString(); err != nil { - return err - } else { - _elem = v + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } - - p.ColumnsFromPathKeys = append(p.ColumnsFromPathKeys, _elem) } - if err := iprot.ReadListEnd(); err != nil { - return err + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return nil -} -func (p *TFileRangeDesc) ReadField8(iprot thrift.TProtocol) error { - p.TableFormatParams = NewTTableFormatFileDesc() - if err := p.TableFormatParams.Read(iprot); err != nil { - return err - } return nil +ReadStructBeginError: + 
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergMetadataParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TIcebergMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TIcebergQueryType + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ModificationTime = &v + tmp := types.TIcebergQueryType(v) + _field = &tmp } + p.IcebergQueryType = _field return nil } +func (p *TIcebergMetadataParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TFileRangeDesc) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - tmp := types.TFileType(v) - p.FileType = &tmp + _field = &v } + p.Catalog = _field return nil } +func (p *TIcebergMetadataParams) ReadField3(iprot thrift.TProtocol) error { -func (p *TFileRangeDesc) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - tmp := TFileCompressType(v) - p.CompressType = &tmp + _field = &v } + p.Database = _field return nil } +func (p *TIcebergMetadataParams) ReadField4(iprot thrift.TProtocol) error { -func (p *TFileRangeDesc) ReadField12(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FsName = &v + _field = &v } + p.Table = _field return nil } -func (p *TFileRangeDesc) Write(oprot thrift.TProtocol) (err error) { +func (p *TIcebergMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFileRangeDesc"); err != nil { + if err = oprot.WriteStructBegin("TIcebergMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -14404,39 +20096,6 @@ func (p *TFileRangeDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - if err = p.writeField5(oprot); err != nil { - fieldId = 5 - goto WriteFieldError - } - if err = p.writeField6(oprot); err != nil { - fieldId = 6 - goto WriteFieldError - } - if err = p.writeField7(oprot); err != nil { - fieldId = 7 - goto WriteFieldError - } - if err = p.writeField8(oprot); err != nil { - fieldId = 8 - goto WriteFieldError - } - if err = p.writeField9(oprot); err != nil { - fieldId = 9 - goto WriteFieldError - } - if err = p.writeField10(oprot); err != nil { - fieldId = 10 - goto WriteFieldError - } - if err = p.writeField11(oprot); err != nil { - fieldId = 11 - goto WriteFieldError - } - if err = p.writeField12(oprot); err != nil { - fieldId = 12 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14455,12 +20114,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p 
*TFileRangeDesc) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetLoadId() { - if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 1); err != nil { +func (p *TIcebergMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetIcebergQueryType() { + if err = oprot.WriteFieldBegin("iceberg_query_type", thrift.I32, 1); err != nil { goto WriteFieldBeginError } - if err := p.LoadId.Write(oprot); err != nil { + if err := oprot.WriteI32(int32(*p.IcebergQueryType)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14474,12 +20133,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFileRangeDesc) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetPath() { - if err = oprot.WriteFieldBegin("path", thrift.STRING, 2); err != nil { +func (p *TIcebergMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalog() { + if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Path); err != nil { + if err := oprot.WriteString(*p.Catalog); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14493,12 +20152,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFileRangeDesc) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetStartOffset() { - if err = oprot.WriteFieldBegin("start_offset", thrift.I64, 3); err != nil { +func (p *TIcebergMetadataParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDatabase() { + if err = oprot.WriteFieldBegin("database", thrift.STRING, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.StartOffset); err != nil { + if err := oprot.WriteString(*p.Database); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14512,12 +20171,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TFileRangeDesc) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetSize() { - if err = oprot.WriteFieldBegin("size", thrift.I64, 4); err != nil { +func (p *TIcebergMetadataParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.Size); err != nil { + if err := oprot.WriteString(*p.Table); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -14531,409 +20190,492 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TFileRangeDesc) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetFileSize() { - if err = oprot.WriteFieldBegin("file_size", thrift.I64, 5); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(p.FileSize); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnsFromPath() { - if err = oprot.WriteFieldBegin("columns_from_path", thrift.LIST, 6); err != nil { - goto WriteFieldBeginError - } - if 
err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsFromPath)); err != nil { - return err - } - for _, v := range p.ColumnsFromPath { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetColumnsFromPathKeys() { - if err = oprot.WriteFieldBegin("columns_from_path_keys", thrift.LIST, 7); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.ColumnsFromPathKeys)); err != nil { - return err - } - for _, v := range p.ColumnsFromPathKeys { - if err := oprot.WriteString(v); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetTableFormatParams() { - if err = oprot.WriteFieldBegin("table_format_params", thrift.STRUCT, 8); err != nil { - goto WriteFieldBeginError - } - if err := p.TableFormatParams.Write(oprot); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetModificationTime() { - if err = oprot.WriteFieldBegin("modification_time", thrift.I64, 9); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.ModificationTime); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetFileType() { - if err = oprot.WriteFieldBegin("file_type", thrift.I32, 10); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.FileType)); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetCompressType() { - if err = oprot.WriteFieldBegin("compress_type", thrift.I32, 11); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.CompressType)); err != nil { - return err - } - if err = 
oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) -} - -func (p *TFileRangeDesc) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetFsName() { - if err = oprot.WriteFieldBegin("fs_name", thrift.STRING, 12); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.FsName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) -} - -func (p *TFileRangeDesc) String() string { +func (p *TIcebergMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TFileRangeDesc(%+v)", *p) + return fmt.Sprintf("TIcebergMetadataParams(%+v)", *p) + } -func (p *TFileRangeDesc) DeepEqual(ano *TFileRangeDesc) bool { +func (p *TIcebergMetadataParams) DeepEqual(ano *TIcebergMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.LoadId) { + if !p.Field1DeepEqual(ano.IcebergQueryType) { return false } - if !p.Field2DeepEqual(ano.Path) { + if !p.Field2DeepEqual(ano.Catalog) { return false } - if !p.Field3DeepEqual(ano.StartOffset) { + if !p.Field3DeepEqual(ano.Database) { return false } - if !p.Field4DeepEqual(ano.Size) { + if !p.Field4DeepEqual(ano.Table) { return false } - if !p.Field5DeepEqual(ano.FileSize) { + return true +} + +func (p *TIcebergMetadataParams) Field1DeepEqual(src *types.TIcebergQueryType) bool { + + if p.IcebergQueryType == src { + return true + } else if p.IcebergQueryType == nil || src == nil { return false } - if !p.Field6DeepEqual(ano.ColumnsFromPath) { + if *p.IcebergQueryType != *src { return false } - if !p.Field7DeepEqual(ano.ColumnsFromPathKeys) { + return true +} +func (p *TIcebergMetadataParams) Field2DeepEqual(src *string) bool { + + if p.Catalog == src { + return true + } else if p.Catalog == nil || src == nil { return false } - if !p.Field8DeepEqual(ano.TableFormatParams) { + if strings.Compare(*p.Catalog, *src) != 0 { return false } - if !p.Field9DeepEqual(ano.ModificationTime) { + return true +} +func (p *TIcebergMetadataParams) Field3DeepEqual(src *string) bool { + + if p.Database == src { + return true + } else if p.Database == nil || src == nil { return false } - if !p.Field10DeepEqual(ano.FileType) { + if strings.Compare(*p.Database, *src) != 0 { return false } - if !p.Field11DeepEqual(ano.CompressType) { + return true +} +func (p *TIcebergMetadataParams) Field4DeepEqual(src *string) bool { + + if p.Table == src { + return true + } else if p.Table == nil || src == nil { return false } - if !p.Field12DeepEqual(ano.FsName) { + if strings.Compare(*p.Table, *src) != 0 { return false } return true } -func (p *TFileRangeDesc) Field1DeepEqual(src *types.TUniqueId) bool { +type TBackendsMetadataParams struct { + ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` +} - if !p.LoadId.DeepEqual(src) { - return false +func NewTBackendsMetadataParams() *TBackendsMetadataParams { + return &TBackendsMetadataParams{} +} + +func (p *TBackendsMetadataParams) InitDefault() { +} + +var 
TBackendsMetadataParams_ClusterName_DEFAULT string + +func (p *TBackendsMetadataParams) GetClusterName() (v string) { + if !p.IsSetClusterName() { + return TBackendsMetadataParams_ClusterName_DEFAULT } - return true + return *p.ClusterName +} +func (p *TBackendsMetadataParams) SetClusterName(val *string) { + p.ClusterName = val } -func (p *TFileRangeDesc) Field2DeepEqual(src *string) bool { - if p.Path == src { - return true - } else if p.Path == nil || src == nil { - return false +var fieldIDToName_TBackendsMetadataParams = map[int16]string{ + 1: "cluster_name", +} + +func (p *TBackendsMetadataParams) IsSetClusterName() bool { + return p.ClusterName != nil +} + +func (p *TBackendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if strings.Compare(*p.Path, *src) != 0 { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendsMetadataParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TBackendsMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ClusterName = _field + return nil +} + +func (p *TBackendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TBackendsMetadataParams"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TBackendsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetClusterName() { + if 
err = oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ClusterName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TBackendsMetadataParams) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TBackendsMetadataParams(%+v)", *p) + } -func (p *TFileRangeDesc) Field3DeepEqual(src *int64) bool { - if p.StartOffset == src { +func (p *TBackendsMetadataParams) DeepEqual(ano *TBackendsMetadataParams) bool { + if p == ano { return true - } else if p.StartOffset == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.StartOffset != *src { + if !p.Field1DeepEqual(ano.ClusterName) { return false } return true } -func (p *TFileRangeDesc) Field4DeepEqual(src *int64) bool { - if p.Size == src { +func (p *TBackendsMetadataParams) Field1DeepEqual(src *string) bool { + + if p.ClusterName == src { return true - } else if p.Size == nil || src == nil { + } else if p.ClusterName == nil || src == nil { return false } - if *p.Size != *src { + if strings.Compare(*p.ClusterName, *src) != 0 { return false } return true } -func (p *TFileRangeDesc) Field5DeepEqual(src int64) bool { - if p.FileSize != src { - return false - } - return true +type TFrontendsMetadataParams struct { + ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` } -func (p *TFileRangeDesc) Field6DeepEqual(src []string) bool { - if len(p.ColumnsFromPath) != len(src) { - return false - } - for i, v := range p.ColumnsFromPath { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false - } +func NewTFrontendsMetadataParams() *TFrontendsMetadataParams { + return &TFrontendsMetadataParams{} +} + +func (p *TFrontendsMetadataParams) InitDefault() { +} + +var TFrontendsMetadataParams_ClusterName_DEFAULT string + +func (p *TFrontendsMetadataParams) GetClusterName() (v string) { + if !p.IsSetClusterName() { + return TFrontendsMetadataParams_ClusterName_DEFAULT } - return true + return *p.ClusterName +} +func (p *TFrontendsMetadataParams) SetClusterName(val *string) { + p.ClusterName = val } -func (p *TFileRangeDesc) Field7DeepEqual(src []string) bool { - if len(p.ColumnsFromPathKeys) != len(src) { - return false +var fieldIDToName_TFrontendsMetadataParams = map[int16]string{ + 1: "cluster_name", +} + +func (p *TFrontendsMetadataParams) IsSetClusterName() bool { + return p.ClusterName != nil +} + +func (p *TFrontendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - for i, v := range p.ColumnsFromPathKeys { - _src := src[i] - if strings.Compare(v, _src) != 0 { - return false + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError } } - return true + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendsMetadataParams[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) Field8DeepEqual(src *TTableFormatFileDesc) bool { - if !p.TableFormatParams.DeepEqual(src) { - return false +func (p *TFrontendsMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v } - return true + p.ClusterName = _field + return nil } -func (p *TFileRangeDesc) Field9DeepEqual(src *int64) bool { - if p.ModificationTime == src { - return true - } else if p.ModificationTime == nil || src == nil { - return false +func (p *TFrontendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TFrontendsMetadataParams"); err != nil { + goto WriteStructBeginError } - if *p.ModificationTime != *src { - return false + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - return true + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFileRangeDesc) Field10DeepEqual(src *types.TFileType) bool { - if p.FileType == src { - return true - } else if p.FileType == nil || src == nil { - return false +func (p *TFrontendsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetClusterName() { + if err = oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ClusterName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if *p.FileType != *src { - return false + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TFrontendsMetadataParams) String() string { + if p == nil { + return "" } - return true + return fmt.Sprintf("TFrontendsMetadataParams(%+v)", *p) + } -func (p *TFileRangeDesc) Field11DeepEqual(src *TFileCompressType) bool { - if p.CompressType == src { +func (p 
*TFrontendsMetadataParams) DeepEqual(ano *TFrontendsMetadataParams) bool { + if p == ano { return true - } else if p.CompressType == nil || src == nil { + } else if p == nil || ano == nil { return false } - if *p.CompressType != *src { + if !p.Field1DeepEqual(ano.ClusterName) { return false } return true } -func (p *TFileRangeDesc) Field12DeepEqual(src *string) bool { - if p.FsName == src { +func (p *TFrontendsMetadataParams) Field1DeepEqual(src *string) bool { + + if p.ClusterName == src { return true - } else if p.FsName == nil || src == nil { + } else if p.ClusterName == nil || src == nil { return false } - if strings.Compare(*p.FsName, *src) != 0 { + if strings.Compare(*p.ClusterName, *src) != 0 { return false } return true } -type TFileScanRange struct { - Ranges []*TFileRangeDesc `thrift:"ranges,1,optional" frugal:"1,optional,list" json:"ranges,omitempty"` - Params *TFileScanRangeParams `thrift:"params,2,optional" frugal:"2,optional,TFileScanRangeParams" json:"params,omitempty"` +type TMaterializedViewsMetadataParams struct { + Database *string `thrift:"database,1,optional" frugal:"1,optional,string" json:"database,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,2,optional" frugal:"2,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` } -func NewTFileScanRange() *TFileScanRange { - return &TFileScanRange{} +func NewTMaterializedViewsMetadataParams() *TMaterializedViewsMetadataParams { + return &TMaterializedViewsMetadataParams{} } -func (p *TFileScanRange) InitDefault() { - *p = TFileScanRange{} +func (p *TMaterializedViewsMetadataParams) InitDefault() { } -var TFileScanRange_Ranges_DEFAULT []*TFileRangeDesc +var TMaterializedViewsMetadataParams_Database_DEFAULT string -func (p *TFileScanRange) GetRanges() (v []*TFileRangeDesc) { - if !p.IsSetRanges() { - return TFileScanRange_Ranges_DEFAULT +func (p *TMaterializedViewsMetadataParams) GetDatabase() (v string) { + if !p.IsSetDatabase() { + return TMaterializedViewsMetadataParams_Database_DEFAULT } - return p.Ranges + return *p.Database } -var TFileScanRange_Params_DEFAULT *TFileScanRangeParams +var TMaterializedViewsMetadataParams_CurrentUserIdent_DEFAULT *types.TUserIdentity -func (p *TFileScanRange) GetParams() (v *TFileScanRangeParams) { - if !p.IsSetParams() { - return TFileScanRange_Params_DEFAULT +func (p *TMaterializedViewsMetadataParams) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TMaterializedViewsMetadataParams_CurrentUserIdent_DEFAULT } - return p.Params + return p.CurrentUserIdent } -func (p *TFileScanRange) SetRanges(val []*TFileRangeDesc) { - p.Ranges = val +func (p *TMaterializedViewsMetadataParams) SetDatabase(val *string) { + p.Database = val } -func (p *TFileScanRange) SetParams(val *TFileScanRangeParams) { - p.Params = val +func (p *TMaterializedViewsMetadataParams) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val } -var fieldIDToName_TFileScanRange = map[int16]string{ - 1: "ranges", - 2: "params", +var fieldIDToName_TMaterializedViewsMetadataParams = map[int16]string{ + 1: "database", + 2: "current_user_ident", } -func (p *TFileScanRange) IsSetRanges() bool { - return p.Ranges != nil +func (p *TMaterializedViewsMetadataParams) IsSetDatabase() bool { + return p.Database != nil } -func (p *TFileScanRange) IsSetParams() bool { - return p.Params != nil +func (p *TMaterializedViewsMetadataParams) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil } -func (p *TFileScanRange) Read(iprot 
thrift.TProtocol) (err error) { +func (p *TMaterializedViewsMetadataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -14953,31 +20695,26 @@ func (p *TFileScanRange) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -14992,7 +20729,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRange[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMaterializedViewsMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15002,37 +20739,29 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRange) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err - } - p.Ranges = make([]*TFileRangeDesc, 0, size) - for i := 0; i < size; i++ { - _elem := NewTFileRangeDesc() - if err := _elem.Read(iprot); err != nil { - return err - } +func (p *TMaterializedViewsMetadataParams) ReadField1(iprot thrift.TProtocol) error { - p.Ranges = append(p.Ranges, _elem) - } - if err := iprot.ReadListEnd(); err != nil { + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Database = _field return nil } - -func (p *TFileScanRange) ReadField2(iprot thrift.TProtocol) error { - p.Params = NewTFileScanRangeParams() - if err := p.Params.Read(iprot); err != nil { +func (p *TMaterializedViewsMetadataParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } -func (p *TFileScanRange) Write(oprot thrift.TProtocol) (err error) { +func (p *TMaterializedViewsMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TFileScanRange"); err != nil { + if err = oprot.WriteStructBegin("TMaterializedViewsMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15044,7 +20773,6 @@ func (p *TFileScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15063,20 +20791,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFileScanRange) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetRanges() { - if err = 
oprot.WriteFieldBegin("ranges", thrift.LIST, 1); err != nil { +func (p *TMaterializedViewsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDatabase() { + if err = oprot.WriteFieldBegin("database", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Ranges)); err != nil { - return err - } - for _, v := range p.Ranges { - if err := v.Write(oprot); err != nil { - return err - } - } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteString(*p.Database); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15090,12 +20810,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TFileScanRange) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetParams() { - if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 2); err != nil { +func (p *TMaterializedViewsMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := p.Params.Write(oprot); err != nil { + if err := p.CurrentUserIdent.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15109,82 +20829,117 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TFileScanRange) String() string { +func (p *TMaterializedViewsMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TFileScanRange(%+v)", *p) + return fmt.Sprintf("TMaterializedViewsMetadataParams(%+v)", *p) + } -func (p *TFileScanRange) DeepEqual(ano *TFileScanRange) bool { +func (p *TMaterializedViewsMetadataParams) DeepEqual(ano *TMaterializedViewsMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.Ranges) { + if !p.Field1DeepEqual(ano.Database) { return false } - if !p.Field2DeepEqual(ano.Params) { + if !p.Field2DeepEqual(ano.CurrentUserIdent) { return false } return true } -func (p *TFileScanRange) Field1DeepEqual(src []*TFileRangeDesc) bool { +func (p *TMaterializedViewsMetadataParams) Field1DeepEqual(src *string) bool { - if len(p.Ranges) != len(src) { + if p.Database == src { + return true + } else if p.Database == nil || src == nil { return false } - for i, v := range p.Ranges { - _src := src[i] - if !v.DeepEqual(_src) { - return false - } + if strings.Compare(*p.Database, *src) != 0 { + return false } return true } -func (p *TFileScanRange) Field2DeepEqual(src *TFileScanRangeParams) bool { +func (p *TMaterializedViewsMetadataParams) Field2DeepEqual(src *types.TUserIdentity) bool { - if !p.Params.DeepEqual(src) { + if !p.CurrentUserIdent.DeepEqual(src) { return false } return true } -type TExternalScanRange struct { - FileScanRange *TFileScanRange `thrift:"file_scan_range,1,optional" frugal:"1,optional,TFileScanRange" json:"file_scan_range,omitempty"` +type TPartitionsMetadataParams struct { + Catalog *string `thrift:"catalog,1,optional" frugal:"1,optional,string" json:"catalog,omitempty"` + Database *string `thrift:"database,2,optional" frugal:"2,optional,string" json:"database,omitempty"` + Table *string `thrift:"table,3,optional" frugal:"3,optional,string" json:"table,omitempty"` } -func NewTExternalScanRange() *TExternalScanRange { - return &TExternalScanRange{} +func NewTPartitionsMetadataParams() 
*TPartitionsMetadataParams { + return &TPartitionsMetadataParams{} } -func (p *TExternalScanRange) InitDefault() { - *p = TExternalScanRange{} +func (p *TPartitionsMetadataParams) InitDefault() { } -var TExternalScanRange_FileScanRange_DEFAULT *TFileScanRange +var TPartitionsMetadataParams_Catalog_DEFAULT string -func (p *TExternalScanRange) GetFileScanRange() (v *TFileScanRange) { - if !p.IsSetFileScanRange() { - return TExternalScanRange_FileScanRange_DEFAULT +func (p *TPartitionsMetadataParams) GetCatalog() (v string) { + if !p.IsSetCatalog() { + return TPartitionsMetadataParams_Catalog_DEFAULT } - return p.FileScanRange + return *p.Catalog } -func (p *TExternalScanRange) SetFileScanRange(val *TFileScanRange) { - p.FileScanRange = val + +var TPartitionsMetadataParams_Database_DEFAULT string + +func (p *TPartitionsMetadataParams) GetDatabase() (v string) { + if !p.IsSetDatabase() { + return TPartitionsMetadataParams_Database_DEFAULT + } + return *p.Database } -var fieldIDToName_TExternalScanRange = map[int16]string{ - 1: "file_scan_range", +var TPartitionsMetadataParams_Table_DEFAULT string + +func (p *TPartitionsMetadataParams) GetTable() (v string) { + if !p.IsSetTable() { + return TPartitionsMetadataParams_Table_DEFAULT + } + return *p.Table +} +func (p *TPartitionsMetadataParams) SetCatalog(val *string) { + p.Catalog = val +} +func (p *TPartitionsMetadataParams) SetDatabase(val *string) { + p.Database = val +} +func (p *TPartitionsMetadataParams) SetTable(val *string) { + p.Table = val } -func (p *TExternalScanRange) IsSetFileScanRange() bool { - return p.FileScanRange != nil +var fieldIDToName_TPartitionsMetadataParams = map[int16]string{ + 1: "catalog", + 2: "database", + 3: "table", } -func (p *TExternalScanRange) Read(iprot thrift.TProtocol) (err error) { +func (p *TPartitionsMetadataParams) IsSetCatalog() bool { + return p.Catalog != nil +} + +func (p *TPartitionsMetadataParams) IsSetDatabase() bool { + return p.Database != nil +} + +func (p *TPartitionsMetadataParams) IsSetTable() bool { + return p.Table != nil +} + +func (p *TPartitionsMetadataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15204,21 +20959,34 @@ func (p *TExternalScanRange) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15233,7 +21001,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExternalScanRange[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TPartitionsMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15243,17 +21011,43 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TExternalScanRange) ReadField1(iprot thrift.TProtocol) error { - p.FileScanRange = NewTFileScanRange() - if err := p.FileScanRange.Read(iprot); err != nil { +func (p *TPartitionsMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err + } else { + _field = &v } + p.Catalog = _field return nil } +func (p *TPartitionsMetadataParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TExternalScanRange) Write(oprot thrift.TProtocol) (err error) { + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Database = _field + return nil +} +func (p *TPartitionsMetadataParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} + +func (p *TPartitionsMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TExternalScanRange"); err != nil { + if err = oprot.WriteStructBegin("TPartitionsMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15261,7 +21055,14 @@ func (p *TExternalScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15280,12 +21081,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TExternalScanRange) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetFileScanRange() { - if err = oprot.WriteFieldBegin("file_scan_range", thrift.STRUCT, 1); err != nil { +func (p *TPartitionsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalog() { + if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := p.FileScanRange.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Catalog); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15299,66 +21100,175 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TExternalScanRange) String() string { +func (p *TPartitionsMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDatabase() { + if err = oprot.WriteFieldBegin("database", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Database); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPartitionsMetadataParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 3); 
err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPartitionsMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TExternalScanRange(%+v)", *p) + return fmt.Sprintf("TPartitionsMetadataParams(%+v)", *p) + } -func (p *TExternalScanRange) DeepEqual(ano *TExternalScanRange) bool { +func (p *TPartitionsMetadataParams) DeepEqual(ano *TPartitionsMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.FileScanRange) { + if !p.Field1DeepEqual(ano.Catalog) { + return false + } + if !p.Field2DeepEqual(ano.Database) { + return false + } + if !p.Field3DeepEqual(ano.Table) { return false } return true } -func (p *TExternalScanRange) Field1DeepEqual(src *TFileScanRange) bool { +func (p *TPartitionsMetadataParams) Field1DeepEqual(src *string) bool { - if !p.FileScanRange.DeepEqual(src) { + if p.Catalog == src { + return true + } else if p.Catalog == nil || src == nil { + return false + } + if strings.Compare(*p.Catalog, *src) != 0 { return false } return true } +func (p *TPartitionsMetadataParams) Field2DeepEqual(src *string) bool { -type TTVFNumbersScanRange struct { - TotalNumbers *int64 `thrift:"totalNumbers,1,optional" frugal:"1,optional,i64" json:"totalNumbers,omitempty"` + if p.Database == src { + return true + } else if p.Database == nil || src == nil { + return false + } + if strings.Compare(*p.Database, *src) != 0 { + return false + } + return true } +func (p *TPartitionsMetadataParams) Field3DeepEqual(src *string) bool { -func NewTTVFNumbersScanRange() *TTVFNumbersScanRange { - return &TTVFNumbersScanRange{} + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true } -func (p *TTVFNumbersScanRange) InitDefault() { - *p = TTVFNumbersScanRange{} +type TPartitionValuesMetadataParams struct { + Catalog *string `thrift:"catalog,1,optional" frugal:"1,optional,string" json:"catalog,omitempty"` + Database *string `thrift:"database,2,optional" frugal:"2,optional,string" json:"database,omitempty"` + Table *string `thrift:"table,3,optional" frugal:"3,optional,string" json:"table,omitempty"` } -var TTVFNumbersScanRange_TotalNumbers_DEFAULT int64 +func NewTPartitionValuesMetadataParams() *TPartitionValuesMetadataParams { + return &TPartitionValuesMetadataParams{} +} -func (p *TTVFNumbersScanRange) GetTotalNumbers() (v int64) { - if !p.IsSetTotalNumbers() { - return TTVFNumbersScanRange_TotalNumbers_DEFAULT +func (p *TPartitionValuesMetadataParams) InitDefault() { +} + +var TPartitionValuesMetadataParams_Catalog_DEFAULT string + +func (p *TPartitionValuesMetadataParams) GetCatalog() (v string) { + if !p.IsSetCatalog() { + return TPartitionValuesMetadataParams_Catalog_DEFAULT } - return *p.TotalNumbers + return *p.Catalog +} + +var TPartitionValuesMetadataParams_Database_DEFAULT string + +func (p *TPartitionValuesMetadataParams) GetDatabase() (v string) { + if !p.IsSetDatabase() { + return TPartitionValuesMetadataParams_Database_DEFAULT + } + return *p.Database +} + +var TPartitionValuesMetadataParams_Table_DEFAULT 
string + +func (p *TPartitionValuesMetadataParams) GetTable() (v string) { + if !p.IsSetTable() { + return TPartitionValuesMetadataParams_Table_DEFAULT + } + return *p.Table +} +func (p *TPartitionValuesMetadataParams) SetCatalog(val *string) { + p.Catalog = val +} +func (p *TPartitionValuesMetadataParams) SetDatabase(val *string) { + p.Database = val +} +func (p *TPartitionValuesMetadataParams) SetTable(val *string) { + p.Table = val } -func (p *TTVFNumbersScanRange) SetTotalNumbers(val *int64) { - p.TotalNumbers = val + +var fieldIDToName_TPartitionValuesMetadataParams = map[int16]string{ + 1: "catalog", + 2: "database", + 3: "table", } -var fieldIDToName_TTVFNumbersScanRange = map[int16]string{ - 1: "totalNumbers", +func (p *TPartitionValuesMetadataParams) IsSetCatalog() bool { + return p.Catalog != nil } -func (p *TTVFNumbersScanRange) IsSetTotalNumbers() bool { - return p.TotalNumbers != nil +func (p *TPartitionValuesMetadataParams) IsSetDatabase() bool { + return p.Database != nil } -func (p *TTVFNumbersScanRange) Read(iprot thrift.TProtocol) (err error) { +func (p *TPartitionValuesMetadataParams) IsSetTable() bool { + return p.Table != nil +} + +func (p *TPartitionValuesMetadataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15378,21 +21288,34 @@ func (p *TTVFNumbersScanRange) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15407,7 +21330,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTVFNumbersScanRange[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionValuesMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15417,18 +21340,43 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTVFNumbersScanRange) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TPartitionValuesMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { return err } else { - p.TotalNumbers = &v + _field = &v } + p.Catalog = _field return nil } +func (p *TPartitionValuesMetadataParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TTVFNumbersScanRange) Write(oprot thrift.TProtocol) (err error) { + var _field *string + if v, err := 
iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Database = _field + return nil +} +func (p *TPartitionValuesMetadataParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Table = _field + return nil +} + +func (p *TPartitionValuesMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TTVFNumbersScanRange"); err != nil { + if err = oprot.WriteStructBegin("TPartitionValuesMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15436,7 +21384,14 @@ func (p *TTVFNumbersScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15455,12 +21410,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TTVFNumbersScanRange) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetTotalNumbers() { - if err = oprot.WriteFieldBegin("totalNumbers", thrift.I64, 1); err != nil { +func (p *TPartitionValuesMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalog() { + if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(*p.TotalNumbers); err != nil { + if err := oprot.WriteString(*p.Catalog); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15474,71 +21429,157 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TTVFNumbersScanRange) String() string { +func (p *TPartitionValuesMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDatabase() { + if err = oprot.WriteFieldBegin("database", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Database); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPartitionValuesMetadataParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTable() { + if err = oprot.WriteFieldBegin("table", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Table); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPartitionValuesMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TTVFNumbersScanRange(%+v)", *p) + return fmt.Sprintf("TPartitionValuesMetadataParams(%+v)", *p) + } -func (p *TTVFNumbersScanRange) DeepEqual(ano *TTVFNumbersScanRange) bool { +func (p *TPartitionValuesMetadataParams) DeepEqual(ano *TPartitionValuesMetadataParams) bool { if p == ano { return true } else if p == nil 
|| ano == nil { return false } - if !p.Field1DeepEqual(ano.TotalNumbers) { + if !p.Field1DeepEqual(ano.Catalog) { + return false + } + if !p.Field2DeepEqual(ano.Database) { + return false + } + if !p.Field3DeepEqual(ano.Table) { return false } return true } -func (p *TTVFNumbersScanRange) Field1DeepEqual(src *int64) bool { +func (p *TPartitionValuesMetadataParams) Field1DeepEqual(src *string) bool { - if p.TotalNumbers == src { + if p.Catalog == src { return true - } else if p.TotalNumbers == nil || src == nil { + } else if p.Catalog == nil || src == nil { return false } - if *p.TotalNumbers != *src { + if strings.Compare(*p.Catalog, *src) != 0 { return false } return true } +func (p *TPartitionValuesMetadataParams) Field2DeepEqual(src *string) bool { -type TDataGenScanRange struct { - NumbersParams *TTVFNumbersScanRange `thrift:"numbers_params,1,optional" frugal:"1,optional,TTVFNumbersScanRange" json:"numbers_params,omitempty"` + if p.Database == src { + return true + } else if p.Database == nil || src == nil { + return false + } + if strings.Compare(*p.Database, *src) != 0 { + return false + } + return true } +func (p *TPartitionValuesMetadataParams) Field3DeepEqual(src *string) bool { -func NewTDataGenScanRange() *TDataGenScanRange { - return &TDataGenScanRange{} + if p.Table == src { + return true + } else if p.Table == nil || src == nil { + return false + } + if strings.Compare(*p.Table, *src) != 0 { + return false + } + return true } -func (p *TDataGenScanRange) InitDefault() { - *p = TDataGenScanRange{} +type TJobsMetadataParams struct { + Type *string `thrift:"type,1,optional" frugal:"1,optional,string" json:"type,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,2,optional" frugal:"2,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` } -var TDataGenScanRange_NumbersParams_DEFAULT *TTVFNumbersScanRange +func NewTJobsMetadataParams() *TJobsMetadataParams { + return &TJobsMetadataParams{} +} -func (p *TDataGenScanRange) GetNumbersParams() (v *TTVFNumbersScanRange) { - if !p.IsSetNumbersParams() { - return TDataGenScanRange_NumbersParams_DEFAULT +func (p *TJobsMetadataParams) InitDefault() { +} + +var TJobsMetadataParams_Type_DEFAULT string + +func (p *TJobsMetadataParams) GetType() (v string) { + if !p.IsSetType() { + return TJobsMetadataParams_Type_DEFAULT } - return p.NumbersParams + return *p.Type } -func (p *TDataGenScanRange) SetNumbersParams(val *TTVFNumbersScanRange) { - p.NumbersParams = val + +var TJobsMetadataParams_CurrentUserIdent_DEFAULT *types.TUserIdentity + +func (p *TJobsMetadataParams) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TJobsMetadataParams_CurrentUserIdent_DEFAULT + } + return p.CurrentUserIdent +} +func (p *TJobsMetadataParams) SetType(val *string) { + p.Type = val +} +func (p *TJobsMetadataParams) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val } -var fieldIDToName_TDataGenScanRange = map[int16]string{ - 1: "numbers_params", +var fieldIDToName_TJobsMetadataParams = map[int16]string{ + 1: "type", + 2: "current_user_ident", } -func (p *TDataGenScanRange) IsSetNumbersParams() bool { - return p.NumbersParams != nil +func (p *TJobsMetadataParams) IsSetType() bool { + return p.Type != nil } -func (p *TDataGenScanRange) Read(iprot thrift.TProtocol) (err error) { +func (p *TJobsMetadataParams) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil +} + +func (p *TJobsMetadataParams) Read(iprot thrift.TProtocol) (err error) { var 
fieldTypeId thrift.TType var fieldId int16 @@ -15558,21 +21599,26 @@ func (p *TDataGenScanRange) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15587,7 +21633,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDataGenScanRange[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobsMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15597,17 +21643,29 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TDataGenScanRange) ReadField1(iprot thrift.TProtocol) error { - p.NumbersParams = NewTTVFNumbersScanRange() - if err := p.NumbersParams.Read(iprot); err != nil { +func (p *TJobsMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Type = _field + return nil +} +func (p *TJobsMetadataParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } -func (p *TDataGenScanRange) Write(oprot thrift.TProtocol) (err error) { +func (p *TJobsMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TDataGenScanRange"); err != nil { + if err = oprot.WriteStructBegin("TJobsMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15615,7 +21673,10 @@ func (p *TDataGenScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15634,12 +21695,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TDataGenScanRange) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetNumbersParams() { - if err = oprot.WriteFieldBegin("numbers_params", thrift.STRUCT, 1); err != nil { +func (p *TJobsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err = oprot.WriteFieldBegin("type", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := p.NumbersParams.Write(oprot); err != nil { + if err := oprot.WriteString(*p.Type); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15653,120 +21714,118 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end 
error: ", p), err) } -func (p *TDataGenScanRange) String() string { +func (p *TJobsMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.CurrentUserIdent.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TJobsMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TDataGenScanRange(%+v)", *p) + return fmt.Sprintf("TJobsMetadataParams(%+v)", *p) + } -func (p *TDataGenScanRange) DeepEqual(ano *TDataGenScanRange) bool { +func (p *TJobsMetadataParams) DeepEqual(ano *TJobsMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.NumbersParams) { + if !p.Field1DeepEqual(ano.Type) { + return false + } + if !p.Field2DeepEqual(ano.CurrentUserIdent) { return false } return true } -func (p *TDataGenScanRange) Field1DeepEqual(src *TTVFNumbersScanRange) bool { +func (p *TJobsMetadataParams) Field1DeepEqual(src *string) bool { - if !p.NumbersParams.DeepEqual(src) { + if p.Type == src { + return true + } else if p.Type == nil || src == nil { + return false + } + if strings.Compare(*p.Type, *src) != 0 { return false } return true } +func (p *TJobsMetadataParams) Field2DeepEqual(src *types.TUserIdentity) bool { -type TIcebergMetadataParams struct { - IcebergQueryType *types.TIcebergQueryType `thrift:"iceberg_query_type,1,optional" frugal:"1,optional,TIcebergQueryType" json:"iceberg_query_type,omitempty"` - Catalog *string `thrift:"catalog,2,optional" frugal:"2,optional,string" json:"catalog,omitempty"` - Database *string `thrift:"database,3,optional" frugal:"3,optional,string" json:"database,omitempty"` - Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` -} - -func NewTIcebergMetadataParams() *TIcebergMetadataParams { - return &TIcebergMetadataParams{} + if !p.CurrentUserIdent.DeepEqual(src) { + return false + } + return true } -func (p *TIcebergMetadataParams) InitDefault() { - *p = TIcebergMetadataParams{} +type TTasksMetadataParams struct { + Type *string `thrift:"type,1,optional" frugal:"1,optional,string" json:"type,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,2,optional" frugal:"2,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` } -var TIcebergMetadataParams_IcebergQueryType_DEFAULT types.TIcebergQueryType - -func (p *TIcebergMetadataParams) GetIcebergQueryType() (v types.TIcebergQueryType) { - if !p.IsSetIcebergQueryType() { - return TIcebergMetadataParams_IcebergQueryType_DEFAULT - } - return *p.IcebergQueryType +func NewTTasksMetadataParams() *TTasksMetadataParams { + return &TTasksMetadataParams{} } -var TIcebergMetadataParams_Catalog_DEFAULT string - -func (p *TIcebergMetadataParams) GetCatalog() (v string) { - if !p.IsSetCatalog() { - return TIcebergMetadataParams_Catalog_DEFAULT - } - return *p.Catalog +func (p *TTasksMetadataParams) InitDefault() { } -var TIcebergMetadataParams_Database_DEFAULT string +var TTasksMetadataParams_Type_DEFAULT string -func (p *TIcebergMetadataParams) GetDatabase() (v string) { - if 
!p.IsSetDatabase() { - return TIcebergMetadataParams_Database_DEFAULT +func (p *TTasksMetadataParams) GetType() (v string) { + if !p.IsSetType() { + return TTasksMetadataParams_Type_DEFAULT } - return *p.Database + return *p.Type } -var TIcebergMetadataParams_Table_DEFAULT string +var TTasksMetadataParams_CurrentUserIdent_DEFAULT *types.TUserIdentity -func (p *TIcebergMetadataParams) GetTable() (v string) { - if !p.IsSetTable() { - return TIcebergMetadataParams_Table_DEFAULT +func (p *TTasksMetadataParams) GetCurrentUserIdent() (v *types.TUserIdentity) { + if !p.IsSetCurrentUserIdent() { + return TTasksMetadataParams_CurrentUserIdent_DEFAULT } - return *p.Table -} -func (p *TIcebergMetadataParams) SetIcebergQueryType(val *types.TIcebergQueryType) { - p.IcebergQueryType = val -} -func (p *TIcebergMetadataParams) SetCatalog(val *string) { - p.Catalog = val -} -func (p *TIcebergMetadataParams) SetDatabase(val *string) { - p.Database = val -} -func (p *TIcebergMetadataParams) SetTable(val *string) { - p.Table = val + return p.CurrentUserIdent } - -var fieldIDToName_TIcebergMetadataParams = map[int16]string{ - 1: "iceberg_query_type", - 2: "catalog", - 3: "database", - 4: "table", +func (p *TTasksMetadataParams) SetType(val *string) { + p.Type = val } - -func (p *TIcebergMetadataParams) IsSetIcebergQueryType() bool { - return p.IcebergQueryType != nil +func (p *TTasksMetadataParams) SetCurrentUserIdent(val *types.TUserIdentity) { + p.CurrentUserIdent = val } -func (p *TIcebergMetadataParams) IsSetCatalog() bool { - return p.Catalog != nil +var fieldIDToName_TTasksMetadataParams = map[int16]string{ + 1: "type", + 2: "current_user_ident", } -func (p *TIcebergMetadataParams) IsSetDatabase() bool { - return p.Database != nil +func (p *TTasksMetadataParams) IsSetType() bool { + return p.Type != nil } -func (p *TIcebergMetadataParams) IsSetTable() bool { - return p.Table != nil +func (p *TTasksMetadataParams) IsSetCurrentUserIdent() bool { + return p.CurrentUserIdent != nil } -func (p *TIcebergMetadataParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TTasksMetadataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15786,51 +21845,26 @@ func (p *TIcebergMetadataParams) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err = p.ReadField3(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err = p.ReadField4(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -15845,7 +21879,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergMetadataParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTasksMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15855,46 +21889,29 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TIcebergMetadataParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return err - } else { - tmp := types.TIcebergQueryType(v) - p.IcebergQueryType = &tmp - } - return nil -} - -func (p *TIcebergMetadataParams) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.Catalog = &v - } - return nil -} +func (p *TTasksMetadataParams) ReadField1(iprot thrift.TProtocol) error { -func (p *TIcebergMetadataParams) ReadField3(iprot thrift.TProtocol) error { + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Database = &v + _field = &v } + p.Type = _field return nil } - -func (p *TIcebergMetadataParams) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *TTasksMetadataParams) ReadField2(iprot thrift.TProtocol) error { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err - } else { - p.Table = &v } + p.CurrentUserIdent = _field return nil } -func (p *TIcebergMetadataParams) Write(oprot thrift.TProtocol) (err error) { +func (p *TTasksMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TIcebergMetadataParams"); err != nil { + if err = oprot.WriteStructBegin("TTasksMetadataParams"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15906,15 +21923,6 @@ func (p *TIcebergMetadataParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - if err = p.writeField3(oprot); err != nil { - fieldId = 3 - goto WriteFieldError - } - if err = p.writeField4(oprot); err != nil { - fieldId = 4 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -15933,12 +21941,12 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TIcebergMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIcebergQueryType() { - if err = oprot.WriteFieldBegin("iceberg_query_type", thrift.I32, 1); err != nil { +func (p *TTasksMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err = oprot.WriteFieldBegin("type", thrift.STRING, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.IcebergQueryType)); err != nil { + if err := oprot.WriteString(*p.Type); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15952,12 +21960,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TIcebergMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetCatalog() { - if err = oprot.WriteFieldBegin("catalog", thrift.STRING, 2); err != nil { +func (p *TTasksMetadataParams) 
writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentUserIdent() { + if err = oprot.WriteFieldBegin("current_user_ident", thrift.STRUCT, 2); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.Catalog); err != nil { + if err := p.CurrentUserIdent.Write(oprot); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -15971,154 +21979,189 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TIcebergMetadataParams) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetDatabase() { - if err = oprot.WriteFieldBegin("database", thrift.STRING, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Database); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) -} - -func (p *TIcebergMetadataParams) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTable() { - if err = oprot.WriteFieldBegin("table", thrift.STRING, 4); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.Table); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) -} - -func (p *TIcebergMetadataParams) String() string { +func (p *TTasksMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TIcebergMetadataParams(%+v)", *p) + return fmt.Sprintf("TTasksMetadataParams(%+v)", *p) + } -func (p *TIcebergMetadataParams) DeepEqual(ano *TIcebergMetadataParams) bool { +func (p *TTasksMetadataParams) DeepEqual(ano *TTasksMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.IcebergQueryType) { - return false - } - if !p.Field2DeepEqual(ano.Catalog) { - return false - } - if !p.Field3DeepEqual(ano.Database) { + if !p.Field1DeepEqual(ano.Type) { return false } - if !p.Field4DeepEqual(ano.Table) { + if !p.Field2DeepEqual(ano.CurrentUserIdent) { return false } return true } -func (p *TIcebergMetadataParams) Field1DeepEqual(src *types.TIcebergQueryType) bool { +func (p *TTasksMetadataParams) Field1DeepEqual(src *string) bool { - if p.IcebergQueryType == src { + if p.Type == src { return true - } else if p.IcebergQueryType == nil || src == nil { + } else if p.Type == nil || src == nil { return false } - if *p.IcebergQueryType != *src { + if strings.Compare(*p.Type, *src) != 0 { return false } return true } -func (p *TIcebergMetadataParams) Field2DeepEqual(src *string) bool { +func (p *TTasksMetadataParams) Field2DeepEqual(src *types.TUserIdentity) bool { - if p.Catalog == src { - return true - } else if p.Catalog == nil || src == nil { - return false - } - if strings.Compare(*p.Catalog, *src) != 0 { + if !p.CurrentUserIdent.DeepEqual(src) { return false } return true } -func (p *TIcebergMetadataParams) Field3DeepEqual(src *string) bool { - if p.Database == src { - return true - } else if p.Database == nil || src == nil { - return false +type TQueriesMetadataParams struct { + ClusterName *string 
`thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` + RelayToOtherFe *bool `thrift:"relay_to_other_fe,2,optional" frugal:"2,optional,bool" json:"relay_to_other_fe,omitempty"` + MaterializedViewsParams *TMaterializedViewsMetadataParams `thrift:"materialized_views_params,3,optional" frugal:"3,optional,TMaterializedViewsMetadataParams" json:"materialized_views_params,omitempty"` + JobsParams *TJobsMetadataParams `thrift:"jobs_params,4,optional" frugal:"4,optional,TJobsMetadataParams" json:"jobs_params,omitempty"` + TasksParams *TTasksMetadataParams `thrift:"tasks_params,5,optional" frugal:"5,optional,TTasksMetadataParams" json:"tasks_params,omitempty"` + PartitionsParams *TPartitionsMetadataParams `thrift:"partitions_params,6,optional" frugal:"6,optional,TPartitionsMetadataParams" json:"partitions_params,omitempty"` + PartitionValuesParams *TPartitionValuesMetadataParams `thrift:"partition_values_params,7,optional" frugal:"7,optional,TPartitionValuesMetadataParams" json:"partition_values_params,omitempty"` +} + +func NewTQueriesMetadataParams() *TQueriesMetadataParams { + return &TQueriesMetadataParams{} +} + +func (p *TQueriesMetadataParams) InitDefault() { +} + +var TQueriesMetadataParams_ClusterName_DEFAULT string + +func (p *TQueriesMetadataParams) GetClusterName() (v string) { + if !p.IsSetClusterName() { + return TQueriesMetadataParams_ClusterName_DEFAULT } - if strings.Compare(*p.Database, *src) != 0 { - return false + return *p.ClusterName +} + +var TQueriesMetadataParams_RelayToOtherFe_DEFAULT bool + +func (p *TQueriesMetadataParams) GetRelayToOtherFe() (v bool) { + if !p.IsSetRelayToOtherFe() { + return TQueriesMetadataParams_RelayToOtherFe_DEFAULT } - return true + return *p.RelayToOtherFe } -func (p *TIcebergMetadataParams) Field4DeepEqual(src *string) bool { - if p.Table == src { - return true - } else if p.Table == nil || src == nil { - return false +var TQueriesMetadataParams_MaterializedViewsParams_DEFAULT *TMaterializedViewsMetadataParams + +func (p *TQueriesMetadataParams) GetMaterializedViewsParams() (v *TMaterializedViewsMetadataParams) { + if !p.IsSetMaterializedViewsParams() { + return TQueriesMetadataParams_MaterializedViewsParams_DEFAULT } - if strings.Compare(*p.Table, *src) != 0 { - return false + return p.MaterializedViewsParams +} + +var TQueriesMetadataParams_JobsParams_DEFAULT *TJobsMetadataParams + +func (p *TQueriesMetadataParams) GetJobsParams() (v *TJobsMetadataParams) { + if !p.IsSetJobsParams() { + return TQueriesMetadataParams_JobsParams_DEFAULT } - return true + return p.JobsParams } -type TBackendsMetadataParams struct { - ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` +var TQueriesMetadataParams_TasksParams_DEFAULT *TTasksMetadataParams + +func (p *TQueriesMetadataParams) GetTasksParams() (v *TTasksMetadataParams) { + if !p.IsSetTasksParams() { + return TQueriesMetadataParams_TasksParams_DEFAULT + } + return p.TasksParams } -func NewTBackendsMetadataParams() *TBackendsMetadataParams { - return &TBackendsMetadataParams{} +var TQueriesMetadataParams_PartitionsParams_DEFAULT *TPartitionsMetadataParams + +func (p *TQueriesMetadataParams) GetPartitionsParams() (v *TPartitionsMetadataParams) { + if !p.IsSetPartitionsParams() { + return TQueriesMetadataParams_PartitionsParams_DEFAULT + } + return p.PartitionsParams } -func (p *TBackendsMetadataParams) InitDefault() { - *p = TBackendsMetadataParams{} +var TQueriesMetadataParams_PartitionValuesParams_DEFAULT 
*TPartitionValuesMetadataParams + +func (p *TQueriesMetadataParams) GetPartitionValuesParams() (v *TPartitionValuesMetadataParams) { + if !p.IsSetPartitionValuesParams() { + return TQueriesMetadataParams_PartitionValuesParams_DEFAULT + } + return p.PartitionValuesParams +} +func (p *TQueriesMetadataParams) SetClusterName(val *string) { + p.ClusterName = val +} +func (p *TQueriesMetadataParams) SetRelayToOtherFe(val *bool) { + p.RelayToOtherFe = val +} +func (p *TQueriesMetadataParams) SetMaterializedViewsParams(val *TMaterializedViewsMetadataParams) { + p.MaterializedViewsParams = val +} +func (p *TQueriesMetadataParams) SetJobsParams(val *TJobsMetadataParams) { + p.JobsParams = val +} +func (p *TQueriesMetadataParams) SetTasksParams(val *TTasksMetadataParams) { + p.TasksParams = val +} +func (p *TQueriesMetadataParams) SetPartitionsParams(val *TPartitionsMetadataParams) { + p.PartitionsParams = val +} +func (p *TQueriesMetadataParams) SetPartitionValuesParams(val *TPartitionValuesMetadataParams) { + p.PartitionValuesParams = val } -var TBackendsMetadataParams_ClusterName_DEFAULT string +var fieldIDToName_TQueriesMetadataParams = map[int16]string{ + 1: "cluster_name", + 2: "relay_to_other_fe", + 3: "materialized_views_params", + 4: "jobs_params", + 5: "tasks_params", + 6: "partitions_params", + 7: "partition_values_params", +} + +func (p *TQueriesMetadataParams) IsSetClusterName() bool { + return p.ClusterName != nil +} + +func (p *TQueriesMetadataParams) IsSetRelayToOtherFe() bool { + return p.RelayToOtherFe != nil +} + +func (p *TQueriesMetadataParams) IsSetMaterializedViewsParams() bool { + return p.MaterializedViewsParams != nil +} -func (p *TBackendsMetadataParams) GetClusterName() (v string) { - if !p.IsSetClusterName() { - return TBackendsMetadataParams_ClusterName_DEFAULT - } - return *p.ClusterName +func (p *TQueriesMetadataParams) IsSetJobsParams() bool { + return p.JobsParams != nil } -func (p *TBackendsMetadataParams) SetClusterName(val *string) { - p.ClusterName = val + +func (p *TQueriesMetadataParams) IsSetTasksParams() bool { + return p.TasksParams != nil } -var fieldIDToName_TBackendsMetadataParams = map[int16]string{ - 1: "cluster_name", +func (p *TQueriesMetadataParams) IsSetPartitionsParams() bool { + return p.PartitionsParams != nil } -func (p *TBackendsMetadataParams) IsSetClusterName() bool { - return p.ClusterName != nil +func (p *TQueriesMetadataParams) IsSetPartitionValuesParams() bool { + return p.PartitionValuesParams != nil } -func (p *TBackendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TQueriesMetadataParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16142,17 +22185,62 @@ func (p *TBackendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else 
if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16167,7 +22255,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendsMetadataParams[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueriesMetadataParams[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16177,18 +22265,72 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBackendsMetadataParams) ReadField1(iprot thrift.TProtocol) error { +func (p *TQueriesMetadataParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ClusterName = &v + _field = &v } + p.ClusterName = _field return nil } +func (p *TQueriesMetadataParams) ReadField2(iprot thrift.TProtocol) error { -func (p *TBackendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.RelayToOtherFe = _field + return nil +} +func (p *TQueriesMetadataParams) ReadField3(iprot thrift.TProtocol) error { + _field := NewTMaterializedViewsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MaterializedViewsParams = _field + return nil +} +func (p *TQueriesMetadataParams) ReadField4(iprot thrift.TProtocol) error { + _field := NewTJobsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.JobsParams = _field + return nil +} +func (p *TQueriesMetadataParams) ReadField5(iprot thrift.TProtocol) error { + _field := NewTTasksMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.TasksParams = _field + return nil +} +func (p *TQueriesMetadataParams) ReadField6(iprot thrift.TProtocol) error { + _field := NewTPartitionsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.PartitionsParams = _field + return nil +} +func (p *TQueriesMetadataParams) ReadField7(iprot thrift.TProtocol) error { + _field := NewTPartitionValuesMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.PartitionValuesParams = _field + return nil +} + +func (p *TQueriesMetadataParams) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TBackendsMetadataParams"); err != nil { + if err = oprot.WriteStructBegin("TQueriesMetadataParams"); err != nil { goto 
WriteStructBeginError } if p != nil { @@ -16196,7 +22338,30 @@ func (p *TBackendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16215,7 +22380,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TBackendsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TQueriesMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetClusterName() { if err = oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { goto WriteFieldBeginError @@ -16234,14 +22399,129 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TBackendsMetadataParams) String() string { +func (p *TQueriesMetadataParams) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetRelayToOtherFe() { + if err = oprot.WriteFieldBegin("relay_to_other_fe", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.RelayToOtherFe); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetMaterializedViewsParams() { + if err = oprot.WriteFieldBegin("materialized_views_params", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.MaterializedViewsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetJobsParams() { + if err = oprot.WriteFieldBegin("jobs_params", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.JobsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetTasksParams() { + if err = oprot.WriteFieldBegin("tasks_params", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.TasksParams.Write(oprot); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionsParams() { + if err = oprot.WriteFieldBegin("partitions_params", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionValuesParams() { + if err = oprot.WriteFieldBegin("partition_values_params", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionValuesParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TQueriesMetadataParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TBackendsMetadataParams(%+v)", *p) + return fmt.Sprintf("TQueriesMetadataParams(%+v)", *p) + } -func (p *TBackendsMetadataParams) DeepEqual(ano *TBackendsMetadataParams) bool { +func (p *TQueriesMetadataParams) DeepEqual(ano *TQueriesMetadataParams) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16250,10 +22530,28 @@ func (p *TBackendsMetadataParams) DeepEqual(ano *TBackendsMetadataParams) bool { if !p.Field1DeepEqual(ano.ClusterName) { return false } + if !p.Field2DeepEqual(ano.RelayToOtherFe) { + return false + } + if !p.Field3DeepEqual(ano.MaterializedViewsParams) { + return false + } + if !p.Field4DeepEqual(ano.JobsParams) { + return false + } + if !p.Field5DeepEqual(ano.TasksParams) { + return false + } + if !p.Field6DeepEqual(ano.PartitionsParams) { + return false + } + if !p.Field7DeepEqual(ano.PartitionValuesParams) { + return false + } return true } -func (p *TBackendsMetadataParams) Field1DeepEqual(src *string) bool { +func (p *TQueriesMetadataParams) Field1DeepEqual(src *string) bool { if p.ClusterName == src { return true @@ -16265,40 +22563,67 @@ func (p *TBackendsMetadataParams) Field1DeepEqual(src *string) bool { } return true } +func (p *TQueriesMetadataParams) Field2DeepEqual(src *bool) bool { -type TFrontendsMetadataParams struct { - ClusterName *string `thrift:"cluster_name,1,optional" frugal:"1,optional,string" json:"cluster_name,omitempty"` + if p.RelayToOtherFe == src { + return true + } else if p.RelayToOtherFe == nil || src == nil { + return false + } + if *p.RelayToOtherFe != *src { + return false + } + return true } +func (p *TQueriesMetadataParams) Field3DeepEqual(src *TMaterializedViewsMetadataParams) bool { -func NewTFrontendsMetadataParams() *TFrontendsMetadataParams { - return &TFrontendsMetadataParams{} + if !p.MaterializedViewsParams.DeepEqual(src) { + return false + } + return true } +func (p 
*TQueriesMetadataParams) Field4DeepEqual(src *TJobsMetadataParams) bool { -func (p *TFrontendsMetadataParams) InitDefault() { - *p = TFrontendsMetadataParams{} + if !p.JobsParams.DeepEqual(src) { + return false + } + return true } +func (p *TQueriesMetadataParams) Field5DeepEqual(src *TTasksMetadataParams) bool { -var TFrontendsMetadataParams_ClusterName_DEFAULT string + if !p.TasksParams.DeepEqual(src) { + return false + } + return true +} +func (p *TQueriesMetadataParams) Field6DeepEqual(src *TPartitionsMetadataParams) bool { -func (p *TFrontendsMetadataParams) GetClusterName() (v string) { - if !p.IsSetClusterName() { - return TFrontendsMetadataParams_ClusterName_DEFAULT + if !p.PartitionsParams.DeepEqual(src) { + return false } - return *p.ClusterName + return true } -func (p *TFrontendsMetadataParams) SetClusterName(val *string) { - p.ClusterName = val +func (p *TQueriesMetadataParams) Field7DeepEqual(src *TPartitionValuesMetadataParams) bool { + + if !p.PartitionValuesParams.DeepEqual(src) { + return false + } + return true } -var fieldIDToName_TFrontendsMetadataParams = map[int16]string{ - 1: "cluster_name", +type TMetaCacheStatsParams struct { } -func (p *TFrontendsMetadataParams) IsSetClusterName() bool { - return p.ClusterName != nil +func NewTMetaCacheStatsParams() *TMetaCacheStatsParams { + return &TMetaCacheStatsParams{} } -func (p *TFrontendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TMetaCacheStatsParams) InitDefault() { +} + +var fieldIDToName_TMetaCacheStatsParams = map[int16]string{} + +func (p *TMetaCacheStatsParams) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16315,24 +22640,9 @@ func (p *TFrontendsMetadataParams) Read(iprot thrift.TProtocol) (err error) { if fieldTypeId == thrift.STOP { break } - - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err = p.ReadField1(iprot); err != nil { - goto ReadFieldError - } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } - } - default: - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldTypeError } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16346,10 +22656,8 @@ ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendsMetadataParams[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +SkipFieldTypeError: + return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err) ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) @@ -16357,26 +22665,11 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFrontendsMetadataParams) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return err - } else { - p.ClusterName = &v - } - return nil -} - -func (p *TFrontendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TFrontendsMetadataParams"); err != nil { +func (p *TMetaCacheStatsParams) Write(oprot 
thrift.TProtocol) (err error) { + if err = oprot.WriteStructBegin("TMetaCacheStatsParams"); err != nil { goto WriteStructBeginError } if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError - } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16387,70 +22680,41 @@ func (p *TFrontendsMetadataParams) Write(oprot thrift.TProtocol) (err error) { return nil WriteStructBeginError: return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) WriteFieldStopError: return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TFrontendsMetadataParams) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetClusterName() { - if err = oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.ClusterName); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } - } - return nil -WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) -WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) -} - -func (p *TFrontendsMetadataParams) String() string { +func (p *TMetaCacheStatsParams) String() string { if p == nil { return "" } - return fmt.Sprintf("TFrontendsMetadataParams(%+v)", *p) + return fmt.Sprintf("TMetaCacheStatsParams(%+v)", *p) + } -func (p *TFrontendsMetadataParams) DeepEqual(ano *TFrontendsMetadataParams) bool { +func (p *TMetaCacheStatsParams) DeepEqual(ano *TMetaCacheStatsParams) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.ClusterName) { - return false - } - return true -} - -func (p *TFrontendsMetadataParams) Field1DeepEqual(src *string) bool { - - if p.ClusterName == src { - return true - } else if p.ClusterName == nil || src == nil { - return false - } - if strings.Compare(*p.ClusterName, *src) != 0 { - return false - } return true } type TMetaScanRange struct { - MetadataType *types.TMetadataType `thrift:"metadata_type,1,optional" frugal:"1,optional,TMetadataType" json:"metadata_type,omitempty"` - IcebergParams *TIcebergMetadataParams `thrift:"iceberg_params,2,optional" frugal:"2,optional,TIcebergMetadataParams" json:"iceberg_params,omitempty"` - BackendsParams *TBackendsMetadataParams `thrift:"backends_params,3,optional" frugal:"3,optional,TBackendsMetadataParams" json:"backends_params,omitempty"` - FrontendsParams *TFrontendsMetadataParams `thrift:"frontends_params,4,optional" frugal:"4,optional,TFrontendsMetadataParams" json:"frontends_params,omitempty"` + MetadataType *types.TMetadataType `thrift:"metadata_type,1,optional" frugal:"1,optional,TMetadataType" json:"metadata_type,omitempty"` + IcebergParams *TIcebergMetadataParams `thrift:"iceberg_params,2,optional" frugal:"2,optional,TIcebergMetadataParams" json:"iceberg_params,omitempty"` + BackendsParams *TBackendsMetadataParams `thrift:"backends_params,3,optional" frugal:"3,optional,TBackendsMetadataParams" json:"backends_params,omitempty"` + FrontendsParams *TFrontendsMetadataParams `thrift:"frontends_params,4,optional" frugal:"4,optional,TFrontendsMetadataParams" json:"frontends_params,omitempty"` + QueriesParams 
*TQueriesMetadataParams `thrift:"queries_params,5,optional" frugal:"5,optional,TQueriesMetadataParams" json:"queries_params,omitempty"` + MaterializedViewsParams *TMaterializedViewsMetadataParams `thrift:"materialized_views_params,6,optional" frugal:"6,optional,TMaterializedViewsMetadataParams" json:"materialized_views_params,omitempty"` + JobsParams *TJobsMetadataParams `thrift:"jobs_params,7,optional" frugal:"7,optional,TJobsMetadataParams" json:"jobs_params,omitempty"` + TasksParams *TTasksMetadataParams `thrift:"tasks_params,8,optional" frugal:"8,optional,TTasksMetadataParams" json:"tasks_params,omitempty"` + PartitionsParams *TPartitionsMetadataParams `thrift:"partitions_params,9,optional" frugal:"9,optional,TPartitionsMetadataParams" json:"partitions_params,omitempty"` + MetaCacheStatsParams *TMetaCacheStatsParams `thrift:"meta_cache_stats_params,10,optional" frugal:"10,optional,TMetaCacheStatsParams" json:"meta_cache_stats_params,omitempty"` + PartitionValuesParams *TPartitionValuesMetadataParams `thrift:"partition_values_params,11,optional" frugal:"11,optional,TPartitionValuesMetadataParams" json:"partition_values_params,omitempty"` } func NewTMetaScanRange() *TMetaScanRange { @@ -16458,7 +22722,6 @@ func NewTMetaScanRange() *TMetaScanRange { } func (p *TMetaScanRange) InitDefault() { - *p = TMetaScanRange{} } var TMetaScanRange_MetadataType_DEFAULT types.TMetadataType @@ -16496,6 +22759,69 @@ func (p *TMetaScanRange) GetFrontendsParams() (v *TFrontendsMetadataParams) { } return p.FrontendsParams } + +var TMetaScanRange_QueriesParams_DEFAULT *TQueriesMetadataParams + +func (p *TMetaScanRange) GetQueriesParams() (v *TQueriesMetadataParams) { + if !p.IsSetQueriesParams() { + return TMetaScanRange_QueriesParams_DEFAULT + } + return p.QueriesParams +} + +var TMetaScanRange_MaterializedViewsParams_DEFAULT *TMaterializedViewsMetadataParams + +func (p *TMetaScanRange) GetMaterializedViewsParams() (v *TMaterializedViewsMetadataParams) { + if !p.IsSetMaterializedViewsParams() { + return TMetaScanRange_MaterializedViewsParams_DEFAULT + } + return p.MaterializedViewsParams +} + +var TMetaScanRange_JobsParams_DEFAULT *TJobsMetadataParams + +func (p *TMetaScanRange) GetJobsParams() (v *TJobsMetadataParams) { + if !p.IsSetJobsParams() { + return TMetaScanRange_JobsParams_DEFAULT + } + return p.JobsParams +} + +var TMetaScanRange_TasksParams_DEFAULT *TTasksMetadataParams + +func (p *TMetaScanRange) GetTasksParams() (v *TTasksMetadataParams) { + if !p.IsSetTasksParams() { + return TMetaScanRange_TasksParams_DEFAULT + } + return p.TasksParams +} + +var TMetaScanRange_PartitionsParams_DEFAULT *TPartitionsMetadataParams + +func (p *TMetaScanRange) GetPartitionsParams() (v *TPartitionsMetadataParams) { + if !p.IsSetPartitionsParams() { + return TMetaScanRange_PartitionsParams_DEFAULT + } + return p.PartitionsParams +} + +var TMetaScanRange_MetaCacheStatsParams_DEFAULT *TMetaCacheStatsParams + +func (p *TMetaScanRange) GetMetaCacheStatsParams() (v *TMetaCacheStatsParams) { + if !p.IsSetMetaCacheStatsParams() { + return TMetaScanRange_MetaCacheStatsParams_DEFAULT + } + return p.MetaCacheStatsParams +} + +var TMetaScanRange_PartitionValuesParams_DEFAULT *TPartitionValuesMetadataParams + +func (p *TMetaScanRange) GetPartitionValuesParams() (v *TPartitionValuesMetadataParams) { + if !p.IsSetPartitionValuesParams() { + return TMetaScanRange_PartitionValuesParams_DEFAULT + } + return p.PartitionValuesParams +} func (p *TMetaScanRange) SetMetadataType(val *types.TMetadataType) { p.MetadataType = val } @@ 
-16508,12 +22834,40 @@ func (p *TMetaScanRange) SetBackendsParams(val *TBackendsMetadataParams) { func (p *TMetaScanRange) SetFrontendsParams(val *TFrontendsMetadataParams) { p.FrontendsParams = val } +func (p *TMetaScanRange) SetQueriesParams(val *TQueriesMetadataParams) { + p.QueriesParams = val +} +func (p *TMetaScanRange) SetMaterializedViewsParams(val *TMaterializedViewsMetadataParams) { + p.MaterializedViewsParams = val +} +func (p *TMetaScanRange) SetJobsParams(val *TJobsMetadataParams) { + p.JobsParams = val +} +func (p *TMetaScanRange) SetTasksParams(val *TTasksMetadataParams) { + p.TasksParams = val +} +func (p *TMetaScanRange) SetPartitionsParams(val *TPartitionsMetadataParams) { + p.PartitionsParams = val +} +func (p *TMetaScanRange) SetMetaCacheStatsParams(val *TMetaCacheStatsParams) { + p.MetaCacheStatsParams = val +} +func (p *TMetaScanRange) SetPartitionValuesParams(val *TPartitionValuesMetadataParams) { + p.PartitionValuesParams = val +} var fieldIDToName_TMetaScanRange = map[int16]string{ - 1: "metadata_type", - 2: "iceberg_params", - 3: "backends_params", - 4: "frontends_params", + 1: "metadata_type", + 2: "iceberg_params", + 3: "backends_params", + 4: "frontends_params", + 5: "queries_params", + 6: "materialized_views_params", + 7: "jobs_params", + 8: "tasks_params", + 9: "partitions_params", + 10: "meta_cache_stats_params", + 11: "partition_values_params", } func (p *TMetaScanRange) IsSetMetadataType() bool { @@ -16532,6 +22886,34 @@ func (p *TMetaScanRange) IsSetFrontendsParams() bool { return p.FrontendsParams != nil } +func (p *TMetaScanRange) IsSetQueriesParams() bool { + return p.QueriesParams != nil +} + +func (p *TMetaScanRange) IsSetMaterializedViewsParams() bool { + return p.MaterializedViewsParams != nil +} + +func (p *TMetaScanRange) IsSetJobsParams() bool { + return p.JobsParams != nil +} + +func (p *TMetaScanRange) IsSetTasksParams() bool { + return p.TasksParams != nil +} + +func (p *TMetaScanRange) IsSetPartitionsParams() bool { + return p.PartitionsParams != nil +} + +func (p *TMetaScanRange) IsSetMetaCacheStatsParams() bool { + return p.MetaCacheStatsParams != nil +} + +func (p *TMetaScanRange) IsSetPartitionValuesParams() bool { + return p.PartitionValuesParams != nil +} + func (p *TMetaScanRange) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -16556,47 +22938,94 @@ func (p *TMetaScanRange) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = 
p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -16622,36 +23051,95 @@ ReadStructEndError: } func (p *TMetaScanRange) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TMetadataType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TMetadataType(v) - p.MetadataType = &tmp + _field = &tmp } + p.MetadataType = _field return nil } - func (p *TMetaScanRange) ReadField2(iprot thrift.TProtocol) error { - p.IcebergParams = NewTIcebergMetadataParams() - if err := p.IcebergParams.Read(iprot); err != nil { + _field := NewTIcebergMetadataParams() + if err := _field.Read(iprot); err != nil { return err } + p.IcebergParams = _field return nil } - func (p *TMetaScanRange) ReadField3(iprot thrift.TProtocol) error { - p.BackendsParams = NewTBackendsMetadataParams() - if err := p.BackendsParams.Read(iprot); err != nil { + _field := NewTBackendsMetadataParams() + if err := _field.Read(iprot); err != nil { return err } + p.BackendsParams = _field return nil } - func (p *TMetaScanRange) ReadField4(iprot thrift.TProtocol) error { - p.FrontendsParams = NewTFrontendsMetadataParams() - if err := p.FrontendsParams.Read(iprot); err != nil { + _field := NewTFrontendsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.FrontendsParams = _field + return nil +} +func (p *TMetaScanRange) ReadField5(iprot thrift.TProtocol) error { + _field := NewTQueriesMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueriesParams = _field + return nil +} +func (p *TMetaScanRange) ReadField6(iprot thrift.TProtocol) error { + _field := NewTMaterializedViewsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MaterializedViewsParams = _field + return nil +} +func (p *TMetaScanRange) ReadField7(iprot thrift.TProtocol) error { + _field := NewTJobsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.JobsParams = _field + return nil +} +func (p *TMetaScanRange) ReadField8(iprot thrift.TProtocol) error { + _field := NewTTasksMetadataParams() + if err := 
_field.Read(iprot); err != nil { + return err + } + p.TasksParams = _field + return nil +} +func (p *TMetaScanRange) ReadField9(iprot thrift.TProtocol) error { + _field := NewTPartitionsMetadataParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.PartitionsParams = _field + return nil +} +func (p *TMetaScanRange) ReadField10(iprot thrift.TProtocol) error { + _field := NewTMetaCacheStatsParams() + if err := _field.Read(iprot); err != nil { + return err + } + p.MetaCacheStatsParams = _field + return nil +} +func (p *TMetaScanRange) ReadField11(iprot thrift.TProtocol) error { + _field := NewTPartitionValuesMetadataParams() + if err := _field.Read(iprot); err != nil { return err } + p.PartitionValuesParams = _field return nil } @@ -16677,7 +23165,34 @@ func (p *TMetaScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -16772,11 +23287,145 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } +func (p *TMetaScanRange) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetQueriesParams() { + if err = oprot.WriteFieldBegin("queries_params", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.QueriesParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TMetaScanRange) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMaterializedViewsParams() { + if err = oprot.WriteFieldBegin("materialized_views_params", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.MaterializedViewsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TMetaScanRange) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetJobsParams() { + if err = oprot.WriteFieldBegin("jobs_params", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.JobsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p 
*TMetaScanRange) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTasksParams() { + if err = oprot.WriteFieldBegin("tasks_params", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.TasksParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TMetaScanRange) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionsParams() { + if err = oprot.WriteFieldBegin("partitions_params", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMetaScanRange) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaCacheStatsParams() { + if err = oprot.WriteFieldBegin("meta_cache_stats_params", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.MetaCacheStatsParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMetaScanRange) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionValuesParams() { + if err = oprot.WriteFieldBegin("partition_values_params", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.PartitionValuesParams.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + func (p *TMetaScanRange) String() string { if p == nil { return "" } return fmt.Sprintf("TMetaScanRange(%+v)", *p) + } func (p *TMetaScanRange) DeepEqual(ano *TMetaScanRange) bool { @@ -16797,6 +23446,27 @@ func (p *TMetaScanRange) DeepEqual(ano *TMetaScanRange) bool { if !p.Field4DeepEqual(ano.FrontendsParams) { return false } + if !p.Field5DeepEqual(ano.QueriesParams) { + return false + } + if !p.Field6DeepEqual(ano.MaterializedViewsParams) { + return false + } + if !p.Field7DeepEqual(ano.JobsParams) { + return false + } + if !p.Field8DeepEqual(ano.TasksParams) { + return false + } + if !p.Field9DeepEqual(ano.PartitionsParams) { + return false + } + if !p.Field10DeepEqual(ano.MetaCacheStatsParams) { + return false + } + if !p.Field11DeepEqual(ano.PartitionValuesParams) { + return false + } return true } @@ -16833,6 +23503,55 @@ func (p *TMetaScanRange) Field4DeepEqual(src *TFrontendsMetadataParams) bool { } return true } +func (p *TMetaScanRange) Field5DeepEqual(src *TQueriesMetadataParams) bool { + + if !p.QueriesParams.DeepEqual(src) { + return false + } + return true +} 
+func (p *TMetaScanRange) Field6DeepEqual(src *TMaterializedViewsMetadataParams) bool { + + if !p.MaterializedViewsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetaScanRange) Field7DeepEqual(src *TJobsMetadataParams) bool { + + if !p.JobsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetaScanRange) Field8DeepEqual(src *TTasksMetadataParams) bool { + + if !p.TasksParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetaScanRange) Field9DeepEqual(src *TPartitionsMetadataParams) bool { + + if !p.PartitionsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetaScanRange) Field10DeepEqual(src *TMetaCacheStatsParams) bool { + + if !p.MetaCacheStatsParams.DeepEqual(src) { + return false + } + return true +} +func (p *TMetaScanRange) Field11DeepEqual(src *TPartitionValuesMetadataParams) bool { + + if !p.PartitionValuesParams.DeepEqual(src) { + return false + } + return true +} type TScanRange struct { PaloScanRange *TPaloScanRange `thrift:"palo_scan_range,4,optional" frugal:"4,optional,TPaloScanRange" json:"palo_scan_range,omitempty"` @@ -16849,7 +23568,6 @@ func NewTScanRange() *TScanRange { } func (p *TScanRange) InitDefault() { - *p = TScanRange{} } var TScanRange_PaloScanRange_DEFAULT *TPaloScanRange @@ -16998,77 +23716,62 @@ func (p *TScanRange) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRUCT { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17094,59 +23797,62 @@ ReadStructEndError: } func (p *TScanRange) ReadField4(iprot thrift.TProtocol) error { - p.PaloScanRange = 
NewTPaloScanRange() - if err := p.PaloScanRange.Read(iprot); err != nil { + _field := NewTPaloScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.PaloScanRange = _field return nil } - func (p *TScanRange) ReadField5(iprot thrift.TProtocol) error { + + var _field []byte if v, err := iprot.ReadBinary(); err != nil { return err } else { - p.KuduScanToken = []byte(v) + _field = []byte(v) } + p.KuduScanToken = _field return nil } - func (p *TScanRange) ReadField6(iprot thrift.TProtocol) error { - p.BrokerScanRange = NewTBrokerScanRange() - if err := p.BrokerScanRange.Read(iprot); err != nil { + _field := NewTBrokerScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerScanRange = _field return nil } - func (p *TScanRange) ReadField7(iprot thrift.TProtocol) error { - p.EsScanRange = NewTEsScanRange() - if err := p.EsScanRange.Read(iprot); err != nil { + _field := NewTEsScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.EsScanRange = _field return nil } - func (p *TScanRange) ReadField8(iprot thrift.TProtocol) error { - p.ExtScanRange = NewTExternalScanRange() - if err := p.ExtScanRange.Read(iprot); err != nil { + _field := NewTExternalScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.ExtScanRange = _field return nil } - func (p *TScanRange) ReadField9(iprot thrift.TProtocol) error { - p.DataGenScanRange = NewTDataGenScanRange() - if err := p.DataGenScanRange.Read(iprot); err != nil { + _field := NewTDataGenScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.DataGenScanRange = _field return nil } - func (p *TScanRange) ReadField10(iprot thrift.TProtocol) error { - p.MetaScanRange = NewTMetaScanRange() - if err := p.MetaScanRange.Read(iprot); err != nil { + _field := NewTMetaScanRange() + if err := _field.Read(iprot); err != nil { return err } + p.MetaScanRange = _field return nil } @@ -17184,7 +23890,6 @@ func (p *TScanRange) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17341,6 +24046,7 @@ func (p *TScanRange) String() string { return "" } return fmt.Sprintf("TScanRange(%+v)", *p) + } func (p *TScanRange) DeepEqual(ano *TScanRange) bool { @@ -17435,7 +24141,6 @@ func NewTMySQLScanNode() *TMySQLScanNode { } func (p *TMySQLScanNode) InitDefault() { - *p = TMySQLScanNode{} } func (p *TMySQLScanNode) GetTupleId() (v types.TTupleId) { @@ -17502,10 +24207,8 @@ func (p *TMySQLScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -17513,10 +24216,8 @@ func (p *TMySQLScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -17524,10 +24225,8 @@ func (p *TMySQLScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -17535,17 +24234,14 @@ func (p 
*TMySQLScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFilters = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -17592,30 +24288,35 @@ RequiredFieldNotSetError: } func (p *TMySQLScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TMySQLScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *TMySQLScanNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -17623,21 +24324,22 @@ func (p *TMySQLScanNode) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TMySQLScanNode) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Filters = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -17645,11 +24347,12 @@ func (p *TMySQLScanNode) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.Filters = append(p.Filters, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Filters = _field return nil } @@ -17675,7 +24378,6 @@ func (p *TMySQLScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -17783,6 +24485,7 @@ func (p *TMySQLScanNode) String() string { return "" } return fmt.Sprintf("TMySQLScanNode(%+v)", *p) + } func (p *TMySQLScanNode) DeepEqual(ano *TMySQLScanNode) bool { @@ -17863,7 +24566,6 @@ func NewTOdbcScanNode() *TOdbcScanNode { } func (p *TOdbcScanNode) InitDefault() { - *p = TOdbcScanNode{} } var TOdbcScanNode_TupleId_DEFAULT types.TTupleId @@ -18029,87 +24731,70 @@ func (p *TOdbcScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if 
fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18135,49 +24820,58 @@ ReadStructEndError: } func (p *TOdbcScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = &v + _field = &v } + p.TupleId = _field return nil } - func (p *TOdbcScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *TOdbcScanNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Driver = &v + _field = &v } + p.Driver = _field return nil } - func (p *TOdbcScanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TOdbcTableType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TOdbcTableType(v) - p.Type = &tmp + _field = &tmp } + p.Type = _field return nil } - func (p *TOdbcScanNode) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -18185,21 +24879,22 @@ func (p *TOdbcScanNode) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TOdbcScanNode) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Filters = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -18207,29 +24902,34 @@ func (p *TOdbcScanNode) ReadField6(iprot thrift.TProtocol) error { _elem = v } - p.Filters = append(p.Filters, _elem) + _field = 
append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Filters = _field return nil } - func (p *TOdbcScanNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ConnectString = &v + _field = &v } + p.ConnectString = _field return nil } - func (p *TOdbcScanNode) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.QueryString = &v + _field = &v } + p.QueryString = _field return nil } @@ -18271,7 +24971,6 @@ func (p *TOdbcScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18463,6 +25162,7 @@ func (p *TOdbcScanNode) String() string { return "" } return fmt.Sprintf("TOdbcScanNode(%+v)", *p) + } func (p *TOdbcScanNode) DeepEqual(ano *TOdbcScanNode) bool { @@ -18609,7 +25309,6 @@ func NewTJdbcScanNode() *TJdbcScanNode { } func (p *TJdbcScanNode) InitDefault() { - *p = TJdbcScanNode{} } var TJdbcScanNode_TupleId_DEFAULT types.TTupleId @@ -18707,47 +25406,38 @@ func (p *TJdbcScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -18773,39 +25463,48 @@ ReadStructEndError: } func (p *TJdbcScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = &v + _field = &v } + p.TupleId = _field return nil } - func (p *TJdbcScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *TJdbcScanNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.QueryString = &v + _field = &v } + p.QueryString = _field return nil } - func (p *TJdbcScanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *types.TOdbcTableType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TOdbcTableType(v) - p.TableType = &tmp + _field = &tmp } + p.TableType = _field return nil } @@ -18831,7 +25530,6 @@ func (p *TJdbcScanNode) Write(oprot thrift.TProtocol) (err error) { 
fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -18931,6 +25629,7 @@ func (p *TJdbcScanNode) String() string { return "" } return fmt.Sprintf("TJdbcScanNode(%+v)", *p) + } func (p *TJdbcScanNode) DeepEqual(ano *TJdbcScanNode) bool { @@ -19015,7 +25714,6 @@ func NewTBrokerScanNode() *TBrokerScanNode { } func (p *TBrokerScanNode) InitDefault() { - *p = TBrokerScanNode{} } func (p *TBrokerScanNode) GetTupleId() (v types.TTupleId) { @@ -19106,47 +25804,38 @@ func (p *TBrokerScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19178,71 +25867,83 @@ RequiredFieldNotSetError: } func (p *TBrokerScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TBrokerScanNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionExprs = append(p.PartitionExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionExprs = _field return nil } - func (p *TBrokerScanNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PartitionInfos = make([]*partitions.TRangePartition, 0, size) + _field := make([]*partitions.TRangePartition, 0, size) + values := make([]partitions.TRangePartition, size) for i := 0; i < size; i++ { - _elem := partitions.NewTRangePartition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionInfos = append(p.PartitionInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionInfos = _field return nil } - func (p *TBrokerScanNode) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.PreFilterExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, 
size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PreFilterExprs = append(p.PreFilterExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PreFilterExprs = _field return nil } @@ -19268,7 +25969,6 @@ func (p *TBrokerScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19390,6 +26090,7 @@ func (p *TBrokerScanNode) String() string { return "" } return fmt.Sprintf("TBrokerScanNode(%+v)", *p) + } func (p *TBrokerScanNode) DeepEqual(ano *TBrokerScanNode) bool { @@ -19470,7 +26171,6 @@ func NewTFileScanNode() *TFileScanNode { } func (p *TFileScanNode) InitDefault() { - *p = TFileScanNode{} } var TFileScanNode_TupleId_DEFAULT types.TTupleId @@ -19534,27 +26234,22 @@ func (p *TFileScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19580,20 +26275,25 @@ ReadStructEndError: } func (p *TFileScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = &v + _field = &v } + p.TupleId = _field return nil } - func (p *TFileScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } @@ -19611,7 +26311,6 @@ func (p *TFileScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -19673,6 +26372,7 @@ func (p *TFileScanNode) String() string { return "" } return fmt.Sprintf("TFileScanNode(%+v)", *p) + } func (p *TFileScanNode) DeepEqual(ano *TFileScanNode) bool { @@ -19727,7 +26427,6 @@ func NewTEsScanNode() *TEsScanNode { } func (p *TEsScanNode) InitDefault() { - *p = TEsScanNode{} } func (p *TEsScanNode) GetTupleId() (v types.TTupleId) { @@ -19818,47 +26517,38 @@ func (p *TEsScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.MAP { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.MAP { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -19890,20 +26580,22 @@ RequiredFieldNotSetError: } func (p *TEsScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TEsScanNode) ReadField2(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.Properties = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -19919,20 +26611,20 @@ func (p *TEsScanNode) ReadField2(iprot thrift.TProtocol) error { _val = v } - p.Properties[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.Properties = _field return nil } - func (p *TEsScanNode) ReadField3(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.DocvalueContext = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -19948,20 +26640,20 @@ func (p *TEsScanNode) ReadField3(iprot thrift.TProtocol) error { _val = v } - p.DocvalueContext[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.DocvalueContext = _field return nil } - func (p *TEsScanNode) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.FieldsContext = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -19977,11 +26669,12 @@ func (p *TEsScanNode) ReadField4(iprot thrift.TProtocol) error { _val = v } - p.FieldsContext[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.FieldsContext = _field return nil } @@ -20007,7 +26700,6 @@ func (p *TEsScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20052,11 +26744,9 @@ func (p *TEsScanNode) writeField2(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.Properties { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -20084,11 +26774,9 @@ func (p *TEsScanNode) writeField3(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.DocvalueContext { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -20116,11 +26804,9 @@ func (p *TEsScanNode) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.FieldsContext { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -20144,6 +26830,7 @@ func (p *TEsScanNode) String() string { return "" 
} return fmt.Sprintf("TEsScanNode(%+v)", *p) + } func (p *TEsScanNode) DeepEqual(ano *TEsScanNode) bool { @@ -20224,7 +26911,6 @@ func NewTMiniLoadEtlFunction() *TMiniLoadEtlFunction { } func (p *TMiniLoadEtlFunction) InitDefault() { - *p = TMiniLoadEtlFunction{} } func (p *TMiniLoadEtlFunction) GetFunctionName() (v string) { @@ -20273,10 +26959,8 @@ func (p *TMiniLoadEtlFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFunctionName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -20284,17 +26968,14 @@ func (p *TMiniLoadEtlFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetParamColumnIndex = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20331,20 +27012,25 @@ RequiredFieldNotSetError: } func (p *TMiniLoadEtlFunction) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FunctionName = v + _field = v } + p.FunctionName = _field return nil } - func (p *TMiniLoadEtlFunction) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ParamColumnIndex = v + _field = v } + p.ParamColumnIndex = _field return nil } @@ -20362,7 +27048,6 @@ func (p *TMiniLoadEtlFunction) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -20420,6 +27105,7 @@ func (p *TMiniLoadEtlFunction) String() string { return "" } return fmt.Sprintf("TMiniLoadEtlFunction(%+v)", *p) + } func (p *TMiniLoadEtlFunction) DeepEqual(ano *TMiniLoadEtlFunction) bool { @@ -20470,7 +27156,6 @@ func NewTCsvScanNode() *TCsvScanNode { } func (p *TCsvScanNode) InitDefault() { - *p = TCsvScanNode{} } func (p *TCsvScanNode) GetTupleId() (v types.TTupleId) { @@ -20655,10 +27340,8 @@ func (p *TCsvScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -20666,97 +27349,78 @@ func (p *TCsvScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFilePaths = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.MAP { if err = p.ReadField5(iprot); err != 
nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.DOUBLE { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.MAP { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -20793,21 +27457,24 @@ RequiredFieldNotSetError: } func (p *TCsvScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TCsvScanNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.FilePaths = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -20815,38 +27482,43 @@ func (p *TCsvScanNode) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.FilePaths = append(p.FilePaths, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.FilePaths = _field return nil } - func (p *TCsvScanNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.ColumnSeparator = &v + _field = &v } + p.ColumnSeparator = _field return nil } - func (p *TCsvScanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.LineDelimiter = &v + _field = &v } + p.LineDelimiter = _field return nil } - func (p *TCsvScanNode) ReadField5(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ColumnTypeMapping = make(map[string]*types.TColumnType, size) + _field := make(map[string]*types.TColumnType, size) + values := make([]types.TColumnType, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -20854,26 +27526,29 @@ func (p *TCsvScanNode) ReadField5(iprot thrift.TProtocol) error { } else { _key = v } - 
_val := types.NewTColumnType() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.ColumnTypeMapping[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ColumnTypeMapping = _field return nil } - func (p *TCsvScanNode) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Columns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -20881,21 +27556,22 @@ func (p *TCsvScanNode) ReadField6(iprot thrift.TProtocol) error { _elem = v } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TCsvScanNode) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.UnspecifiedColumns = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -20903,21 +27579,22 @@ func (p *TCsvScanNode) ReadField7(iprot thrift.TProtocol) error { _elem = v } - p.UnspecifiedColumns = append(p.UnspecifiedColumns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.UnspecifiedColumns = _field return nil } - func (p *TCsvScanNode) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DefaultValues = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -20925,29 +27602,32 @@ func (p *TCsvScanNode) ReadField8(iprot thrift.TProtocol) error { _elem = v } - p.DefaultValues = append(p.DefaultValues, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DefaultValues = _field return nil } - func (p *TCsvScanNode) ReadField9(iprot thrift.TProtocol) error { + + var _field *float64 if v, err := iprot.ReadDouble(); err != nil { return err } else { - p.MaxFilterRatio = &v + _field = &v } + p.MaxFilterRatio = _field return nil } - func (p *TCsvScanNode) ReadField10(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ColumnFunctionMapping = make(map[string]*TMiniLoadEtlFunction, size) + _field := make(map[string]*TMiniLoadEtlFunction, size) + values := make([]TMiniLoadEtlFunction, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -20955,16 +27635,19 @@ func (p *TCsvScanNode) ReadField10(iprot thrift.TProtocol) error { } else { _key = v } - _val := NewTMiniLoadEtlFunction() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.ColumnFunctionMapping[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ColumnFunctionMapping = _field return nil } @@ -21014,7 +27697,6 @@ func (p *TCsvScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 10 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -21122,11 +27804,9 @@ func (p *TCsvScanNode) writeField5(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.ColumnTypeMapping { - if err := oprot.WriteString(k); err != 
nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -21254,11 +27934,9 @@ func (p *TCsvScanNode) writeField10(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.ColumnFunctionMapping { - if err := oprot.WriteString(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -21282,6 +27960,7 @@ func (p *TCsvScanNode) String() string { return "" } return fmt.Sprintf("TCsvScanNode(%+v)", *p) + } func (p *TCsvScanNode) DeepEqual(ano *TCsvScanNode) bool { @@ -21446,19 +28125,20 @@ func (p *TCsvScanNode) Field10DeepEqual(src map[string]*TMiniLoadEtlFunction) bo } type TSchemaScanNode struct { - TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` - TableName string `thrift:"table_name,2,required" frugal:"2,required,string" json:"table_name"` - Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` - Wild *string `thrift:"wild,5,optional" frugal:"5,optional,string" json:"wild,omitempty"` - User *string `thrift:"user,6,optional" frugal:"6,optional,string" json:"user,omitempty"` - Ip *string `thrift:"ip,7,optional" frugal:"7,optional,string" json:"ip,omitempty"` - Port *int32 `thrift:"port,8,optional" frugal:"8,optional,i32" json:"port,omitempty"` - ThreadId *int64 `thrift:"thread_id,9,optional" frugal:"9,optional,i64" json:"thread_id,omitempty"` - UserIp *string `thrift:"user_ip,10,optional" frugal:"10,optional,string" json:"user_ip,omitempty"` - CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,11,optional" frugal:"11,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` - ShowHiddenCloumns bool `thrift:"show_hidden_cloumns,12,optional" frugal:"12,optional,bool" json:"show_hidden_cloumns,omitempty"` - Catalog *string `thrift:"catalog,14,optional" frugal:"14,optional,string" json:"catalog,omitempty"` + TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` + TableName string `thrift:"table_name,2,required" frugal:"2,required,string" json:"table_name"` + Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` + Wild *string `thrift:"wild,5,optional" frugal:"5,optional,string" json:"wild,omitempty"` + User *string `thrift:"user,6,optional" frugal:"6,optional,string" json:"user,omitempty"` + Ip *string `thrift:"ip,7,optional" frugal:"7,optional,string" json:"ip,omitempty"` + Port *int32 `thrift:"port,8,optional" frugal:"8,optional,i32" json:"port,omitempty"` + ThreadId *int64 `thrift:"thread_id,9,optional" frugal:"9,optional,i64" json:"thread_id,omitempty"` + UserIp *string `thrift:"user_ip,10,optional" frugal:"10,optional,string" json:"user_ip,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,11,optional" frugal:"11,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + ShowHiddenCloumns bool `thrift:"show_hidden_cloumns,12,optional" frugal:"12,optional,bool" json:"show_hidden_cloumns,omitempty"` + Catalog *string `thrift:"catalog,14,optional" frugal:"14,optional,string" json:"catalog,omitempty"` + FeAddrList []*types.TNetworkAddress `thrift:"fe_addr_list,15,optional" frugal:"15,optional,list" json:"fe_addr_list,omitempty"` } func NewTSchemaScanNode() *TSchemaScanNode { @@ -21469,10 +28149,7 @@ func NewTSchemaScanNode() *TSchemaScanNode { } 
func (p *TSchemaScanNode) InitDefault() { - *p = TSchemaScanNode{ - - ShowHiddenCloumns: false, - } + p.ShowHiddenCloumns = false } func (p *TSchemaScanNode) GetTupleId() (v types.TTupleId) { @@ -21581,6 +28258,15 @@ func (p *TSchemaScanNode) GetCatalog() (v string) { } return *p.Catalog } + +var TSchemaScanNode_FeAddrList_DEFAULT []*types.TNetworkAddress + +func (p *TSchemaScanNode) GetFeAddrList() (v []*types.TNetworkAddress) { + if !p.IsSetFeAddrList() { + return TSchemaScanNode_FeAddrList_DEFAULT + } + return p.FeAddrList +} func (p *TSchemaScanNode) SetTupleId(val types.TTupleId) { p.TupleId = val } @@ -21620,6 +28306,9 @@ func (p *TSchemaScanNode) SetShowHiddenCloumns(val bool) { func (p *TSchemaScanNode) SetCatalog(val *string) { p.Catalog = val } +func (p *TSchemaScanNode) SetFeAddrList(val []*types.TNetworkAddress) { + p.FeAddrList = val +} var fieldIDToName_TSchemaScanNode = map[int16]string{ 1: "tuple_id", @@ -21635,6 +28324,7 @@ var fieldIDToName_TSchemaScanNode = map[int16]string{ 11: "current_user_ident", 12: "show_hidden_cloumns", 14: "catalog", + 15: "fe_addr_list", } func (p *TSchemaScanNode) IsSetDb() bool { @@ -21681,6 +28371,10 @@ func (p *TSchemaScanNode) IsSetCatalog() bool { return p.Catalog != nil } +func (p *TSchemaScanNode) IsSetFeAddrList() bool { + return p.FeAddrList != nil +} + func (p *TSchemaScanNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -21708,10 +28402,8 @@ func (p *TSchemaScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -21719,127 +28411,110 @@ func (p *TSchemaScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTableName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } 
else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRING { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRING { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.LIST { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -21876,118 +28551,166 @@ RequiredFieldNotSetError: } func (p *TSchemaScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TSchemaScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = v + _field = v } + p.TableName = _field return nil } - func (p *TSchemaScanNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Db = &v + _field = &v } + p.Db = _field return nil } - func (p *TSchemaScanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Table = &v + _field = &v } + p.Table = _field return nil } - func (p *TSchemaScanNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Wild = &v + _field = &v } + p.Wild = _field return nil } - func (p *TSchemaScanNode) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = &v + _field = &v } + p.User = _field return nil } - func (p *TSchemaScanNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Ip = &v + _field = &v } + p.Ip = _field return nil } - func (p *TSchemaScanNode) ReadField8(iprot thrift.TProtocol) error { 
+ + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Port = &v + _field = &v } + p.Port = _field return nil } - func (p *TSchemaScanNode) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ThreadId = &v + _field = &v } + p.ThreadId = _field return nil } - func (p *TSchemaScanNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UserIp = &v + _field = &v } + p.UserIp = _field return nil } - func (p *TSchemaScanNode) ReadField11(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } - func (p *TSchemaScanNode) ReadField12(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ShowHiddenCloumns = v + _field = v } + p.ShowHiddenCloumns = _field return nil } - func (p *TSchemaScanNode) ReadField14(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Catalog = &v + _field = &v } + p.Catalog = _field + return nil +} +func (p *TSchemaScanNode) ReadField15(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FeAddrList = _field return nil } @@ -22049,7 +28772,10 @@ func (p *TSchemaScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 14 goto WriteFieldError } - + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -22311,11 +29037,39 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } +func (p *TSchemaScanNode) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetFeAddrList() { + if err = oprot.WriteFieldBegin("fe_addr_list", thrift.LIST, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FeAddrList)); err != nil { + return err + } + for _, v := range p.FeAddrList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + func (p *TSchemaScanNode) String() string { if p == nil { return "" } return fmt.Sprintf("TSchemaScanNode(%+v)", *p) + } func (p *TSchemaScanNode) DeepEqual(ano *TSchemaScanNode) bool { @@ -22363,6 +29117,9 @@ func (p *TSchemaScanNode) DeepEqual(ano *TSchemaScanNode) bool { if !p.Field14DeepEqual(ano.Catalog) { return false } + if !p.Field15DeepEqual(ano.FeAddrList) { + return false + } return true } @@ -22502,6 +29259,19 @@ func (p 
*TSchemaScanNode) Field14DeepEqual(src *string) bool { } return true } +func (p *TSchemaScanNode) Field15DeepEqual(src []*types.TNetworkAddress) bool { + + if len(p.FeAddrList) != len(src) { + return false + } + for i, v := range p.FeAddrList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} type TMetaScanNode struct { TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` @@ -22514,7 +29284,6 @@ func NewTMetaScanNode() *TMetaScanNode { } func (p *TMetaScanNode) InitDefault() { - *p = TMetaScanNode{} } func (p *TMetaScanNode) GetTupleId() (v types.TTupleId) { @@ -22588,37 +29357,30 @@ func (p *TMetaScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -22650,29 +29412,34 @@ RequiredFieldNotSetError: } func (p *TMetaScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TMetaScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *types.TMetadataType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TMetadataType(v) - p.MetadataType = &tmp + _field = &tmp } + p.MetadataType = _field return nil } - func (p *TMetaScanNode) ReadField3(iprot thrift.TProtocol) error { - p.CurrentUserIdent = types.NewTUserIdentity() - if err := p.CurrentUserIdent.Read(iprot); err != nil { + _field := types.NewTUserIdentity() + if err := _field.Read(iprot); err != nil { return err } + p.CurrentUserIdent = _field return nil } @@ -22694,7 +29461,6 @@ func (p *TMetaScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -22773,6 +29539,7 @@ func (p *TMetaScanNode) String() string { return "" } return fmt.Sprintf("TMetaScanNode(%+v)", *p) + } func (p *TMetaScanNode) DeepEqual(ano *TMetaScanNode) bool { @@ -22830,7 +29597,6 @@ func NewTTestExternalScanNode() *TTestExternalScanNode { } func (p *TTestExternalScanNode) InitDefault() { - *p = TTestExternalScanNode{} } var TTestExternalScanNode_TupleId_DEFAULT types.TTupleId @@ -22894,27 +29660,22 @@ func (p *TTestExternalScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } 
- } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -22940,20 +29701,25 @@ ReadStructEndError: } func (p *TTestExternalScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = &v + _field = &v } + p.TupleId = _field return nil } - func (p *TTestExternalScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } @@ -22971,7 +29737,6 @@ func (p *TTestExternalScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -23033,6 +29798,7 @@ func (p *TTestExternalScanNode) String() string { return "" } return fmt.Sprintf("TTestExternalScanNode(%+v)", *p) + } func (p *TTestExternalScanNode) DeepEqual(ano *TTestExternalScanNode) bool { @@ -23089,7 +29855,6 @@ func NewTSortInfo() *TSortInfo { } func (p *TSortInfo) InitDefault() { - *p = TSortInfo{} } func (p *TSortInfo) GetOrderingExprs() (v []*exprs.TExpr) { @@ -23198,10 +29963,8 @@ func (p *TSortInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOrderingExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -23209,10 +29972,8 @@ func (p *TSortInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsAscOrder = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -23220,47 +29981,38 @@ func (p *TSortInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNullsFirst = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -23306,28 +30058,32 @@ func (p *TSortInfo) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.OrderingExprs = make([]*exprs.TExpr, 0, 
size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OrderingExprs = append(p.OrderingExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OrderingExprs = _field return nil } - func (p *TSortInfo) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.IsAscOrder = make([]bool, 0, size) + _field := make([]bool, 0, size) for i := 0; i < size; i++ { + var _elem bool if v, err := iprot.ReadBool(); err != nil { return err @@ -23335,21 +30091,22 @@ func (p *TSortInfo) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.IsAscOrder = append(p.IsAscOrder, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.IsAscOrder = _field return nil } - func (p *TSortInfo) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.NullsFirst = make([]bool, 0, size) + _field := make([]bool, 0, size) for i := 0; i < size; i++ { + var _elem bool if v, err := iprot.ReadBool(); err != nil { return err @@ -23357,41 +30114,45 @@ func (p *TSortInfo) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.NullsFirst = append(p.NullsFirst, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.NullsFirst = _field return nil } - func (p *TSortInfo) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SortTupleSlotExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SortTupleSlotExprs = append(p.SortTupleSlotExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SortTupleSlotExprs = _field return nil } - func (p *TSortInfo) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SlotExprsNullabilityChangedFlags = make([]bool, 0, size) + _field := make([]bool, 0, size) for i := 0; i < size; i++ { + var _elem bool if v, err := iprot.ReadBool(); err != nil { return err @@ -23399,20 +30160,23 @@ func (p *TSortInfo) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.SlotExprsNullabilityChangedFlags = append(p.SlotExprsNullabilityChangedFlags, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SlotExprsNullabilityChangedFlags = _field return nil } - func (p *TSortInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTwoPhaseRead = &v + _field = &v } + p.UseTwoPhaseRead = _field return nil } @@ -23446,7 +30210,6 @@ func (p *TSortInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -23618,6 +30381,7 @@ func (p *TSortInfo) String() string { return "" } return fmt.Sprintf("TSortInfo(%+v)", *p) + } func (p *TSortInfo) DeepEqual(ano *TSortInfo) bool { @@ -23743,6 +30507,7 @@ type TOlapScanNode struct { OutputColumnUniqueIds 
[]int32 `thrift:"output_column_unique_ids,15,optional" frugal:"15,optional,set" json:"output_column_unique_ids,omitempty"` DistributeColumnIds []int32 `thrift:"distribute_column_ids,16,optional" frugal:"16,optional,list" json:"distribute_column_ids,omitempty"` SchemaVersion *int32 `thrift:"schema_version,17,optional" frugal:"17,optional,i32" json:"schema_version,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,18,optional" frugal:"18,optional,list" json:"topn_filter_source_node_ids,omitempty"` } func NewTOlapScanNode() *TOlapScanNode { @@ -23750,7 +30515,6 @@ func NewTOlapScanNode() *TOlapScanNode { } func (p *TOlapScanNode) InitDefault() { - *p = TOlapScanNode{} } func (p *TOlapScanNode) GetTupleId() (v types.TTupleId) { @@ -23885,6 +30649,15 @@ func (p *TOlapScanNode) GetSchemaVersion() (v int32) { } return *p.SchemaVersion } + +var TOlapScanNode_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TOlapScanNode) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TOlapScanNode_TopnFilterSourceNodeIds_DEFAULT + } + return p.TopnFilterSourceNodeIds +} func (p *TOlapScanNode) SetTupleId(val types.TTupleId) { p.TupleId = val } @@ -23936,6 +30709,9 @@ func (p *TOlapScanNode) SetDistributeColumnIds(val []int32) { func (p *TOlapScanNode) SetSchemaVersion(val *int32) { p.SchemaVersion = val } +func (p *TOlapScanNode) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} var fieldIDToName_TOlapScanNode = map[int16]string{ 1: "tuple_id", @@ -23955,6 +30731,7 @@ var fieldIDToName_TOlapScanNode = map[int16]string{ 15: "output_column_unique_ids", 16: "distribute_column_ids", 17: "schema_version", + 18: "topn_filter_source_node_ids", } func (p *TOlapScanNode) IsSetSortColumn() bool { @@ -24009,6 +30786,10 @@ func (p *TOlapScanNode) IsSetSchemaVersion() bool { return p.SchemaVersion != nil } +func (p *TOlapScanNode) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} + func (p *TOlapScanNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -24038,10 +30819,8 @@ func (p *TOlapScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -24049,10 +30828,8 @@ func (p *TOlapScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetKeyColumnName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -24060,10 +30837,8 @@ func (p *TOlapScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetKeyColumnType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { @@ -24071,147 +30846,126 @@ func (p *TOlapScanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsPreaggregation = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto 
ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.BOOL { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I32 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.LIST { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.SET { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 16: if fieldTypeId == thrift.LIST { if err = p.ReadField16(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.I32 { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 18: + if fieldTypeId == thrift.LIST { + if err = p.ReadField18(iprot); err != nil { + goto 
ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -24258,21 +31012,24 @@ RequiredFieldNotSetError: } func (p *TOlapScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TOlapScanNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.KeyColumnName = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -24280,21 +31037,22 @@ func (p *TOlapScanNode) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.KeyColumnName = append(p.KeyColumnName, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.KeyColumnName = _field return nil } - func (p *TOlapScanNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.KeyColumnType = make([]types.TPrimitiveType, 0, size) + _field := make([]types.TPrimitiveType, 0, size) for i := 0; i < size; i++ { + var _elem types.TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err @@ -24302,143 +31060,166 @@ func (p *TOlapScanNode) ReadField3(iprot thrift.TProtocol) error { _elem = types.TPrimitiveType(v) } - p.KeyColumnType = append(p.KeyColumnType, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.KeyColumnType = _field return nil } - func (p *TOlapScanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsPreaggregation = v + _field = v } + p.IsPreaggregation = _field return nil } - func (p *TOlapScanNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SortColumn = &v + _field = &v } + p.SortColumn = _field return nil } - func (p *TOlapScanNode) ReadField6(iprot thrift.TProtocol) error { + + var _field *types.TKeysType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := types.TKeysType(v) - p.KeyType = &tmp + _field = &tmp } + p.KeyType = _field return nil } - func (p *TOlapScanNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.TableName = &v + _field = &v } + p.TableName = _field return nil } - func (p *TOlapScanNode) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnsDesc = make([]*descriptors.TColumn, 0, size) + _field := make([]*descriptors.TColumn, 0, size) + values := make([]descriptors.TColumn, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTColumn() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnsDesc = append(p.ColumnsDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnsDesc = _field return nil } - func (p *TOlapScanNode) ReadField9(iprot thrift.TProtocol) error { - p.SortInfo = NewTSortInfo() - if err := p.SortInfo.Read(iprot); err != nil { + _field := 
NewTSortInfo() + if err := _field.Read(iprot); err != nil { return err } + p.SortInfo = _field return nil } - func (p *TOlapScanNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.SortLimit = &v + _field = &v } + p.SortLimit = _field return nil } - func (p *TOlapScanNode) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.EnableUniqueKeyMergeOnWrite = &v + _field = &v } + p.EnableUniqueKeyMergeOnWrite = _field return nil } - func (p *TOlapScanNode) ReadField12(iprot thrift.TProtocol) error { + + var _field *TPushAggOp if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TPushAggOp(v) - p.PushDownAggTypeOpt = &tmp + _field = &tmp } + p.PushDownAggTypeOpt = _field return nil } - func (p *TOlapScanNode) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTopnOpt = &v + _field = &v } + p.UseTopnOpt = _field return nil } - func (p *TOlapScanNode) ReadField14(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.IndexesDesc = make([]*descriptors.TOlapTableIndex, 0, size) + _field := make([]*descriptors.TOlapTableIndex, 0, size) + values := make([]descriptors.TOlapTableIndex, size) for i := 0; i < size; i++ { - _elem := descriptors.NewTOlapTableIndex() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.IndexesDesc = append(p.IndexesDesc, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.IndexesDesc = _field return nil } - func (p *TOlapScanNode) ReadField15(iprot thrift.TProtocol) error { _, size, err := iprot.ReadSetBegin() if err != nil { return err } - p.OutputColumnUniqueIds = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -24446,21 +31227,22 @@ func (p *TOlapScanNode) ReadField15(iprot thrift.TProtocol) error { _elem = v } - p.OutputColumnUniqueIds = append(p.OutputColumnUniqueIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadSetEnd(); err != nil { return err } + p.OutputColumnUniqueIds = _field return nil } - func (p *TOlapScanNode) ReadField16(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.DistributeColumnIds = make([]int32, 0, size) + _field := make([]int32, 0, size) for i := 0; i < size; i++ { + var _elem int32 if v, err := iprot.ReadI32(); err != nil { return err @@ -24468,20 +31250,46 @@ func (p *TOlapScanNode) ReadField16(iprot thrift.TProtocol) error { _elem = v } - p.DistributeColumnIds = append(p.DistributeColumnIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.DistributeColumnIds = _field return nil } - func (p *TOlapScanNode) ReadField17(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.SchemaVersion = &v + _field = &v + } + p.SchemaVersion = _field + return nil +} +func (p *TOlapScanNode) ReadField18(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } 
else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field return nil } @@ -24559,7 +31367,10 @@ func (p *TOlapScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 17 goto WriteFieldError } - + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -24953,11 +31764,39 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } +func (p *TOlapScanNode) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + func (p *TOlapScanNode) String() string { if p == nil { return "" } return fmt.Sprintf("TOlapScanNode(%+v)", *p) + } func (p *TOlapScanNode) DeepEqual(ano *TOlapScanNode) bool { @@ -25017,6 +31856,9 @@ func (p *TOlapScanNode) DeepEqual(ano *TOlapScanNode) bool { if !p.Field17DeepEqual(ano.SchemaVersion) { return false } + if !p.Field18DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } return true } @@ -25215,6 +32057,19 @@ func (p *TOlapScanNode) Field17DeepEqual(src *int32) bool { } return true } +func (p *TOlapScanNode) Field18DeepEqual(src []int32) bool { + + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false + } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} type TEqJoinCondition struct { Left *exprs.TExpr `thrift:"left,1,required" frugal:"1,required,exprs.TExpr" json:"left"` @@ -25227,7 +32082,6 @@ func NewTEqJoinCondition() *TEqJoinCondition { } func (p *TEqJoinCondition) InitDefault() { - *p = TEqJoinCondition{} } var TEqJoinCondition_Left_DEFAULT *exprs.TExpr @@ -25311,10 +32165,8 @@ func (p *TEqJoinCondition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLeft = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -25322,27 +32174,22 @@ func (p *TEqJoinCondition) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRight = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { 
goto ReadFieldEndError } @@ -25379,28 +32226,31 @@ RequiredFieldNotSetError: } func (p *TEqJoinCondition) ReadField1(iprot thrift.TProtocol) error { - p.Left = exprs.NewTExpr() - if err := p.Left.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.Left = _field return nil } - func (p *TEqJoinCondition) ReadField2(iprot thrift.TProtocol) error { - p.Right = exprs.NewTExpr() - if err := p.Right.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.Right = _field return nil } - func (p *TEqJoinCondition) ReadField3(iprot thrift.TProtocol) error { + + var _field *opcodes.TExprOpcode if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := opcodes.TExprOpcode(v) - p.Opcode = &tmp + _field = &tmp } + p.Opcode = _field return nil } @@ -25422,7 +32272,6 @@ func (p *TEqJoinCondition) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -25499,6 +32348,7 @@ func (p *TEqJoinCondition) String() string { return "" } return fmt.Sprintf("TEqJoinCondition(%+v)", *p) + } func (p *TEqJoinCondition) DeepEqual(ano *TEqJoinCondition) bool { @@ -25547,17 +32397,20 @@ func (p *TEqJoinCondition) Field3DeepEqual(src *opcodes.TExprOpcode) bool { } type THashJoinNode struct { - JoinOp TJoinOp `thrift:"join_op,1,required" frugal:"1,required,TJoinOp" json:"join_op"` - EqJoinConjuncts []*TEqJoinCondition `thrift:"eq_join_conjuncts,2,required" frugal:"2,required,list" json:"eq_join_conjuncts"` - OtherJoinConjuncts []*exprs.TExpr `thrift:"other_join_conjuncts,3,optional" frugal:"3,optional,list" json:"other_join_conjuncts,omitempty"` - AddProbeFilters *bool `thrift:"add_probe_filters,4,optional" frugal:"4,optional,bool" json:"add_probe_filters,omitempty"` - VotherJoinConjunct *exprs.TExpr `thrift:"vother_join_conjunct,5,optional" frugal:"5,optional,exprs.TExpr" json:"vother_join_conjunct,omitempty"` - HashOutputSlotIds []types.TSlotId `thrift:"hash_output_slot_ids,6,optional" frugal:"6,optional,list" json:"hash_output_slot_ids,omitempty"` - SrcExprList []*exprs.TExpr `thrift:"srcExprList,7,optional" frugal:"7,optional,list" json:"srcExprList,omitempty"` - VoutputTupleId *types.TTupleId `thrift:"voutput_tuple_id,8,optional" frugal:"8,optional,i32" json:"voutput_tuple_id,omitempty"` - VintermediateTupleIdList []types.TTupleId `thrift:"vintermediate_tuple_id_list,9,optional" frugal:"9,optional,list" json:"vintermediate_tuple_id_list,omitempty"` - IsBroadcastJoin *bool `thrift:"is_broadcast_join,10,optional" frugal:"10,optional,bool" json:"is_broadcast_join,omitempty"` - IsMark *bool `thrift:"is_mark,11,optional" frugal:"11,optional,bool" json:"is_mark,omitempty"` + JoinOp TJoinOp `thrift:"join_op,1,required" frugal:"1,required,TJoinOp" json:"join_op"` + EqJoinConjuncts []*TEqJoinCondition `thrift:"eq_join_conjuncts,2,required" frugal:"2,required,list" json:"eq_join_conjuncts"` + OtherJoinConjuncts []*exprs.TExpr `thrift:"other_join_conjuncts,3,optional" frugal:"3,optional,list" json:"other_join_conjuncts,omitempty"` + AddProbeFilters *bool `thrift:"add_probe_filters,4,optional" frugal:"4,optional,bool" json:"add_probe_filters,omitempty"` + VotherJoinConjunct *exprs.TExpr `thrift:"vother_join_conjunct,5,optional" frugal:"5,optional,exprs.TExpr" json:"vother_join_conjunct,omitempty"` + HashOutputSlotIds []types.TSlotId `thrift:"hash_output_slot_ids,6,optional" frugal:"6,optional,list" 
json:"hash_output_slot_ids,omitempty"` + SrcExprList []*exprs.TExpr `thrift:"srcExprList,7,optional" frugal:"7,optional,list" json:"srcExprList,omitempty"` + VoutputTupleId *types.TTupleId `thrift:"voutput_tuple_id,8,optional" frugal:"8,optional,i32" json:"voutput_tuple_id,omitempty"` + VintermediateTupleIdList []types.TTupleId `thrift:"vintermediate_tuple_id_list,9,optional" frugal:"9,optional,list" json:"vintermediate_tuple_id_list,omitempty"` + IsBroadcastJoin *bool `thrift:"is_broadcast_join,10,optional" frugal:"10,optional,bool" json:"is_broadcast_join,omitempty"` + IsMark *bool `thrift:"is_mark,11,optional" frugal:"11,optional,bool" json:"is_mark,omitempty"` + DistType *TJoinDistributionType `thrift:"dist_type,12,optional" frugal:"12,optional,TJoinDistributionType" json:"dist_type,omitempty"` + MarkJoinConjuncts []*exprs.TExpr `thrift:"mark_join_conjuncts,13,optional" frugal:"13,optional,list" json:"mark_join_conjuncts,omitempty"` + UseSpecificProjections *bool `thrift:"use_specific_projections,14,optional" frugal:"14,optional,bool" json:"use_specific_projections,omitempty"` } func NewTHashJoinNode() *THashJoinNode { @@ -25565,7 +32418,6 @@ func NewTHashJoinNode() *THashJoinNode { } func (p *THashJoinNode) InitDefault() { - *p = THashJoinNode{} } func (p *THashJoinNode) GetJoinOp() (v TJoinOp) { @@ -25656,6 +32508,33 @@ func (p *THashJoinNode) GetIsMark() (v bool) { } return *p.IsMark } + +var THashJoinNode_DistType_DEFAULT TJoinDistributionType + +func (p *THashJoinNode) GetDistType() (v TJoinDistributionType) { + if !p.IsSetDistType() { + return THashJoinNode_DistType_DEFAULT + } + return *p.DistType +} + +var THashJoinNode_MarkJoinConjuncts_DEFAULT []*exprs.TExpr + +func (p *THashJoinNode) GetMarkJoinConjuncts() (v []*exprs.TExpr) { + if !p.IsSetMarkJoinConjuncts() { + return THashJoinNode_MarkJoinConjuncts_DEFAULT + } + return p.MarkJoinConjuncts +} + +var THashJoinNode_UseSpecificProjections_DEFAULT bool + +func (p *THashJoinNode) GetUseSpecificProjections() (v bool) { + if !p.IsSetUseSpecificProjections() { + return THashJoinNode_UseSpecificProjections_DEFAULT + } + return *p.UseSpecificProjections +} func (p *THashJoinNode) SetJoinOp(val TJoinOp) { p.JoinOp = val } @@ -25689,6 +32568,15 @@ func (p *THashJoinNode) SetIsBroadcastJoin(val *bool) { func (p *THashJoinNode) SetIsMark(val *bool) { p.IsMark = val } +func (p *THashJoinNode) SetDistType(val *TJoinDistributionType) { + p.DistType = val +} +func (p *THashJoinNode) SetMarkJoinConjuncts(val []*exprs.TExpr) { + p.MarkJoinConjuncts = val +} +func (p *THashJoinNode) SetUseSpecificProjections(val *bool) { + p.UseSpecificProjections = val +} var fieldIDToName_THashJoinNode = map[int16]string{ 1: "join_op", @@ -25702,6 +32590,9 @@ var fieldIDToName_THashJoinNode = map[int16]string{ 9: "vintermediate_tuple_id_list", 10: "is_broadcast_join", 11: "is_mark", + 12: "dist_type", + 13: "mark_join_conjuncts", + 14: "use_specific_projections", } func (p *THashJoinNode) IsSetOtherJoinConjuncts() bool { @@ -25740,6 +32631,18 @@ func (p *THashJoinNode) IsSetIsMark() bool { return p.IsMark != nil } +func (p *THashJoinNode) IsSetDistType() bool { + return p.DistType != nil +} + +func (p *THashJoinNode) IsSetMarkJoinConjuncts() bool { + return p.MarkJoinConjuncts != nil +} + +func (p *THashJoinNode) IsSetUseSpecificProjections() bool { + return p.UseSpecificProjections != nil +} + func (p *THashJoinNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -25767,10 +32670,8 @@ func (p *THashJoinNode) Read(iprot 
thrift.TProtocol) (err error) { goto ReadFieldError } issetJoinOp = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -25778,107 +32679,110 @@ func (p *THashJoinNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetEqJoinConjuncts = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRUCT { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.LIST { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.BOOL { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.LIST { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else 
if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -25915,78 +32819,89 @@ RequiredFieldNotSetError: } func (p *THashJoinNode) ReadField1(iprot thrift.TProtocol) error { + + var _field TJoinOp if v, err := iprot.ReadI32(); err != nil { return err } else { - p.JoinOp = TJoinOp(v) + _field = TJoinOp(v) } + p.JoinOp = _field return nil } - func (p *THashJoinNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.EqJoinConjuncts = make([]*TEqJoinCondition, 0, size) + _field := make([]*TEqJoinCondition, 0, size) + values := make([]TEqJoinCondition, size) for i := 0; i < size; i++ { - _elem := NewTEqJoinCondition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.EqJoinConjuncts = append(p.EqJoinConjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.EqJoinConjuncts = _field return nil } - func (p *THashJoinNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OtherJoinConjuncts = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OtherJoinConjuncts = append(p.OtherJoinConjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OtherJoinConjuncts = _field return nil } - func (p *THashJoinNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.AddProbeFilters = &v + _field = &v } + p.AddProbeFilters = _field return nil } - func (p *THashJoinNode) ReadField5(iprot thrift.TProtocol) error { - p.VotherJoinConjunct = exprs.NewTExpr() - if err := p.VotherJoinConjunct.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.VotherJoinConjunct = _field return nil } - func (p *THashJoinNode) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.HashOutputSlotIds = make([]types.TSlotId, 0, size) + _field := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -25994,50 +32909,56 @@ func (p *THashJoinNode) ReadField6(iprot thrift.TProtocol) error { _elem = v } - p.HashOutputSlotIds = append(p.HashOutputSlotIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.HashOutputSlotIds = _field return nil } - func (p *THashJoinNode) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SrcExprList = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SrcExprList = append(p.SrcExprList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SrcExprList = _field return nil } - 
func (p *THashJoinNode) ReadField8(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VoutputTupleId = &v + _field = &v } + p.VoutputTupleId = _field return nil } - func (p *THashJoinNode) ReadField9(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.VintermediateTupleIdList = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -26045,29 +32966,80 @@ func (p *THashJoinNode) ReadField9(iprot thrift.TProtocol) error { _elem = v } - p.VintermediateTupleIdList = append(p.VintermediateTupleIdList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.VintermediateTupleIdList = _field return nil } - func (p *THashJoinNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsBroadcastJoin = &v + _field = &v } + p.IsBroadcastJoin = _field return nil } - func (p *THashJoinNode) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMark = &v + _field = &v } + p.IsMark = _field + return nil +} +func (p *THashJoinNode) ReadField12(iprot thrift.TProtocol) error { + + var _field *TJoinDistributionType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TJoinDistributionType(v) + _field = &tmp + } + p.DistType = _field + return nil +} +func (p *THashJoinNode) ReadField13(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.MarkJoinConjuncts = _field + return nil +} +func (p *THashJoinNode) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.UseSpecificProjections = _field return nil } @@ -26121,7 +33093,18 @@ func (p *THashJoinNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -26385,11 +33368,77 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } +func (p *THashJoinNode) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDistType() { + if err = oprot.WriteFieldBegin("dist_type", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.DistType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end 
error: ", p), err) +} + +func (p *THashJoinNode) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetMarkJoinConjuncts() { + if err = oprot.WriteFieldBegin("mark_join_conjuncts", thrift.LIST, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.MarkJoinConjuncts)); err != nil { + return err + } + for _, v := range p.MarkJoinConjuncts { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *THashJoinNode) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetUseSpecificProjections() { + if err = oprot.WriteFieldBegin("use_specific_projections", thrift.BOOL, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.UseSpecificProjections); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + func (p *THashJoinNode) String() string { if p == nil { return "" } return fmt.Sprintf("THashJoinNode(%+v)", *p) + } func (p *THashJoinNode) DeepEqual(ano *THashJoinNode) bool { @@ -26431,6 +33480,15 @@ func (p *THashJoinNode) DeepEqual(ano *THashJoinNode) bool { if !p.Field11DeepEqual(ano.IsMark) { return false } + if !p.Field12DeepEqual(ano.DistType) { + return false + } + if !p.Field13DeepEqual(ano.MarkJoinConjuncts) { + return false + } + if !p.Field14DeepEqual(ano.UseSpecificProjections) { + return false + } return true } @@ -26561,6 +33619,43 @@ func (p *THashJoinNode) Field11DeepEqual(src *bool) bool { } return true } +func (p *THashJoinNode) Field12DeepEqual(src *TJoinDistributionType) bool { + + if p.DistType == src { + return true + } else if p.DistType == nil || src == nil { + return false + } + if *p.DistType != *src { + return false + } + return true +} +func (p *THashJoinNode) Field13DeepEqual(src []*exprs.TExpr) bool { + + if len(p.MarkJoinConjuncts) != len(src) { + return false + } + for i, v := range p.MarkJoinConjuncts { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *THashJoinNode) Field14DeepEqual(src *bool) bool { + + if p.UseSpecificProjections == src { + return true + } else if p.UseSpecificProjections == nil || src == nil { + return false + } + if *p.UseSpecificProjections != *src { + return false + } + return true +} type TNestedLoopJoinNode struct { JoinOp TJoinOp `thrift:"join_op,1,required" frugal:"1,required,TJoinOp" json:"join_op"` @@ -26571,6 +33666,8 @@ type TNestedLoopJoinNode struct { VjoinConjunct *exprs.TExpr `thrift:"vjoin_conjunct,6,optional" frugal:"6,optional,exprs.TExpr" json:"vjoin_conjunct,omitempty"` IsMark *bool `thrift:"is_mark,7,optional" frugal:"7,optional,bool" json:"is_mark,omitempty"` JoinConjuncts []*exprs.TExpr `thrift:"join_conjuncts,8,optional" frugal:"8,optional,list" json:"join_conjuncts,omitempty"` + MarkJoinConjuncts []*exprs.TExpr `thrift:"mark_join_conjuncts,9,optional" frugal:"9,optional,list" json:"mark_join_conjuncts,omitempty"` + 
UseSpecificProjections *bool `thrift:"use_specific_projections,10,optional" frugal:"10,optional,bool" json:"use_specific_projections,omitempty"` } func NewTNestedLoopJoinNode() *TNestedLoopJoinNode { @@ -26578,7 +33675,6 @@ func NewTNestedLoopJoinNode() *TNestedLoopJoinNode { } func (p *TNestedLoopJoinNode) InitDefault() { - *p = TNestedLoopJoinNode{} } func (p *TNestedLoopJoinNode) GetJoinOp() (v TJoinOp) { @@ -26647,6 +33743,24 @@ func (p *TNestedLoopJoinNode) GetJoinConjuncts() (v []*exprs.TExpr) { } return p.JoinConjuncts } + +var TNestedLoopJoinNode_MarkJoinConjuncts_DEFAULT []*exprs.TExpr + +func (p *TNestedLoopJoinNode) GetMarkJoinConjuncts() (v []*exprs.TExpr) { + if !p.IsSetMarkJoinConjuncts() { + return TNestedLoopJoinNode_MarkJoinConjuncts_DEFAULT + } + return p.MarkJoinConjuncts +} + +var TNestedLoopJoinNode_UseSpecificProjections_DEFAULT bool + +func (p *TNestedLoopJoinNode) GetUseSpecificProjections() (v bool) { + if !p.IsSetUseSpecificProjections() { + return TNestedLoopJoinNode_UseSpecificProjections_DEFAULT + } + return *p.UseSpecificProjections +} func (p *TNestedLoopJoinNode) SetJoinOp(val TJoinOp) { p.JoinOp = val } @@ -26671,16 +33785,24 @@ func (p *TNestedLoopJoinNode) SetIsMark(val *bool) { func (p *TNestedLoopJoinNode) SetJoinConjuncts(val []*exprs.TExpr) { p.JoinConjuncts = val } +func (p *TNestedLoopJoinNode) SetMarkJoinConjuncts(val []*exprs.TExpr) { + p.MarkJoinConjuncts = val +} +func (p *TNestedLoopJoinNode) SetUseSpecificProjections(val *bool) { + p.UseSpecificProjections = val +} var fieldIDToName_TNestedLoopJoinNode = map[int16]string{ - 1: "join_op", - 2: "srcExprList", - 3: "voutput_tuple_id", - 4: "vintermediate_tuple_id_list", - 5: "is_output_left_side_only", - 6: "vjoin_conjunct", - 7: "is_mark", - 8: "join_conjuncts", + 1: "join_op", + 2: "srcExprList", + 3: "voutput_tuple_id", + 4: "vintermediate_tuple_id_list", + 5: "is_output_left_side_only", + 6: "vjoin_conjunct", + 7: "is_mark", + 8: "join_conjuncts", + 9: "mark_join_conjuncts", + 10: "use_specific_projections", } func (p *TNestedLoopJoinNode) IsSetSrcExprList() bool { @@ -26711,6 +33833,14 @@ func (p *TNestedLoopJoinNode) IsSetJoinConjuncts() bool { return p.JoinConjuncts != nil } +func (p *TNestedLoopJoinNode) IsSetMarkJoinConjuncts() bool { + return p.MarkJoinConjuncts != nil +} + +func (p *TNestedLoopJoinNode) IsSetUseSpecificProjections() bool { + return p.UseSpecificProjections != nil +} + func (p *TNestedLoopJoinNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -26737,87 +33867,86 @@ func (p *TNestedLoopJoinNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetJoinOp = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRUCT { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.LIST { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.LIST { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -26849,50 +33978,58 @@ RequiredFieldNotSetError: } func (p *TNestedLoopJoinNode) ReadField1(iprot thrift.TProtocol) error { + + var _field TJoinOp if v, err := iprot.ReadI32(); err != nil { return err } else { - p.JoinOp = TJoinOp(v) + _field = TJoinOp(v) } + p.JoinOp = _field return nil } - func (p *TNestedLoopJoinNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SrcExprList = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SrcExprList = append(p.SrcExprList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SrcExprList = _field return nil } - func (p *TNestedLoopJoinNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.VoutputTupleId = &v + _field = &v } + p.VoutputTupleId = _field return nil } - func (p *TNestedLoopJoinNode) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.VintermediateTupleIdList = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -26900,57 +34037,99 @@ func (p *TNestedLoopJoinNode) ReadField4(iprot thrift.TProtocol) error { _elem = v } - p.VintermediateTupleIdList = append(p.VintermediateTupleIdList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.VintermediateTupleIdList = _field return 
nil } - func (p *TNestedLoopJoinNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsOutputLeftSideOnly = &v + _field = &v } + p.IsOutputLeftSideOnly = _field return nil } - func (p *TNestedLoopJoinNode) ReadField6(iprot thrift.TProtocol) error { - p.VjoinConjunct = exprs.NewTExpr() - if err := p.VjoinConjunct.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.VjoinConjunct = _field return nil } - func (p *TNestedLoopJoinNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsMark = &v + _field = &v } + p.IsMark = _field return nil } - func (p *TNestedLoopJoinNode) ReadField8(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.JoinConjuncts = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.JoinConjuncts = _field + return nil +} +func (p *TNestedLoopJoinNode) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.JoinConjuncts = append(p.JoinConjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.MarkJoinConjuncts = _field + return nil +} +func (p *TNestedLoopJoinNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.UseSpecificProjections = _field return nil } @@ -26992,7 +34171,14 @@ func (p *TNestedLoopJoinNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -27185,11 +34371,58 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } +func (p *TNestedLoopJoinNode) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetMarkJoinConjuncts() { + if err = oprot.WriteFieldBegin("mark_join_conjuncts", thrift.LIST, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.MarkJoinConjuncts)); err != nil { + return err + } + for _, v := range p.MarkJoinConjuncts { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TNestedLoopJoinNode) writeField10(oprot thrift.TProtocol) (err error) { + if 
p.IsSetUseSpecificProjections() { + if err = oprot.WriteFieldBegin("use_specific_projections", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.UseSpecificProjections); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p *TNestedLoopJoinNode) String() string { if p == nil { return "" } return fmt.Sprintf("TNestedLoopJoinNode(%+v)", *p) + } func (p *TNestedLoopJoinNode) DeepEqual(ano *TNestedLoopJoinNode) bool { @@ -27222,6 +34455,12 @@ func (p *TNestedLoopJoinNode) DeepEqual(ano *TNestedLoopJoinNode) bool { if !p.Field8DeepEqual(ano.JoinConjuncts) { return false } + if !p.Field9DeepEqual(ano.MarkJoinConjuncts) { + return false + } + if !p.Field10DeepEqual(ano.UseSpecificProjections) { + return false + } return true } @@ -27314,6 +34553,31 @@ func (p *TNestedLoopJoinNode) Field8DeepEqual(src []*exprs.TExpr) bool { } return true } +func (p *TNestedLoopJoinNode) Field9DeepEqual(src []*exprs.TExpr) bool { + + if len(p.MarkJoinConjuncts) != len(src) { + return false + } + for i, v := range p.MarkJoinConjuncts { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TNestedLoopJoinNode) Field10DeepEqual(src *bool) bool { + + if p.UseSpecificProjections == src { + return true + } else if p.UseSpecificProjections == nil || src == nil { + return false + } + if *p.UseSpecificProjections != *src { + return false + } + return true +} type TMergeJoinNode struct { CmpConjuncts []*TEqJoinCondition `thrift:"cmp_conjuncts,1,required" frugal:"1,required,list" json:"cmp_conjuncts"` @@ -27325,7 +34589,6 @@ func NewTMergeJoinNode() *TMergeJoinNode { } func (p *TMergeJoinNode) InitDefault() { - *p = TMergeJoinNode{} } func (p *TMergeJoinNode) GetCmpConjuncts() (v []*TEqJoinCondition) { @@ -27382,27 +34645,22 @@ func (p *TMergeJoinNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCmpConjuncts = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -27438,38 +34696,45 @@ func (p *TMergeJoinNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.CmpConjuncts = make([]*TEqJoinCondition, 0, size) + _field := make([]*TEqJoinCondition, 0, size) + values := make([]TEqJoinCondition, size) for i := 0; i < size; i++ { - _elem := NewTEqJoinCondition() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.CmpConjuncts = append(p.CmpConjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.CmpConjuncts = _field return nil } - func (p *TMergeJoinNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - 
p.OtherJoinConjuncts = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OtherJoinConjuncts = append(p.OtherJoinConjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OtherJoinConjuncts = _field return nil } @@ -27487,7 +34752,6 @@ func (p *TMergeJoinNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -27563,6 +34827,7 @@ func (p *TMergeJoinNode) String() string { return "" } return fmt.Sprintf("TMergeJoinNode(%+v)", *p) + } func (p *TMergeJoinNode) DeepEqual(ano *TMergeJoinNode) bool { @@ -27616,6 +34881,8 @@ type TAggregationNode struct { UseStreamingPreaggregation *bool `thrift:"use_streaming_preaggregation,6,optional" frugal:"6,optional,bool" json:"use_streaming_preaggregation,omitempty"` AggSortInfos []*TSortInfo `thrift:"agg_sort_infos,7,optional" frugal:"7,optional,list" json:"agg_sort_infos,omitempty"` IsFirstPhase *bool `thrift:"is_first_phase,8,optional" frugal:"8,optional,bool" json:"is_first_phase,omitempty"` + IsColocate *bool `thrift:"is_colocate,9,optional" frugal:"9,optional,bool" json:"is_colocate,omitempty"` + AggSortInfoByGroupKey *TSortInfo `thrift:"agg_sort_info_by_group_key,10,optional" frugal:"10,optional,TSortInfo" json:"agg_sort_info_by_group_key,omitempty"` } func NewTAggregationNode() *TAggregationNode { @@ -27623,7 +34890,6 @@ func NewTAggregationNode() *TAggregationNode { } func (p *TAggregationNode) InitDefault() { - *p = TAggregationNode{} } var TAggregationNode_GroupingExprs_DEFAULT []*exprs.TExpr @@ -27677,6 +34943,24 @@ func (p *TAggregationNode) GetIsFirstPhase() (v bool) { } return *p.IsFirstPhase } + +var TAggregationNode_IsColocate_DEFAULT bool + +func (p *TAggregationNode) GetIsColocate() (v bool) { + if !p.IsSetIsColocate() { + return TAggregationNode_IsColocate_DEFAULT + } + return *p.IsColocate +} + +var TAggregationNode_AggSortInfoByGroupKey_DEFAULT *TSortInfo + +func (p *TAggregationNode) GetAggSortInfoByGroupKey() (v *TSortInfo) { + if !p.IsSetAggSortInfoByGroupKey() { + return TAggregationNode_AggSortInfoByGroupKey_DEFAULT + } + return p.AggSortInfoByGroupKey +} func (p *TAggregationNode) SetGroupingExprs(val []*exprs.TExpr) { p.GroupingExprs = val } @@ -27701,16 +34985,24 @@ func (p *TAggregationNode) SetAggSortInfos(val []*TSortInfo) { func (p *TAggregationNode) SetIsFirstPhase(val *bool) { p.IsFirstPhase = val } +func (p *TAggregationNode) SetIsColocate(val *bool) { + p.IsColocate = val +} +func (p *TAggregationNode) SetAggSortInfoByGroupKey(val *TSortInfo) { + p.AggSortInfoByGroupKey = val +} var fieldIDToName_TAggregationNode = map[int16]string{ - 1: "grouping_exprs", - 2: "aggregate_functions", - 3: "intermediate_tuple_id", - 4: "output_tuple_id", - 5: "need_finalize", - 6: "use_streaming_preaggregation", - 7: "agg_sort_infos", - 8: "is_first_phase", + 1: "grouping_exprs", + 2: "aggregate_functions", + 3: "intermediate_tuple_id", + 4: "output_tuple_id", + 5: "need_finalize", + 6: "use_streaming_preaggregation", + 7: "agg_sort_infos", + 8: "is_first_phase", + 9: "is_colocate", + 10: "agg_sort_info_by_group_key", } func (p *TAggregationNode) IsSetGroupingExprs() bool { @@ -27729,6 +35021,14 @@ func (p *TAggregationNode) IsSetIsFirstPhase() bool { return 
p.IsFirstPhase != nil } +func (p *TAggregationNode) IsSetIsColocate() bool { + return p.IsColocate != nil +} + +func (p *TAggregationNode) IsSetAggSortInfoByGroupKey() bool { + return p.AggSortInfoByGroupKey != nil +} + func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -27757,10 +35057,8 @@ func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -27768,10 +35066,8 @@ func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAggregateFunctions = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -27779,10 +35075,8 @@ func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIntermediateTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -27790,10 +35084,8 @@ func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { @@ -27801,47 +35093,54 @@ func (p *TAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNeedFinalize = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -27892,103 +35191,142 @@ func (p *TAggregationNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.GroupingExprs = 
make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.GroupingExprs = append(p.GroupingExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.GroupingExprs = _field return nil } - func (p *TAggregationNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.AggregateFunctions = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.AggregateFunctions = append(p.AggregateFunctions, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.AggregateFunctions = _field return nil } - func (p *TAggregationNode) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.IntermediateTupleId = v + _field = v } + p.IntermediateTupleId = _field return nil } - func (p *TAggregationNode) ReadField4(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = v + _field = v } + p.OutputTupleId = _field return nil } - func (p *TAggregationNode) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.NeedFinalize = v + _field = v } + p.NeedFinalize = _field return nil } - func (p *TAggregationNode) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseStreamingPreaggregation = &v + _field = &v } + p.UseStreamingPreaggregation = _field return nil } - func (p *TAggregationNode) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.AggSortInfos = make([]*TSortInfo, 0, size) + _field := make([]*TSortInfo, 0, size) + values := make([]TSortInfo, size) for i := 0; i < size; i++ { - _elem := NewTSortInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.AggSortInfos = append(p.AggSortInfos, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.AggSortInfos = _field return nil } - func (p *TAggregationNode) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsFirstPhase = _field + return nil +} +func (p *TAggregationNode) ReadField9(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsFirstPhase = &v + _field = &v + } + p.IsColocate = _field + return nil +} +func (p *TAggregationNode) ReadField10(iprot thrift.TProtocol) error { + _field := NewTSortInfo() + if err := _field.Read(iprot); err != nil { + return err } + p.AggSortInfoByGroupKey = _field return nil } @@ -28030,7 +35368,14 @@ func (p *TAggregationNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } - + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + 
if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -28217,11 +35562,50 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } +func (p *TAggregationNode) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsColocate() { + if err = oprot.WriteFieldBegin("is_colocate", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsColocate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TAggregationNode) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetAggSortInfoByGroupKey() { + if err = oprot.WriteFieldBegin("agg_sort_info_by_group_key", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.AggSortInfoByGroupKey.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p *TAggregationNode) String() string { if p == nil { return "" } return fmt.Sprintf("TAggregationNode(%+v)", *p) + } func (p *TAggregationNode) DeepEqual(ano *TAggregationNode) bool { @@ -28254,6 +35638,12 @@ func (p *TAggregationNode) DeepEqual(ano *TAggregationNode) bool { if !p.Field8DeepEqual(ano.IsFirstPhase) { return false } + if !p.Field9DeepEqual(ano.IsColocate) { + return false + } + if !p.Field10DeepEqual(ano.AggSortInfoByGroupKey) { + return false + } return true } @@ -28341,6 +35731,25 @@ func (p *TAggregationNode) Field8DeepEqual(src *bool) bool { } return true } +func (p *TAggregationNode) Field9DeepEqual(src *bool) bool { + + if p.IsColocate == src { + return true + } else if p.IsColocate == nil || src == nil { + return false + } + if *p.IsColocate != *src { + return false + } + return true +} +func (p *TAggregationNode) Field10DeepEqual(src *TSortInfo) bool { + + if !p.AggSortInfoByGroupKey.DeepEqual(src) { + return false + } + return true +} type TRepeatNode struct { OutputTupleId types.TTupleId `thrift:"output_tuple_id,1,required" frugal:"1,required,i32" json:"output_tuple_id"` @@ -28356,7 +35765,6 @@ func NewTRepeatNode() *TRepeatNode { } func (p *TRepeatNode) InitDefault() { - *p = TRepeatNode{} } func (p *TRepeatNode) GetOutputTupleId() (v types.TTupleId) { @@ -28441,10 +35849,8 @@ func (p *TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -28452,10 +35858,8 @@ func (p *TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSlotIdSetList = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -28463,10 +35867,8 @@ func (p 
*TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRepeatIdList = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { @@ -28474,10 +35876,8 @@ func (p *TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetGroupingList = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.SET { @@ -28485,10 +35885,8 @@ func (p *TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAllSlotIds = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { @@ -28496,17 +35894,14 @@ func (p *TRepeatNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -28563,20 +35958,22 @@ RequiredFieldNotSetError: } func (p *TRepeatNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = v + _field = v } + p.OutputTupleId = _field return nil } - func (p *TRepeatNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SlotIdSetList = make([][]types.TSlotId, 0, size) + _field := make([][]types.TSlotId, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadSetBegin() if err != nil { @@ -28584,6 +35981,7 @@ func (p *TRepeatNode) ReadField2(iprot thrift.TProtocol) error { } _elem := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem1 types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -28597,21 +35995,22 @@ func (p *TRepeatNode) ReadField2(iprot thrift.TProtocol) error { return err } - p.SlotIdSetList = append(p.SlotIdSetList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SlotIdSetList = _field return nil } - func (p *TRepeatNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RepeatIdList = make([]int64, 0, size) + _field := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -28619,20 +36018,20 @@ func (p *TRepeatNode) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.RepeatIdList = append(p.RepeatIdList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RepeatIdList = _field return nil } - func (p *TRepeatNode) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.GroupingList = make([][]int64, 0, size) + _field := make([][]int64, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { @@ -28640,6 +36039,7 @@ func (p *TRepeatNode) ReadField4(iprot thrift.TProtocol) error { } 
_elem := make([]int64, 0, size) for i := 0; i < size; i++ { + var _elem1 int64 if v, err := iprot.ReadI64(); err != nil { return err @@ -28653,21 +36053,22 @@ func (p *TRepeatNode) ReadField4(iprot thrift.TProtocol) error { return err } - p.GroupingList = append(p.GroupingList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.GroupingList = _field return nil } - func (p *TRepeatNode) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadSetBegin() if err != nil { return err } - p.AllSlotIds = make([]types.TSlotId, 0, size) + _field := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -28675,31 +36076,35 @@ func (p *TRepeatNode) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.AllSlotIds = append(p.AllSlotIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadSetEnd(); err != nil { return err } + p.AllSlotIds = _field return nil } - func (p *TRepeatNode) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Exprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Exprs = append(p.Exprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Exprs = _field return nil } @@ -28733,7 +36138,6 @@ func (p *TRepeatNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -28939,6 +36343,7 @@ func (p *TRepeatNode) String() string { return "" } return fmt.Sprintf("TRepeatNode(%+v)", *p) + } func (p *TRepeatNode) DeepEqual(ano *TRepeatNode) bool { @@ -29063,7 +36468,6 @@ func NewTPreAggregationNode() *TPreAggregationNode { } func (p *TPreAggregationNode) InitDefault() { - *p = TPreAggregationNode{} } func (p *TPreAggregationNode) GetGroupExprs() (v []*exprs.TExpr) { @@ -29112,10 +36516,8 @@ func (p *TPreAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetGroupExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -29123,17 +36525,14 @@ func (p *TPreAggregationNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAggregateExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -29174,38 +36573,45 @@ func (p *TPreAggregationNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.GroupExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.GroupExprs = append(p.GroupExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { 
return err } + p.GroupExprs = _field return nil } - func (p *TPreAggregationNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.AggregateExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.AggregateExprs = append(p.AggregateExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.AggregateExprs = _field return nil } @@ -29223,7 +36629,6 @@ func (p *TPreAggregationNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -29297,6 +36702,7 @@ func (p *TPreAggregationNode) String() string { return "" } return fmt.Sprintf("TPreAggregationNode(%+v)", *p) + } func (p *TPreAggregationNode) DeepEqual(ano *TPreAggregationNode) bool { @@ -29342,11 +36748,15 @@ func (p *TPreAggregationNode) Field2DeepEqual(src []*exprs.TExpr) bool { } type TSortNode struct { - SortInfo *TSortInfo `thrift:"sort_info,1,required" frugal:"1,required,TSortInfo" json:"sort_info"` - UseTopN bool `thrift:"use_top_n,2,required" frugal:"2,required,bool" json:"use_top_n"` - Offset *int64 `thrift:"offset,3,optional" frugal:"3,optional,i64" json:"offset,omitempty"` - IsDefaultLimit *bool `thrift:"is_default_limit,6,optional" frugal:"6,optional,bool" json:"is_default_limit,omitempty"` - UseTopnOpt *bool `thrift:"use_topn_opt,7,optional" frugal:"7,optional,bool" json:"use_topn_opt,omitempty"` + SortInfo *TSortInfo `thrift:"sort_info,1,required" frugal:"1,required,TSortInfo" json:"sort_info"` + UseTopN bool `thrift:"use_top_n,2,required" frugal:"2,required,bool" json:"use_top_n"` + Offset *int64 `thrift:"offset,3,optional" frugal:"3,optional,i64" json:"offset,omitempty"` + IsDefaultLimit *bool `thrift:"is_default_limit,6,optional" frugal:"6,optional,bool" json:"is_default_limit,omitempty"` + UseTopnOpt *bool `thrift:"use_topn_opt,7,optional" frugal:"7,optional,bool" json:"use_topn_opt,omitempty"` + MergeByExchange *bool `thrift:"merge_by_exchange,8,optional" frugal:"8,optional,bool" json:"merge_by_exchange,omitempty"` + IsAnalyticSort *bool `thrift:"is_analytic_sort,9,optional" frugal:"9,optional,bool" json:"is_analytic_sort,omitempty"` + IsColocate *bool `thrift:"is_colocate,10,optional" frugal:"10,optional,bool" json:"is_colocate,omitempty"` + Algorithm *TSortAlgorithm `thrift:"algorithm,11,optional" frugal:"11,optional,TSortAlgorithm" json:"algorithm,omitempty"` } func NewTSortNode() *TSortNode { @@ -29354,7 +36764,6 @@ func NewTSortNode() *TSortNode { } func (p *TSortNode) InitDefault() { - *p = TSortNode{} } var TSortNode_SortInfo_DEFAULT *TSortInfo @@ -29396,6 +36805,42 @@ func (p *TSortNode) GetUseTopnOpt() (v bool) { } return *p.UseTopnOpt } + +var TSortNode_MergeByExchange_DEFAULT bool + +func (p *TSortNode) GetMergeByExchange() (v bool) { + if !p.IsSetMergeByExchange() { + return TSortNode_MergeByExchange_DEFAULT + } + return *p.MergeByExchange +} + +var TSortNode_IsAnalyticSort_DEFAULT bool + +func (p *TSortNode) GetIsAnalyticSort() (v bool) { + if !p.IsSetIsAnalyticSort() { + return TSortNode_IsAnalyticSort_DEFAULT + } + return *p.IsAnalyticSort +} + +var TSortNode_IsColocate_DEFAULT bool + +func (p *TSortNode) GetIsColocate() (v bool) { + if !p.IsSetIsColocate() { + return 
TSortNode_IsColocate_DEFAULT + } + return *p.IsColocate +} + +var TSortNode_Algorithm_DEFAULT TSortAlgorithm + +func (p *TSortNode) GetAlgorithm() (v TSortAlgorithm) { + if !p.IsSetAlgorithm() { + return TSortNode_Algorithm_DEFAULT + } + return *p.Algorithm +} func (p *TSortNode) SetSortInfo(val *TSortInfo) { p.SortInfo = val } @@ -29411,13 +36856,29 @@ func (p *TSortNode) SetIsDefaultLimit(val *bool) { func (p *TSortNode) SetUseTopnOpt(val *bool) { p.UseTopnOpt = val } +func (p *TSortNode) SetMergeByExchange(val *bool) { + p.MergeByExchange = val +} +func (p *TSortNode) SetIsAnalyticSort(val *bool) { + p.IsAnalyticSort = val +} +func (p *TSortNode) SetIsColocate(val *bool) { + p.IsColocate = val +} +func (p *TSortNode) SetAlgorithm(val *TSortAlgorithm) { + p.Algorithm = val +} var fieldIDToName_TSortNode = map[int16]string{ - 1: "sort_info", - 2: "use_top_n", - 3: "offset", - 6: "is_default_limit", - 7: "use_topn_opt", + 1: "sort_info", + 2: "use_top_n", + 3: "offset", + 6: "is_default_limit", + 7: "use_topn_opt", + 8: "merge_by_exchange", + 9: "is_analytic_sort", + 10: "is_colocate", + 11: "algorithm", } func (p *TSortNode) IsSetSortInfo() bool { @@ -29436,6 +36897,22 @@ func (p *TSortNode) IsSetUseTopnOpt() bool { return p.UseTopnOpt != nil } +func (p *TSortNode) IsSetMergeByExchange() bool { + return p.MergeByExchange != nil +} + +func (p *TSortNode) IsSetIsAnalyticSort() bool { + return p.IsAnalyticSort != nil +} + +func (p *TSortNode) IsSetIsColocate() bool { + return p.IsColocate != nil +} + +func (p *TSortNode) IsSetAlgorithm() bool { + return p.Algorithm != nil +} + func (p *TSortNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -29463,10 +36940,8 @@ func (p *TSortNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSortInfo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { @@ -29474,47 +36949,70 @@ func (p *TSortNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUseTopN = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if 
fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -29551,46 +37049,100 @@ RequiredFieldNotSetError: } func (p *TSortNode) ReadField1(iprot thrift.TProtocol) error { - p.SortInfo = NewTSortInfo() - if err := p.SortInfo.Read(iprot); err != nil { + _field := NewTSortInfo() + if err := _field.Read(iprot); err != nil { return err } + p.SortInfo = _field return nil } - func (p *TSortNode) ReadField2(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTopN = v + _field = v } + p.UseTopN = _field return nil } - func (p *TSortNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Offset = &v + _field = &v } + p.Offset = _field return nil } - func (p *TSortNode) ReadField6(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDefaultLimit = &v + _field = &v } + p.IsDefaultLimit = _field return nil } - func (p *TSortNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.UseTopnOpt = _field + return nil +} +func (p *TSortNode) ReadField8(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.MergeByExchange = _field + return nil +} +func (p *TSortNode) ReadField9(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsAnalyticSort = _field + return nil +} +func (p *TSortNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.UseTopnOpt = &v + _field = &v + } + p.IsColocate = _field + return nil +} +func (p *TSortNode) ReadField11(iprot thrift.TProtocol) error { + + var _field *TSortAlgorithm + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TSortAlgorithm(v) + _field = &tmp } + p.Algorithm = _field return nil } @@ -29620,7 +37172,22 @@ func (p *TSortNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 7 goto WriteFieldError } - + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -29730,11 +37297,88 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) } +func (p *TSortNode) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetMergeByExchange() { + if err = oprot.WriteFieldBegin("merge_by_exchange", thrift.BOOL, 8); err != nil { + goto WriteFieldBeginError + } + if err := 
oprot.WriteBool(*p.MergeByExchange); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TSortNode) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsAnalyticSort() { + if err = oprot.WriteFieldBegin("is_analytic_sort", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsAnalyticSort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TSortNode) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsColocate() { + if err = oprot.WriteFieldBegin("is_colocate", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsColocate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TSortNode) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetAlgorithm() { + if err = oprot.WriteFieldBegin("algorithm", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Algorithm)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + func (p *TSortNode) String() string { if p == nil { return "" } return fmt.Sprintf("TSortNode(%+v)", *p) + } func (p *TSortNode) DeepEqual(ano *TSortNode) bool { @@ -29758,6 +37402,18 @@ func (p *TSortNode) DeepEqual(ano *TSortNode) bool { if !p.Field7DeepEqual(ano.UseTopnOpt) { return false } + if !p.Field8DeepEqual(ano.MergeByExchange) { + return false + } + if !p.Field9DeepEqual(ano.IsAnalyticSort) { + return false + } + if !p.Field10DeepEqual(ano.IsColocate) { + return false + } + if !p.Field11DeepEqual(ano.Algorithm) { + return false + } return true } @@ -29811,13 +37467,62 @@ func (p *TSortNode) Field7DeepEqual(src *bool) bool { } return true } +func (p *TSortNode) Field8DeepEqual(src *bool) bool { + + if p.MergeByExchange == src { + return true + } else if p.MergeByExchange == nil || src == nil { + return false + } + if *p.MergeByExchange != *src { + return false + } + return true +} +func (p *TSortNode) Field9DeepEqual(src *bool) bool { + + if p.IsAnalyticSort == src { + return true + } else if p.IsAnalyticSort == nil || src == nil { + return false + } + if *p.IsAnalyticSort != *src { + return false + } + return true +} +func (p *TSortNode) Field10DeepEqual(src *bool) bool { + + if p.IsColocate == src { + return true + } else if p.IsColocate == nil || src == nil { + return false + } + if *p.IsColocate != *src { + return false + } + return true +} +func 
(p *TSortNode) Field11DeepEqual(src *TSortAlgorithm) bool { + + if p.Algorithm == src { + return true + } else if p.Algorithm == nil || src == nil { + return false + } + if *p.Algorithm != *src { + return false + } + return true +} type TPartitionSortNode struct { - PartitionExprs []*exprs.TExpr `thrift:"partition_exprs,1,optional" frugal:"1,optional,list" json:"partition_exprs,omitempty"` - SortInfo *TSortInfo `thrift:"sort_info,2,optional" frugal:"2,optional,TSortInfo" json:"sort_info,omitempty"` - HasGlobalLimit *bool `thrift:"has_global_limit,3,optional" frugal:"3,optional,bool" json:"has_global_limit,omitempty"` - TopNAlgorithm *TopNAlgorithm `thrift:"top_n_algorithm,4,optional" frugal:"4,optional,TopNAlgorithm" json:"top_n_algorithm,omitempty"` - PartitionInnerLimit *int64 `thrift:"partition_inner_limit,5,optional" frugal:"5,optional,i64" json:"partition_inner_limit,omitempty"` + PartitionExprs []*exprs.TExpr `thrift:"partition_exprs,1,optional" frugal:"1,optional,list" json:"partition_exprs,omitempty"` + SortInfo *TSortInfo `thrift:"sort_info,2,optional" frugal:"2,optional,TSortInfo" json:"sort_info,omitempty"` + HasGlobalLimit *bool `thrift:"has_global_limit,3,optional" frugal:"3,optional,bool" json:"has_global_limit,omitempty"` + TopNAlgorithm *TopNAlgorithm `thrift:"top_n_algorithm,4,optional" frugal:"4,optional,TopNAlgorithm" json:"top_n_algorithm,omitempty"` + PartitionInnerLimit *int64 `thrift:"partition_inner_limit,5,optional" frugal:"5,optional,i64" json:"partition_inner_limit,omitempty"` + PtopnPhase *TPartTopNPhase `thrift:"ptopn_phase,6,optional" frugal:"6,optional,TPartTopNPhase" json:"ptopn_phase,omitempty"` } func NewTPartitionSortNode() *TPartitionSortNode { @@ -29825,7 +37530,6 @@ func NewTPartitionSortNode() *TPartitionSortNode { } func (p *TPartitionSortNode) InitDefault() { - *p = TPartitionSortNode{} } var TPartitionSortNode_PartitionExprs_DEFAULT []*exprs.TExpr @@ -29872,6 +37576,15 @@ func (p *TPartitionSortNode) GetPartitionInnerLimit() (v int64) { } return *p.PartitionInnerLimit } + +var TPartitionSortNode_PtopnPhase_DEFAULT TPartTopNPhase + +func (p *TPartitionSortNode) GetPtopnPhase() (v TPartTopNPhase) { + if !p.IsSetPtopnPhase() { + return TPartitionSortNode_PtopnPhase_DEFAULT + } + return *p.PtopnPhase +} func (p *TPartitionSortNode) SetPartitionExprs(val []*exprs.TExpr) { p.PartitionExprs = val } @@ -29887,6 +37600,9 @@ func (p *TPartitionSortNode) SetTopNAlgorithm(val *TopNAlgorithm) { func (p *TPartitionSortNode) SetPartitionInnerLimit(val *int64) { p.PartitionInnerLimit = val } +func (p *TPartitionSortNode) SetPtopnPhase(val *TPartTopNPhase) { + p.PtopnPhase = val +} var fieldIDToName_TPartitionSortNode = map[int16]string{ 1: "partition_exprs", @@ -29894,6 +37610,7 @@ var fieldIDToName_TPartitionSortNode = map[int16]string{ 3: "has_global_limit", 4: "top_n_algorithm", 5: "partition_inner_limit", + 6: "ptopn_phase", } func (p *TPartitionSortNode) IsSetPartitionExprs() bool { @@ -29916,6 +37633,10 @@ func (p *TPartitionSortNode) IsSetPartitionInnerLimit() bool { return p.PartitionInnerLimit != nil } +func (p *TPartitionSortNode) IsSetPtopnPhase() bool { + return p.PtopnPhase != nil +} + func (p *TPartitionSortNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -29940,57 +37661,54 @@ func (p *TPartitionSortNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I32 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -30020,54 +37738,76 @@ func (p *TPartitionSortNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionExprs = append(p.PartitionExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionExprs = _field return nil } - func (p *TPartitionSortNode) ReadField2(iprot thrift.TProtocol) error { - p.SortInfo = NewTSortInfo() - if err := p.SortInfo.Read(iprot); err != nil { + _field := NewTSortInfo() + if err := _field.Read(iprot); err != nil { return err } + p.SortInfo = _field return nil } - func (p *TPartitionSortNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasGlobalLimit = &v + _field = &v } + p.HasGlobalLimit = _field return nil } - func (p *TPartitionSortNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *TopNAlgorithm if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TopNAlgorithm(v) - p.TopNAlgorithm = &tmp + _field = &tmp } + p.TopNAlgorithm = _field return nil } - func (p *TPartitionSortNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PartitionInnerLimit = &v + _field = &v } + p.PartitionInnerLimit = _field + return nil +} +func (p *TPartitionSortNode) ReadField6(iprot thrift.TProtocol) error { + + var _field *TPartTopNPhase + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TPartTopNPhase(v) + _field = &tmp + } + p.PtopnPhase = _field return nil } @@ -30097,7 +37837,10 @@ func (p *TPartitionSortNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - + if err = p.writeField6(oprot); 
err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -30219,11 +37962,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *TPartitionSortNode) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPtopnPhase() { + if err = oprot.WriteFieldBegin("ptopn_phase", thrift.I32, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.PtopnPhase)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + func (p *TPartitionSortNode) String() string { if p == nil { return "" } return fmt.Sprintf("TPartitionSortNode(%+v)", *p) + } func (p *TPartitionSortNode) DeepEqual(ano *TPartitionSortNode) bool { @@ -30247,6 +38010,9 @@ func (p *TPartitionSortNode) DeepEqual(ano *TPartitionSortNode) bool { if !p.Field5DeepEqual(ano.PartitionInnerLimit) { return false } + if !p.Field6DeepEqual(ano.PtopnPhase) { + return false + } return true } @@ -30306,6 +38072,18 @@ func (p *TPartitionSortNode) Field5DeepEqual(src *int64) bool { } return true } +func (p *TPartitionSortNode) Field6DeepEqual(src *TPartTopNPhase) bool { + + if p.PtopnPhase == src { + return true + } else if p.PtopnPhase == nil || src == nil { + return false + } + if *p.PtopnPhase != *src { + return false + } + return true +} type TAnalyticWindowBoundary struct { Type TAnalyticWindowBoundaryType `thrift:"type,1,required" frugal:"1,required,TAnalyticWindowBoundaryType" json:"type"` @@ -30318,7 +38096,6 @@ func NewTAnalyticWindowBoundary() *TAnalyticWindowBoundary { } func (p *TAnalyticWindowBoundary) InitDefault() { - *p = TAnalyticWindowBoundary{} } func (p *TAnalyticWindowBoundary) GetType() (v TAnalyticWindowBoundaryType) { @@ -30392,37 +38169,30 @@ func (p *TAnalyticWindowBoundary) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -30454,28 +38224,33 @@ RequiredFieldNotSetError: } func (p *TAnalyticWindowBoundary) ReadField1(iprot thrift.TProtocol) error { + + var _field TAnalyticWindowBoundaryType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TAnalyticWindowBoundaryType(v) + _field = TAnalyticWindowBoundaryType(v) } + p.Type = _field return nil } - func (p *TAnalyticWindowBoundary) ReadField2(iprot thrift.TProtocol) error { - p.RangeOffsetPredicate = exprs.NewTExpr() - if err := 
p.RangeOffsetPredicate.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.RangeOffsetPredicate = _field return nil } - func (p *TAnalyticWindowBoundary) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.RowsOffsetValue = &v + _field = &v } + p.RowsOffsetValue = _field return nil } @@ -30497,7 +38272,6 @@ func (p *TAnalyticWindowBoundary) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -30576,6 +38350,7 @@ func (p *TAnalyticWindowBoundary) String() string { return "" } return fmt.Sprintf("TAnalyticWindowBoundary(%+v)", *p) + } func (p *TAnalyticWindowBoundary) DeepEqual(ano *TAnalyticWindowBoundary) bool { @@ -30634,7 +38409,6 @@ func NewTAnalyticWindow() *TAnalyticWindow { } func (p *TAnalyticWindow) InitDefault() { - *p = TAnalyticWindow{} } func (p *TAnalyticWindow) GetType() (v TAnalyticWindowType) { @@ -30708,37 +38482,30 @@ func (p *TAnalyticWindow) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRUCT { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -30770,27 +38537,30 @@ RequiredFieldNotSetError: } func (p *TAnalyticWindow) ReadField1(iprot thrift.TProtocol) error { + + var _field TAnalyticWindowType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TAnalyticWindowType(v) + _field = TAnalyticWindowType(v) } + p.Type = _field return nil } - func (p *TAnalyticWindow) ReadField2(iprot thrift.TProtocol) error { - p.WindowStart = NewTAnalyticWindowBoundary() - if err := p.WindowStart.Read(iprot); err != nil { + _field := NewTAnalyticWindowBoundary() + if err := _field.Read(iprot); err != nil { return err } + p.WindowStart = _field return nil } - func (p *TAnalyticWindow) ReadField3(iprot thrift.TProtocol) error { - p.WindowEnd = NewTAnalyticWindowBoundary() - if err := p.WindowEnd.Read(iprot); err != nil { + _field := NewTAnalyticWindowBoundary() + if err := _field.Read(iprot); err != nil { return err } + p.WindowEnd = _field return nil } @@ -30812,7 +38582,6 @@ func (p *TAnalyticWindow) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -30891,6 +38660,7 @@ func (p *TAnalyticWindow) String() string { return "" } return fmt.Sprintf("TAnalyticWindow(%+v)", *p) + } func (p *TAnalyticWindow) DeepEqual(ano *TAnalyticWindow) bool { @@ -30943,6 +38713,7 @@ type TAnalyticNode struct { BufferedTupleId *types.TTupleId `thrift:"buffered_tuple_id,7,optional" frugal:"7,optional,i32" 
json:"buffered_tuple_id,omitempty"` PartitionByEq *exprs.TExpr `thrift:"partition_by_eq,8,optional" frugal:"8,optional,exprs.TExpr" json:"partition_by_eq,omitempty"` OrderByEq *exprs.TExpr `thrift:"order_by_eq,9,optional" frugal:"9,optional,exprs.TExpr" json:"order_by_eq,omitempty"` + IsColocate *bool `thrift:"is_colocate,10,optional" frugal:"10,optional,bool" json:"is_colocate,omitempty"` } func NewTAnalyticNode() *TAnalyticNode { @@ -30950,7 +38721,6 @@ func NewTAnalyticNode() *TAnalyticNode { } func (p *TAnalyticNode) InitDefault() { - *p = TAnalyticNode{} } func (p *TAnalyticNode) GetPartitionExprs() (v []*exprs.TExpr) { @@ -31008,6 +38778,15 @@ func (p *TAnalyticNode) GetOrderByEq() (v *exprs.TExpr) { } return p.OrderByEq } + +var TAnalyticNode_IsColocate_DEFAULT bool + +func (p *TAnalyticNode) GetIsColocate() (v bool) { + if !p.IsSetIsColocate() { + return TAnalyticNode_IsColocate_DEFAULT + } + return *p.IsColocate +} func (p *TAnalyticNode) SetPartitionExprs(val []*exprs.TExpr) { p.PartitionExprs = val } @@ -31035,17 +38814,21 @@ func (p *TAnalyticNode) SetPartitionByEq(val *exprs.TExpr) { func (p *TAnalyticNode) SetOrderByEq(val *exprs.TExpr) { p.OrderByEq = val } +func (p *TAnalyticNode) SetIsColocate(val *bool) { + p.IsColocate = val +} var fieldIDToName_TAnalyticNode = map[int16]string{ - 1: "partition_exprs", - 2: "order_by_exprs", - 3: "analytic_functions", - 4: "window", - 5: "intermediate_tuple_id", - 6: "output_tuple_id", - 7: "buffered_tuple_id", - 8: "partition_by_eq", - 9: "order_by_eq", + 1: "partition_exprs", + 2: "order_by_exprs", + 3: "analytic_functions", + 4: "window", + 5: "intermediate_tuple_id", + 6: "output_tuple_id", + 7: "buffered_tuple_id", + 8: "partition_by_eq", + 9: "order_by_eq", + 10: "is_colocate", } func (p *TAnalyticNode) IsSetWindow() bool { @@ -31064,6 +38847,10 @@ func (p *TAnalyticNode) IsSetOrderByEq() bool { return p.OrderByEq != nil } +func (p *TAnalyticNode) IsSetIsColocate() bool { + return p.IsColocate != nil +} + func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -31094,10 +38881,8 @@ func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPartitionExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -31105,10 +38890,8 @@ func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOrderByExprs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -31116,20 +38899,16 @@ func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetAnalyticFunctions = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { @@ -31137,10 +38916,8 @@ func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIntermediateTupleId = true 
- } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { @@ -31148,47 +38925,46 @@ func (p *TAnalyticNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRUCT { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -31244,109 +39020,136 @@ func (p *TAnalyticNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.PartitionExprs = append(p.PartitionExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.PartitionExprs = _field return nil } - func (p *TAnalyticNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OrderByExprs = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.OrderByExprs = append(p.OrderByExprs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OrderByExprs = _field return nil } - func (p *TAnalyticNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.AnalyticFunctions = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.AnalyticFunctions = append(p.AnalyticFunctions, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.AnalyticFunctions = _field return nil } - func (p *TAnalyticNode) ReadField4(iprot thrift.TProtocol) 
error { - p.Window = NewTAnalyticWindow() - if err := p.Window.Read(iprot); err != nil { + _field := NewTAnalyticWindow() + if err := _field.Read(iprot); err != nil { return err } + p.Window = _field return nil } - func (p *TAnalyticNode) ReadField5(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.IntermediateTupleId = v + _field = v } + p.IntermediateTupleId = _field return nil } - func (p *TAnalyticNode) ReadField6(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = v + _field = v } + p.OutputTupleId = _field return nil } - func (p *TAnalyticNode) ReadField7(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BufferedTupleId = &v + _field = &v } + p.BufferedTupleId = _field return nil } - func (p *TAnalyticNode) ReadField8(iprot thrift.TProtocol) error { - p.PartitionByEq = exprs.NewTExpr() - if err := p.PartitionByEq.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.PartitionByEq = _field return nil } - func (p *TAnalyticNode) ReadField9(iprot thrift.TProtocol) error { - p.OrderByEq = exprs.NewTExpr() - if err := p.OrderByEq.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { + return err + } + p.OrderByEq = _field + return nil +} +func (p *TAnalyticNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { return err + } else { + _field = &v } + p.IsColocate = _field return nil } @@ -31392,7 +39195,10 @@ func (p *TAnalyticNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -31596,11 +39402,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } +func (p *TAnalyticNode) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsColocate() { + if err = oprot.WriteFieldBegin("is_colocate", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsColocate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p *TAnalyticNode) String() string { if p == nil { return "" } return fmt.Sprintf("TAnalyticNode(%+v)", *p) + } func (p *TAnalyticNode) DeepEqual(ano *TAnalyticNode) bool { @@ -31636,6 +39462,9 @@ func (p *TAnalyticNode) DeepEqual(ano *TAnalyticNode) bool { if !p.Field9DeepEqual(ano.OrderByEq) { return false } + if !p.Field10DeepEqual(ano.IsColocate) { + return false + } return true } @@ -31725,6 +39554,18 @@ func (p *TAnalyticNode) Field9DeepEqual(src *exprs.TExpr) bool { } return true } +func (p *TAnalyticNode) Field10DeepEqual(src *bool) bool { + + if p.IsColocate == src { + return true + } else if p.IsColocate == nil || src == nil { + return false + } + if *p.IsColocate != *src { + return false + } + return true +} type TMergeNode struct { TupleId types.TTupleId 
`thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` @@ -31737,7 +39578,6 @@ func NewTMergeNode() *TMergeNode { } func (p *TMergeNode) InitDefault() { - *p = TMergeNode{} } func (p *TMergeNode) GetTupleId() (v types.TTupleId) { @@ -31795,10 +39635,8 @@ func (p *TMergeNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -31806,10 +39644,8 @@ func (p *TMergeNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResultExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -31817,17 +39653,14 @@ func (p *TMergeNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetConstExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -31869,28 +39702,33 @@ RequiredFieldNotSetError: } func (p *TMergeNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TMergeNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ResultExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -31901,28 +39739,31 @@ func (p *TMergeNode) ReadField2(iprot thrift.TProtocol) error { return err } - p.ResultExprLists = append(p.ResultExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ResultExprLists = _field return nil } - func (p *TMergeNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConstExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -31933,11 +39774,12 @@ func (p *TMergeNode) ReadField3(iprot thrift.TProtocol) error { return err } - p.ConstExprLists = append(p.ConstExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ConstExprLists = _field return nil } @@ -31959,7 +39801,6 @@ func (p *TMergeNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto 
WriteFieldStopError @@ -32066,6 +39907,7 @@ func (p *TMergeNode) String() string { return "" } return fmt.Sprintf("TMergeNode(%+v)", *p) + } func (p *TMergeNode) DeepEqual(ano *TMergeNode) bool { @@ -32144,7 +39986,6 @@ func NewTUnionNode() *TUnionNode { } func (p *TUnionNode) InitDefault() { - *p = TUnionNode{} } func (p *TUnionNode) GetTupleId() (v types.TTupleId) { @@ -32211,10 +40052,8 @@ func (p *TUnionNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -32222,10 +40061,8 @@ func (p *TUnionNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResultExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -32233,10 +40070,8 @@ func (p *TUnionNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetConstExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -32244,17 +40079,14 @@ func (p *TUnionNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFirstMaterializedChildIdx = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -32301,28 +40133,33 @@ RequiredFieldNotSetError: } func (p *TUnionNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TUnionNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ResultExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -32333,28 +40170,31 @@ func (p *TUnionNode) ReadField2(iprot thrift.TProtocol) error { return err } - p.ResultExprLists = append(p.ResultExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ResultExprLists = _field return nil } - func (p *TUnionNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConstExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return 
err } @@ -32365,20 +40205,23 @@ func (p *TUnionNode) ReadField3(iprot thrift.TProtocol) error { return err } - p.ConstExprLists = append(p.ConstExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ConstExprLists = _field return nil } - func (p *TUnionNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FirstMaterializedChildIdx = v + _field = v } + p.FirstMaterializedChildIdx = _field return nil } @@ -32404,7 +40247,6 @@ func (p *TUnionNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -32528,6 +40370,7 @@ func (p *TUnionNode) String() string { return "" } return fmt.Sprintf("TUnionNode(%+v)", *p) + } func (p *TUnionNode) DeepEqual(ano *TUnionNode) bool { @@ -32609,6 +40452,7 @@ type TIntersectNode struct { ResultExprLists [][]*exprs.TExpr `thrift:"result_expr_lists,2,required" frugal:"2,required,list>" json:"result_expr_lists"` ConstExprLists [][]*exprs.TExpr `thrift:"const_expr_lists,3,required" frugal:"3,required,list>" json:"const_expr_lists"` FirstMaterializedChildIdx int64 `thrift:"first_materialized_child_idx,4,required" frugal:"4,required,i64" json:"first_materialized_child_idx"` + IsColocate *bool `thrift:"is_colocate,5,optional" frugal:"5,optional,bool" json:"is_colocate,omitempty"` } func NewTIntersectNode() *TIntersectNode { @@ -32616,7 +40460,6 @@ func NewTIntersectNode() *TIntersectNode { } func (p *TIntersectNode) InitDefault() { - *p = TIntersectNode{} } func (p *TIntersectNode) GetTupleId() (v types.TTupleId) { @@ -32634,6 +40477,15 @@ func (p *TIntersectNode) GetConstExprLists() (v [][]*exprs.TExpr) { func (p *TIntersectNode) GetFirstMaterializedChildIdx() (v int64) { return p.FirstMaterializedChildIdx } + +var TIntersectNode_IsColocate_DEFAULT bool + +func (p *TIntersectNode) GetIsColocate() (v bool) { + if !p.IsSetIsColocate() { + return TIntersectNode_IsColocate_DEFAULT + } + return *p.IsColocate +} func (p *TIntersectNode) SetTupleId(val types.TTupleId) { p.TupleId = val } @@ -32646,12 +40498,20 @@ func (p *TIntersectNode) SetConstExprLists(val [][]*exprs.TExpr) { func (p *TIntersectNode) SetFirstMaterializedChildIdx(val int64) { p.FirstMaterializedChildIdx = val } +func (p *TIntersectNode) SetIsColocate(val *bool) { + p.IsColocate = val +} var fieldIDToName_TIntersectNode = map[int16]string{ 1: "tuple_id", 2: "result_expr_lists", 3: "const_expr_lists", 4: "first_materialized_child_idx", + 5: "is_colocate", +} + +func (p *TIntersectNode) IsSetIsColocate() bool { + return p.IsColocate != nil } func (p *TIntersectNode) Read(iprot thrift.TProtocol) (err error) { @@ -32683,10 +40543,8 @@ func (p *TIntersectNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -32694,10 +40552,8 @@ func (p *TIntersectNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResultExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -32705,10 +40561,8 @@ func (p *TIntersectNode) Read(iprot thrift.TProtocol) 
(err error) { goto ReadFieldError } issetConstExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -32716,17 +40570,22 @@ func (p *TIntersectNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFirstMaterializedChildIdx = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -32773,28 +40632,33 @@ RequiredFieldNotSetError: } func (p *TIntersectNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TIntersectNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ResultExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -32805,28 +40669,31 @@ func (p *TIntersectNode) ReadField2(iprot thrift.TProtocol) error { return err } - p.ResultExprLists = append(p.ResultExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ResultExprLists = _field return nil } - func (p *TIntersectNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConstExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -32837,20 +40704,34 @@ func (p *TIntersectNode) ReadField3(iprot thrift.TProtocol) error { return err } - p.ConstExprLists = append(p.ConstExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ConstExprLists = _field return nil } - func (p *TIntersectNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FirstMaterializedChildIdx = v + _field = v } + p.FirstMaterializedChildIdx = _field + return nil +} +func (p *TIntersectNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsColocate = _field return nil } @@ -32876,7 +40757,10 @@ func (p *TIntersectNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = 
p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -32995,11 +40879,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } +func (p *TIntersectNode) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsColocate() { + if err = oprot.WriteFieldBegin("is_colocate", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsColocate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + func (p *TIntersectNode) String() string { if p == nil { return "" } return fmt.Sprintf("TIntersectNode(%+v)", *p) + } func (p *TIntersectNode) DeepEqual(ano *TIntersectNode) bool { @@ -33020,6 +40924,9 @@ func (p *TIntersectNode) DeepEqual(ano *TIntersectNode) bool { if !p.Field4DeepEqual(ano.FirstMaterializedChildIdx) { return false } + if !p.Field5DeepEqual(ano.IsColocate) { + return false + } return true } @@ -33075,12 +40982,25 @@ func (p *TIntersectNode) Field4DeepEqual(src int64) bool { } return true } +func (p *TIntersectNode) Field5DeepEqual(src *bool) bool { + + if p.IsColocate == src { + return true + } else if p.IsColocate == nil || src == nil { + return false + } + if *p.IsColocate != *src { + return false + } + return true +} type TExceptNode struct { TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` ResultExprLists [][]*exprs.TExpr `thrift:"result_expr_lists,2,required" frugal:"2,required,list>" json:"result_expr_lists"` ConstExprLists [][]*exprs.TExpr `thrift:"const_expr_lists,3,required" frugal:"3,required,list>" json:"const_expr_lists"` FirstMaterializedChildIdx int64 `thrift:"first_materialized_child_idx,4,required" frugal:"4,required,i64" json:"first_materialized_child_idx"` + IsColocate *bool `thrift:"is_colocate,5,optional" frugal:"5,optional,bool" json:"is_colocate,omitempty"` } func NewTExceptNode() *TExceptNode { @@ -33088,7 +41008,6 @@ func NewTExceptNode() *TExceptNode { } func (p *TExceptNode) InitDefault() { - *p = TExceptNode{} } func (p *TExceptNode) GetTupleId() (v types.TTupleId) { @@ -33106,6 +41025,15 @@ func (p *TExceptNode) GetConstExprLists() (v [][]*exprs.TExpr) { func (p *TExceptNode) GetFirstMaterializedChildIdx() (v int64) { return p.FirstMaterializedChildIdx } + +var TExceptNode_IsColocate_DEFAULT bool + +func (p *TExceptNode) GetIsColocate() (v bool) { + if !p.IsSetIsColocate() { + return TExceptNode_IsColocate_DEFAULT + } + return *p.IsColocate +} func (p *TExceptNode) SetTupleId(val types.TTupleId) { p.TupleId = val } @@ -33118,12 +41046,20 @@ func (p *TExceptNode) SetConstExprLists(val [][]*exprs.TExpr) { func (p *TExceptNode) SetFirstMaterializedChildIdx(val int64) { p.FirstMaterializedChildIdx = val } +func (p *TExceptNode) SetIsColocate(val *bool) { + p.IsColocate = val +} var fieldIDToName_TExceptNode = map[int16]string{ 1: "tuple_id", 2: "result_expr_lists", 3: "const_expr_lists", 4: "first_materialized_child_idx", + 5: "is_colocate", +} + +func (p *TExceptNode) IsSetIsColocate() bool { + return p.IsColocate != nil } func (p *TExceptNode) Read(iprot thrift.TProtocol) (err error) { @@ -33155,10 +41091,8 @@ func (p *TExceptNode) Read(iprot 
thrift.TProtocol) (err error) { goto ReadFieldError } issetTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -33166,10 +41100,8 @@ func (p *TExceptNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetResultExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -33177,10 +41109,8 @@ func (p *TExceptNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetConstExprLists = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -33188,17 +41118,22 @@ func (p *TExceptNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFirstMaterializedChildIdx = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -33245,28 +41180,33 @@ RequiredFieldNotSetError: } func (p *TExceptNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = v + _field = v } + p.TupleId = _field return nil } - func (p *TExceptNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ResultExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -33277,28 +41217,31 @@ func (p *TExceptNode) ReadField2(iprot thrift.TProtocol) error { return err } - p.ResultExprLists = append(p.ResultExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ResultExprLists = _field return nil } - func (p *TExceptNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ConstExprLists = make([][]*exprs.TExpr, 0, size) + _field := make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _, size, err := iprot.ReadListBegin() if err != nil { return err } _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() + _elem1 := &values[i] + _elem1.InitDefault() + if err := _elem1.Read(iprot); err != nil { return err } @@ -33309,20 +41252,34 @@ func (p *TExceptNode) ReadField3(iprot thrift.TProtocol) error { return err } - p.ConstExprLists = append(p.ConstExprLists, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err 
!= nil { return err } + p.ConstExprLists = _field return nil } - func (p *TExceptNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.FirstMaterializedChildIdx = v + _field = v + } + p.FirstMaterializedChildIdx = _field + return nil +} +func (p *TExceptNode) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } + p.IsColocate = _field return nil } @@ -33348,7 +41305,10 @@ func (p *TExceptNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -33467,11 +41427,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } +func (p *TExceptNode) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsColocate() { + if err = oprot.WriteFieldBegin("is_colocate", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsColocate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + func (p *TExceptNode) String() string { if p == nil { return "" } return fmt.Sprintf("TExceptNode(%+v)", *p) + } func (p *TExceptNode) DeepEqual(ano *TExceptNode) bool { @@ -33492,6 +41472,9 @@ func (p *TExceptNode) DeepEqual(ano *TExceptNode) bool { if !p.Field4DeepEqual(ano.FirstMaterializedChildIdx) { return false } + if !p.Field5DeepEqual(ano.IsColocate) { + return false + } return true } @@ -33547,11 +41530,24 @@ func (p *TExceptNode) Field4DeepEqual(src int64) bool { } return true } +func (p *TExceptNode) Field5DeepEqual(src *bool) bool { + + if p.IsColocate == src { + return true + } else if p.IsColocate == nil || src == nil { + return false + } + if *p.IsColocate != *src { + return false + } + return true +} type TExchangeNode struct { - InputRowTuples []types.TTupleId `thrift:"input_row_tuples,1,required" frugal:"1,required,list" json:"input_row_tuples"` - SortInfo *TSortInfo `thrift:"sort_info,2,optional" frugal:"2,optional,TSortInfo" json:"sort_info,omitempty"` - Offset *int64 `thrift:"offset,3,optional" frugal:"3,optional,i64" json:"offset,omitempty"` + InputRowTuples []types.TTupleId `thrift:"input_row_tuples,1,required" frugal:"1,required,list" json:"input_row_tuples"` + SortInfo *TSortInfo `thrift:"sort_info,2,optional" frugal:"2,optional,TSortInfo" json:"sort_info,omitempty"` + Offset *int64 `thrift:"offset,3,optional" frugal:"3,optional,i64" json:"offset,omitempty"` + PartitionType *partitions.TPartitionType `thrift:"partition_type,4,optional" frugal:"4,optional,TPartitionType" json:"partition_type,omitempty"` } func NewTExchangeNode() *TExchangeNode { @@ -33559,7 +41555,6 @@ func NewTExchangeNode() *TExchangeNode { } func (p *TExchangeNode) InitDefault() { - *p = TExchangeNode{} } func (p *TExchangeNode) GetInputRowTuples() (v []types.TTupleId) { @@ -33583,6 +41578,15 @@ func (p *TExchangeNode) GetOffset() (v int64) { } return *p.Offset } + +var TExchangeNode_PartitionType_DEFAULT partitions.TPartitionType + +func (p *TExchangeNode) GetPartitionType() (v 
partitions.TPartitionType) { + if !p.IsSetPartitionType() { + return TExchangeNode_PartitionType_DEFAULT + } + return *p.PartitionType +} func (p *TExchangeNode) SetInputRowTuples(val []types.TTupleId) { p.InputRowTuples = val } @@ -33592,11 +41596,15 @@ func (p *TExchangeNode) SetSortInfo(val *TSortInfo) { func (p *TExchangeNode) SetOffset(val *int64) { p.Offset = val } +func (p *TExchangeNode) SetPartitionType(val *partitions.TPartitionType) { + p.PartitionType = val +} var fieldIDToName_TExchangeNode = map[int16]string{ 1: "input_row_tuples", 2: "sort_info", 3: "offset", + 4: "partition_type", } func (p *TExchangeNode) IsSetSortInfo() bool { @@ -33607,6 +41615,10 @@ func (p *TExchangeNode) IsSetOffset() bool { return p.Offset != nil } +func (p *TExchangeNode) IsSetPartitionType() bool { + return p.PartitionType != nil +} + func (p *TExchangeNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -33633,37 +41645,38 @@ func (p *TExchangeNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetInputRowTuples = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -33699,8 +41712,9 @@ func (p *TExchangeNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.InputRowTuples = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -33708,28 +41722,43 @@ func (p *TExchangeNode) ReadField1(iprot thrift.TProtocol) error { _elem = v } - p.InputRowTuples = append(p.InputRowTuples, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.InputRowTuples = _field return nil } - func (p *TExchangeNode) ReadField2(iprot thrift.TProtocol) error { - p.SortInfo = NewTSortInfo() - if err := p.SortInfo.Read(iprot); err != nil { + _field := NewTSortInfo() + if err := _field.Read(iprot); err != nil { return err } + p.SortInfo = _field return nil } - func (p *TExchangeNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Offset = &v + _field = &v } + p.Offset = _field + return nil +} +func (p *TExchangeNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *partitions.TPartitionType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := partitions.TPartitionType(v) + _field = &tmp + } + p.PartitionType = _field return nil } @@ -33751,7 +41780,10 @@ func (p 
*TExchangeNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -33833,11 +41865,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TExchangeNode) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionType() { + if err = oprot.WriteFieldBegin("partition_type", thrift.I32, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.PartitionType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + func (p *TExchangeNode) String() string { if p == nil { return "" } return fmt.Sprintf("TExchangeNode(%+v)", *p) + } func (p *TExchangeNode) DeepEqual(ano *TExchangeNode) bool { @@ -33855,6 +41907,9 @@ func (p *TExchangeNode) DeepEqual(ano *TExchangeNode) bool { if !p.Field3DeepEqual(ano.Offset) { return false } + if !p.Field4DeepEqual(ano.PartitionType) { + return false + } return true } @@ -33890,6 +41945,18 @@ func (p *TExchangeNode) Field3DeepEqual(src *int64) bool { } return true } +func (p *TExchangeNode) Field4DeepEqual(src *partitions.TPartitionType) bool { + + if p.PartitionType == src { + return true + } else if p.PartitionType == nil || src == nil { + return false + } + if *p.PartitionType != *src { + return false + } + return true +} type TOlapRewriteNode struct { Columns []*exprs.TExpr `thrift:"columns,1,required" frugal:"1,required,list" json:"columns"` @@ -33902,7 +41969,6 @@ func NewTOlapRewriteNode() *TOlapRewriteNode { } func (p *TOlapRewriteNode) InitDefault() { - *p = TOlapRewriteNode{} } func (p *TOlapRewriteNode) GetColumns() (v []*exprs.TExpr) { @@ -33960,10 +42026,8 @@ func (p *TOlapRewriteNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumns = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { @@ -33971,10 +42035,8 @@ func (p *TOlapRewriteNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetColumnTypes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -33982,17 +42044,14 @@ func (p *TOlapRewriteNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetOutputTupleId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -34038,47 +42097,56 @@ func (p *TOlapRewriteNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Columns = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := 
&values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Columns = append(p.Columns, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Columns = _field return nil } - func (p *TOlapRewriteNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ColumnTypes = make([]*types.TColumnType, 0, size) + _field := make([]*types.TColumnType, 0, size) + values := make([]types.TColumnType, size) for i := 0; i < size; i++ { - _elem := types.NewTColumnType() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ColumnTypes = append(p.ColumnTypes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ColumnTypes = _field return nil } - func (p *TOlapRewriteNode) ReadField3(iprot thrift.TProtocol) error { + + var _field types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = v + _field = v } + p.OutputTupleId = _field return nil } @@ -34100,7 +42168,6 @@ func (p *TOlapRewriteNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -34191,6 +42258,7 @@ func (p *TOlapRewriteNode) String() string { return "" } return fmt.Sprintf("TOlapRewriteNode(%+v)", *p) + } func (p *TOlapRewriteNode) DeepEqual(ano *TOlapRewriteNode) bool { @@ -34255,7 +42323,6 @@ func NewTTableFunctionNode() *TTableFunctionNode { } func (p *TTableFunctionNode) InitDefault() { - *p = TTableFunctionNode{} } var TTableFunctionNode_FnCallExprList_DEFAULT []*exprs.TExpr @@ -34319,27 +42386,22 @@ func (p *TTableFunctionNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -34369,28 +42431,32 @@ func (p *TTableFunctionNode) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.FnCallExprList = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.FnCallExprList = append(p.FnCallExprList, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.FnCallExprList = _field return nil } - func (p *TTableFunctionNode) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OutputSlotIds = make([]types.TSlotId, 0, size) + _field := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -34398,11 +42464,12 @@ func (p *TTableFunctionNode) ReadField2(iprot thrift.TProtocol) error { _elem = v } - 
p.OutputSlotIds = append(p.OutputSlotIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OutputSlotIds = _field return nil } @@ -34420,7 +42487,6 @@ func (p *TTableFunctionNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -34498,6 +42564,7 @@ func (p *TTableFunctionNode) String() string { return "" } return fmt.Sprintf("TTableFunctionNode(%+v)", *p) + } func (p *TTableFunctionNode) DeepEqual(ano *TTableFunctionNode) bool { @@ -34560,13 +42627,10 @@ func NewTBackendResourceProfile() *TBackendResourceProfile { } func (p *TBackendResourceProfile) InitDefault() { - *p = TBackendResourceProfile{ - - MinReservation: 0, - MaxReservation: 12188490189880, - SpillableBufferSize: 2097152, - MaxRowBufferSize: 4294967296, - } + p.MinReservation = 0 + p.MaxReservation = 12188490189880 + p.SpillableBufferSize = 2097152 + p.MaxRowBufferSize = 4294967296 } func (p *TBackendResourceProfile) GetMinReservation() (v int64) { @@ -34649,10 +42713,8 @@ func (p *TBackendResourceProfile) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetMinReservation = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -34660,37 +42722,30 @@ func (p *TBackendResourceProfile) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetMaxReservation = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -34727,38 +42782,47 @@ RequiredFieldNotSetError: } func (p *TBackendResourceProfile) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MinReservation = v + _field = v } + p.MinReservation = _field return nil } - func (p *TBackendResourceProfile) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxReservation = v + _field = v } + p.MaxReservation = _field return nil } - func (p *TBackendResourceProfile) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.SpillableBufferSize = v + _field = v } + p.SpillableBufferSize = _field return nil } - func (p *TBackendResourceProfile) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.MaxRowBufferSize = v + _field = v } + p.MaxRowBufferSize = _field return nil } @@ -34784,7 +42848,6 @@ func (p *TBackendResourceProfile) 
Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -34842,7 +42905,392 @@ func (p *TBackendResourceProfile) writeField3(oprot thrift.TProtocol) (err error if err = oprot.WriteFieldBegin("spillable_buffer_size", thrift.I64, 3); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.SpillableBufferSize); err != nil { + if err := oprot.WriteI64(p.SpillableBufferSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TBackendResourceProfile) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxRowBufferSize() { + if err = oprot.WriteFieldBegin("max_row_buffer_size", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.MaxRowBufferSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TBackendResourceProfile) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TBackendResourceProfile(%+v)", *p) + +} + +func (p *TBackendResourceProfile) DeepEqual(ano *TBackendResourceProfile) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.MinReservation) { + return false + } + if !p.Field2DeepEqual(ano.MaxReservation) { + return false + } + if !p.Field3DeepEqual(ano.SpillableBufferSize) { + return false + } + if !p.Field4DeepEqual(ano.MaxRowBufferSize) { + return false + } + return true +} + +func (p *TBackendResourceProfile) Field1DeepEqual(src int64) bool { + + if p.MinReservation != src { + return false + } + return true +} +func (p *TBackendResourceProfile) Field2DeepEqual(src int64) bool { + + if p.MaxReservation != src { + return false + } + return true +} +func (p *TBackendResourceProfile) Field3DeepEqual(src int64) bool { + + if p.SpillableBufferSize != src { + return false + } + return true +} +func (p *TBackendResourceProfile) Field4DeepEqual(src int64) bool { + + if p.MaxRowBufferSize != src { + return false + } + return true +} + +type TAssertNumRowsNode struct { + DesiredNumRows *int64 `thrift:"desired_num_rows,1,optional" frugal:"1,optional,i64" json:"desired_num_rows,omitempty"` + SubqueryString *string `thrift:"subquery_string,2,optional" frugal:"2,optional,string" json:"subquery_string,omitempty"` + Assertion *TAssertion `thrift:"assertion,3,optional" frugal:"3,optional,TAssertion" json:"assertion,omitempty"` + ShouldConvertOutputToNullable *bool `thrift:"should_convert_output_to_nullable,4,optional" frugal:"4,optional,bool" json:"should_convert_output_to_nullable,omitempty"` +} + +func NewTAssertNumRowsNode() *TAssertNumRowsNode { + return &TAssertNumRowsNode{} +} + +func (p *TAssertNumRowsNode) InitDefault() { +} + +var TAssertNumRowsNode_DesiredNumRows_DEFAULT int64 + +func (p *TAssertNumRowsNode) GetDesiredNumRows() (v int64) { + if !p.IsSetDesiredNumRows() { + return TAssertNumRowsNode_DesiredNumRows_DEFAULT + } + return *p.DesiredNumRows +} 
+ +var TAssertNumRowsNode_SubqueryString_DEFAULT string + +func (p *TAssertNumRowsNode) GetSubqueryString() (v string) { + if !p.IsSetSubqueryString() { + return TAssertNumRowsNode_SubqueryString_DEFAULT + } + return *p.SubqueryString +} + +var TAssertNumRowsNode_Assertion_DEFAULT TAssertion + +func (p *TAssertNumRowsNode) GetAssertion() (v TAssertion) { + if !p.IsSetAssertion() { + return TAssertNumRowsNode_Assertion_DEFAULT + } + return *p.Assertion +} + +var TAssertNumRowsNode_ShouldConvertOutputToNullable_DEFAULT bool + +func (p *TAssertNumRowsNode) GetShouldConvertOutputToNullable() (v bool) { + if !p.IsSetShouldConvertOutputToNullable() { + return TAssertNumRowsNode_ShouldConvertOutputToNullable_DEFAULT + } + return *p.ShouldConvertOutputToNullable +} +func (p *TAssertNumRowsNode) SetDesiredNumRows(val *int64) { + p.DesiredNumRows = val +} +func (p *TAssertNumRowsNode) SetSubqueryString(val *string) { + p.SubqueryString = val +} +func (p *TAssertNumRowsNode) SetAssertion(val *TAssertion) { + p.Assertion = val +} +func (p *TAssertNumRowsNode) SetShouldConvertOutputToNullable(val *bool) { + p.ShouldConvertOutputToNullable = val +} + +var fieldIDToName_TAssertNumRowsNode = map[int16]string{ + 1: "desired_num_rows", + 2: "subquery_string", + 3: "assertion", + 4: "should_convert_output_to_nullable", +} + +func (p *TAssertNumRowsNode) IsSetDesiredNumRows() bool { + return p.DesiredNumRows != nil +} + +func (p *TAssertNumRowsNode) IsSetSubqueryString() bool { + return p.SubqueryString != nil +} + +func (p *TAssertNumRowsNode) IsSetAssertion() bool { + return p.Assertion != nil +} + +func (p *TAssertNumRowsNode) IsSetShouldConvertOutputToNullable() bool { + return p.ShouldConvertOutputToNullable != nil +} + +func (p *TAssertNumRowsNode) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I32 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAssertNumRowsNode[fieldId]), err) 
+SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TAssertNumRowsNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DesiredNumRows = _field + return nil +} +func (p *TAssertNumRowsNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SubqueryString = _field + return nil +} +func (p *TAssertNumRowsNode) ReadField3(iprot thrift.TProtocol) error { + + var _field *TAssertion + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TAssertion(v) + _field = &tmp + } + p.Assertion = _field + return nil +} +func (p *TAssertNumRowsNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ShouldConvertOutputToNullable = _field + return nil +} + +func (p *TAssertNumRowsNode) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TAssertNumRowsNode"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TAssertNumRowsNode) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDesiredNumRows() { + if err = oprot.WriteFieldBegin("desired_num_rows", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DesiredNumRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TAssertNumRowsNode) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSubqueryString() { + if err = oprot.WriteFieldBegin("subquery_string", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SubqueryString); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), 
err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TAssertNumRowsNode) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetAssertion() { + if err = oprot.WriteFieldBegin("assertion", thrift.I32, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.Assertion)); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -34856,12 +43304,12 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } -func (p *TBackendResourceProfile) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxRowBufferSize() { - if err = oprot.WriteFieldBegin("max_row_buffer_size", thrift.I64, 4); err != nil { +func (p *TAssertNumRowsNode) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetShouldConvertOutputToNullable() { + if err = oprot.WriteFieldBegin("should_convert_output_to_nullable", thrift.BOOL, 4); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI64(p.MaxRowBufferSize); err != nil { + if err := oprot.WriteBool(*p.ShouldConvertOutputToNullable); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -34875,135 +43323,141 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TBackendResourceProfile) String() string { +func (p *TAssertNumRowsNode) String() string { if p == nil { return "" } - return fmt.Sprintf("TBackendResourceProfile(%+v)", *p) + return fmt.Sprintf("TAssertNumRowsNode(%+v)", *p) + } -func (p *TBackendResourceProfile) DeepEqual(ano *TBackendResourceProfile) bool { +func (p *TAssertNumRowsNode) DeepEqual(ano *TAssertNumRowsNode) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.MinReservation) { + if !p.Field1DeepEqual(ano.DesiredNumRows) { return false } - if !p.Field2DeepEqual(ano.MaxReservation) { + if !p.Field2DeepEqual(ano.SubqueryString) { return false } - if !p.Field3DeepEqual(ano.SpillableBufferSize) { + if !p.Field3DeepEqual(ano.Assertion) { return false } - if !p.Field4DeepEqual(ano.MaxRowBufferSize) { + if !p.Field4DeepEqual(ano.ShouldConvertOutputToNullable) { return false } return true } -func (p *TBackendResourceProfile) Field1DeepEqual(src int64) bool { +func (p *TAssertNumRowsNode) Field1DeepEqual(src *int64) bool { - if p.MinReservation != src { + if p.DesiredNumRows == src { + return true + } else if p.DesiredNumRows == nil || src == nil { + return false + } + if *p.DesiredNumRows != *src { return false } return true } -func (p *TBackendResourceProfile) Field2DeepEqual(src int64) bool { +func (p *TAssertNumRowsNode) Field2DeepEqual(src *string) bool { - if p.MaxReservation != src { + if p.SubqueryString == src { + return true + } else if p.SubqueryString == nil || src == nil { + return false + } + if strings.Compare(*p.SubqueryString, *src) != 0 { return false } return true } -func (p *TBackendResourceProfile) Field3DeepEqual(src int64) bool { +func (p *TAssertNumRowsNode) Field3DeepEqual(src *TAssertion) bool { - if p.SpillableBufferSize != src { + if p.Assertion == src { + return true + } else if p.Assertion == nil || src == nil { + return false + } + if *p.Assertion != *src { return false } return true } -func (p *TBackendResourceProfile) Field4DeepEqual(src int64) bool { +func (p *TAssertNumRowsNode) Field4DeepEqual(src *bool) bool { - if p.MaxRowBufferSize != src { + if p.ShouldConvertOutputToNullable == src { + return 
true + } else if p.ShouldConvertOutputToNullable == nil || src == nil { + return false + } + if *p.ShouldConvertOutputToNullable != *src { return false } return true } -type TAssertNumRowsNode struct { - DesiredNumRows *int64 `thrift:"desired_num_rows,1,optional" frugal:"1,optional,i64" json:"desired_num_rows,omitempty"` - SubqueryString *string `thrift:"subquery_string,2,optional" frugal:"2,optional,string" json:"subquery_string,omitempty"` - Assertion *TAssertion `thrift:"assertion,3,optional" frugal:"3,optional,TAssertion" json:"assertion,omitempty"` +type TTopnFilterDesc struct { + SourceNodeId int32 `thrift:"source_node_id,1,required" frugal:"1,required,i32" json:"source_node_id"` + IsAsc bool `thrift:"is_asc,2,required" frugal:"2,required,bool" json:"is_asc"` + NullFirst bool `thrift:"null_first,3,required" frugal:"3,required,bool" json:"null_first"` + TargetNodeIdToTargetExpr map[types.TPlanNodeId]*exprs.TExpr `thrift:"target_node_id_to_target_expr,4,required" frugal:"4,required,map" json:"target_node_id_to_target_expr"` } -func NewTAssertNumRowsNode() *TAssertNumRowsNode { - return &TAssertNumRowsNode{} +func NewTTopnFilterDesc() *TTopnFilterDesc { + return &TTopnFilterDesc{} } -func (p *TAssertNumRowsNode) InitDefault() { - *p = TAssertNumRowsNode{} +func (p *TTopnFilterDesc) InitDefault() { } -var TAssertNumRowsNode_DesiredNumRows_DEFAULT int64 - -func (p *TAssertNumRowsNode) GetDesiredNumRows() (v int64) { - if !p.IsSetDesiredNumRows() { - return TAssertNumRowsNode_DesiredNumRows_DEFAULT - } - return *p.DesiredNumRows +func (p *TTopnFilterDesc) GetSourceNodeId() (v int32) { + return p.SourceNodeId } -var TAssertNumRowsNode_SubqueryString_DEFAULT string - -func (p *TAssertNumRowsNode) GetSubqueryString() (v string) { - if !p.IsSetSubqueryString() { - return TAssertNumRowsNode_SubqueryString_DEFAULT - } - return *p.SubqueryString +func (p *TTopnFilterDesc) GetIsAsc() (v bool) { + return p.IsAsc } -var TAssertNumRowsNode_Assertion_DEFAULT TAssertion - -func (p *TAssertNumRowsNode) GetAssertion() (v TAssertion) { - if !p.IsSetAssertion() { - return TAssertNumRowsNode_Assertion_DEFAULT - } - return *p.Assertion -} -func (p *TAssertNumRowsNode) SetDesiredNumRows(val *int64) { - p.DesiredNumRows = val +func (p *TTopnFilterDesc) GetNullFirst() (v bool) { + return p.NullFirst } -func (p *TAssertNumRowsNode) SetSubqueryString(val *string) { - p.SubqueryString = val + +func (p *TTopnFilterDesc) GetTargetNodeIdToTargetExpr() (v map[types.TPlanNodeId]*exprs.TExpr) { + return p.TargetNodeIdToTargetExpr } -func (p *TAssertNumRowsNode) SetAssertion(val *TAssertion) { - p.Assertion = val +func (p *TTopnFilterDesc) SetSourceNodeId(val int32) { + p.SourceNodeId = val } - -var fieldIDToName_TAssertNumRowsNode = map[int16]string{ - 1: "desired_num_rows", - 2: "subquery_string", - 3: "assertion", +func (p *TTopnFilterDesc) SetIsAsc(val bool) { + p.IsAsc = val } - -func (p *TAssertNumRowsNode) IsSetDesiredNumRows() bool { - return p.DesiredNumRows != nil +func (p *TTopnFilterDesc) SetNullFirst(val bool) { + p.NullFirst = val } - -func (p *TAssertNumRowsNode) IsSetSubqueryString() bool { - return p.SubqueryString != nil +func (p *TTopnFilterDesc) SetTargetNodeIdToTargetExpr(val map[types.TPlanNodeId]*exprs.TExpr) { + p.TargetNodeIdToTargetExpr = val } -func (p *TAssertNumRowsNode) IsSetAssertion() bool { - return p.Assertion != nil +var fieldIDToName_TTopnFilterDesc = map[int16]string{ + 1: "source_node_id", + 2: "is_asc", + 3: "null_first", + 4: "target_node_id_to_target_expr", } -func (p 
*TAssertNumRowsNode) Read(iprot thrift.TProtocol) (err error) { +func (p *TTopnFilterDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 + var issetSourceNodeId bool = false + var issetIsAsc bool = false + var issetNullFirst bool = false + var issetTargetNodeIdToTargetExpr bool = false if _, err = iprot.ReadStructBegin(); err != nil { goto ReadStructBeginError @@ -35020,41 +43474,46 @@ func (p *TAssertNumRowsNode) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetSourceNodeId = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + issetIsAsc = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + issetNullFirst = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + issetTargetNodeIdToTargetExpr = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -35063,13 +43522,32 @@ func (p *TAssertNumRowsNode) Read(iprot thrift.TProtocol) (err error) { goto ReadStructEndError } + if !issetSourceNodeId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetIsAsc { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetNullFirst { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetTargetNodeIdToTargetExpr { + fieldId = 4 + goto RequiredFieldNotSetError + } return nil ReadStructBeginError: return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAssertNumRowsNode[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTopnFilterDesc[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -35077,39 +43555,76 @@ ReadFieldEndError: return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTopnFilterDesc[fieldId])) } -func (p *TAssertNumRowsNode) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *TTopnFilterDesc) ReadField1(iprot thrift.TProtocol) error { + + var 
_field int32 + if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DesiredNumRows = &v + _field = v } + p.SourceNodeId = _field return nil } +func (p *TTopnFilterDesc) ReadField2(iprot thrift.TProtocol) error { -func (p *TAssertNumRowsNode) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - p.SubqueryString = &v + _field = v } + p.IsAsc = _field return nil } +func (p *TTopnFilterDesc) ReadField3(iprot thrift.TProtocol) error { -func (p *TAssertNumRowsNode) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { + var _field bool + if v, err := iprot.ReadBool(); err != nil { return err } else { - tmp := TAssertion(v) - p.Assertion = &tmp + _field = v } + p.NullFirst = _field return nil } +func (p *TTopnFilterDesc) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } -func (p *TAssertNumRowsNode) Write(oprot thrift.TProtocol) (err error) { + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TargetNodeIdToTargetExpr = _field + return nil +} + +func (p *TTopnFilterDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("TAssertNumRowsNode"); err != nil { + if err = oprot.WriteStructBegin("TTopnFilterDesc"); err != nil { goto WriteStructBeginError } if p != nil { @@ -35125,7 +43640,10 @@ func (p *TAssertNumRowsNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -35144,17 +43662,15 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TAssertNumRowsNode) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetDesiredNumRows() { - if err = oprot.WriteFieldBegin("desired_num_rows", thrift.I64, 1); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI64(*p.DesiredNumRows); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p *TTopnFilterDesc) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("source_node_id", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.SourceNodeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -35163,17 +43679,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TAssertNumRowsNode) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetSubqueryString() { - if err = oprot.WriteFieldBegin("subquery_string", thrift.STRING, 2); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteString(*p.SubqueryString); err != nil { - return err - } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError - } +func (p 
*TTopnFilterDesc) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("is_asc", thrift.BOOL, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsAsc); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } return nil WriteFieldBeginError: @@ -35182,100 +43696,132 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } -func (p *TAssertNumRowsNode) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAssertion() { - if err = oprot.WriteFieldBegin("assertion", thrift.I32, 3); err != nil { - goto WriteFieldBeginError - } - if err := oprot.WriteI32(int32(*p.Assertion)); err != nil { +func (p *TTopnFilterDesc) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("null_first", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.NullFirst); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TTopnFilterDesc) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("target_node_id_to_target_expr", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.TargetNodeIdToTargetExpr)); err != nil { + return err + } + for k, v := range p.TargetNodeIdToTargetExpr { + if err := oprot.WriteI32(k); err != nil { return err } - if err = oprot.WriteFieldEnd(); err != nil { - goto WriteFieldEndError + if err := v.Write(oprot); err != nil { + return err } } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) } -func (p *TAssertNumRowsNode) String() string { +func (p *TTopnFilterDesc) String() string { if p == nil { return "" } - return fmt.Sprintf("TAssertNumRowsNode(%+v)", *p) + return fmt.Sprintf("TTopnFilterDesc(%+v)", *p) + } -func (p *TAssertNumRowsNode) DeepEqual(ano *TAssertNumRowsNode) bool { +func (p *TTopnFilterDesc) DeepEqual(ano *TTopnFilterDesc) bool { if p == ano { return true } else if p == nil || ano == nil { return false } - if !p.Field1DeepEqual(ano.DesiredNumRows) { + if !p.Field1DeepEqual(ano.SourceNodeId) { return false } - if !p.Field2DeepEqual(ano.SubqueryString) { + if !p.Field2DeepEqual(ano.IsAsc) { return false } - if !p.Field3DeepEqual(ano.Assertion) { + if !p.Field3DeepEqual(ano.NullFirst) { + return false + } + if !p.Field4DeepEqual(ano.TargetNodeIdToTargetExpr) { return false } return true } -func (p *TAssertNumRowsNode) Field1DeepEqual(src *int64) bool { +func (p *TTopnFilterDesc) Field1DeepEqual(src int32) bool { - if p.DesiredNumRows == src { - return true - } else if p.DesiredNumRows == nil || src == nil { - return false - } - if *p.DesiredNumRows != *src { + if p.SourceNodeId != src { return false } 
return true } -func (p *TAssertNumRowsNode) Field2DeepEqual(src *string) bool { +func (p *TTopnFilterDesc) Field2DeepEqual(src bool) bool { - if p.SubqueryString == src { - return true - } else if p.SubqueryString == nil || src == nil { + if p.IsAsc != src { return false } - if strings.Compare(*p.SubqueryString, *src) != 0 { + return true +} +func (p *TTopnFilterDesc) Field3DeepEqual(src bool) bool { + + if p.NullFirst != src { return false } return true } -func (p *TAssertNumRowsNode) Field3DeepEqual(src *TAssertion) bool { +func (p *TTopnFilterDesc) Field4DeepEqual(src map[types.TPlanNodeId]*exprs.TExpr) bool { - if p.Assertion == src { - return true - } else if p.Assertion == nil || src == nil { + if len(p.TargetNodeIdToTargetExpr) != len(src) { return false } - if *p.Assertion != *src { - return false + for k, v := range p.TargetNodeIdToTargetExpr { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } } return true } type TRuntimeFilterDesc struct { - FilterId int32 `thrift:"filter_id,1,required" frugal:"1,required,i32" json:"filter_id"` - SrcExpr *exprs.TExpr `thrift:"src_expr,2,required" frugal:"2,required,exprs.TExpr" json:"src_expr"` - ExprOrder int32 `thrift:"expr_order,3,required" frugal:"3,required,i32" json:"expr_order"` - PlanIdToTargetExpr map[types.TPlanNodeId]*exprs.TExpr `thrift:"planId_to_target_expr,4,required" frugal:"4,required,map" json:"planId_to_target_expr"` - IsBroadcastJoin bool `thrift:"is_broadcast_join,5,required" frugal:"5,required,bool" json:"is_broadcast_join"` - HasLocalTargets bool `thrift:"has_local_targets,6,required" frugal:"6,required,bool" json:"has_local_targets"` - HasRemoteTargets bool `thrift:"has_remote_targets,7,required" frugal:"7,required,bool" json:"has_remote_targets"` - Type TRuntimeFilterType `thrift:"type,8,required" frugal:"8,required,TRuntimeFilterType" json:"type"` - BloomFilterSizeBytes *int64 `thrift:"bloom_filter_size_bytes,9,optional" frugal:"9,optional,i64" json:"bloom_filter_size_bytes,omitempty"` - BitmapTargetExpr *exprs.TExpr `thrift:"bitmap_target_expr,10,optional" frugal:"10,optional,exprs.TExpr" json:"bitmap_target_expr,omitempty"` - BitmapFilterNotIn *bool `thrift:"bitmap_filter_not_in,11,optional" frugal:"11,optional,bool" json:"bitmap_filter_not_in,omitempty"` - OptRemoteRf *bool `thrift:"opt_remote_rf,12,optional" frugal:"12,optional,bool" json:"opt_remote_rf,omitempty"` + FilterId int32 `thrift:"filter_id,1,required" frugal:"1,required,i32" json:"filter_id"` + SrcExpr *exprs.TExpr `thrift:"src_expr,2,required" frugal:"2,required,exprs.TExpr" json:"src_expr"` + ExprOrder int32 `thrift:"expr_order,3,required" frugal:"3,required,i32" json:"expr_order"` + PlanIdToTargetExpr map[types.TPlanNodeId]*exprs.TExpr `thrift:"planId_to_target_expr,4,required" frugal:"4,required,map" json:"planId_to_target_expr"` + IsBroadcastJoin bool `thrift:"is_broadcast_join,5,required" frugal:"5,required,bool" json:"is_broadcast_join"` + HasLocalTargets bool `thrift:"has_local_targets,6,required" frugal:"6,required,bool" json:"has_local_targets"` + HasRemoteTargets bool `thrift:"has_remote_targets,7,required" frugal:"7,required,bool" json:"has_remote_targets"` + Type TRuntimeFilterType `thrift:"type,8,required" frugal:"8,required,TRuntimeFilterType" json:"type"` + BloomFilterSizeBytes *int64 `thrift:"bloom_filter_size_bytes,9,optional" frugal:"9,optional,i64" json:"bloom_filter_size_bytes,omitempty"` + BitmapTargetExpr *exprs.TExpr `thrift:"bitmap_target_expr,10,optional" frugal:"10,optional,exprs.TExpr" 
json:"bitmap_target_expr,omitempty"` + BitmapFilterNotIn *bool `thrift:"bitmap_filter_not_in,11,optional" frugal:"11,optional,bool" json:"bitmap_filter_not_in,omitempty"` + OptRemoteRf *bool `thrift:"opt_remote_rf,12,optional" frugal:"12,optional,bool" json:"opt_remote_rf,omitempty"` + MinMaxType *TMinMaxRuntimeFilterType `thrift:"min_max_type,13,optional" frugal:"13,optional,TMinMaxRuntimeFilterType" json:"min_max_type,omitempty"` + BloomFilterSizeCalculatedByNdv *bool `thrift:"bloom_filter_size_calculated_by_ndv,14,optional" frugal:"14,optional,bool" json:"bloom_filter_size_calculated_by_ndv,omitempty"` + NullAware *bool `thrift:"null_aware,15,optional" frugal:"15,optional,bool" json:"null_aware,omitempty"` + SyncFilterSize *bool `thrift:"sync_filter_size,16,optional" frugal:"16,optional,bool" json:"sync_filter_size,omitempty"` } func NewTRuntimeFilterDesc() *TRuntimeFilterDesc { @@ -35283,7 +43829,6 @@ func NewTRuntimeFilterDesc() *TRuntimeFilterDesc { } func (p *TRuntimeFilterDesc) InitDefault() { - *p = TRuntimeFilterDesc{} } func (p *TRuntimeFilterDesc) GetFilterId() (v int32) { @@ -35358,6 +43903,42 @@ func (p *TRuntimeFilterDesc) GetOptRemoteRf() (v bool) { } return *p.OptRemoteRf } + +var TRuntimeFilterDesc_MinMaxType_DEFAULT TMinMaxRuntimeFilterType + +func (p *TRuntimeFilterDesc) GetMinMaxType() (v TMinMaxRuntimeFilterType) { + if !p.IsSetMinMaxType() { + return TRuntimeFilterDesc_MinMaxType_DEFAULT + } + return *p.MinMaxType +} + +var TRuntimeFilterDesc_BloomFilterSizeCalculatedByNdv_DEFAULT bool + +func (p *TRuntimeFilterDesc) GetBloomFilterSizeCalculatedByNdv() (v bool) { + if !p.IsSetBloomFilterSizeCalculatedByNdv() { + return TRuntimeFilterDesc_BloomFilterSizeCalculatedByNdv_DEFAULT + } + return *p.BloomFilterSizeCalculatedByNdv +} + +var TRuntimeFilterDesc_NullAware_DEFAULT bool + +func (p *TRuntimeFilterDesc) GetNullAware() (v bool) { + if !p.IsSetNullAware() { + return TRuntimeFilterDesc_NullAware_DEFAULT + } + return *p.NullAware +} + +var TRuntimeFilterDesc_SyncFilterSize_DEFAULT bool + +func (p *TRuntimeFilterDesc) GetSyncFilterSize() (v bool) { + if !p.IsSetSyncFilterSize() { + return TRuntimeFilterDesc_SyncFilterSize_DEFAULT + } + return *p.SyncFilterSize +} func (p *TRuntimeFilterDesc) SetFilterId(val int32) { p.FilterId = val } @@ -35394,6 +43975,18 @@ func (p *TRuntimeFilterDesc) SetBitmapFilterNotIn(val *bool) { func (p *TRuntimeFilterDesc) SetOptRemoteRf(val *bool) { p.OptRemoteRf = val } +func (p *TRuntimeFilterDesc) SetMinMaxType(val *TMinMaxRuntimeFilterType) { + p.MinMaxType = val +} +func (p *TRuntimeFilterDesc) SetBloomFilterSizeCalculatedByNdv(val *bool) { + p.BloomFilterSizeCalculatedByNdv = val +} +func (p *TRuntimeFilterDesc) SetNullAware(val *bool) { + p.NullAware = val +} +func (p *TRuntimeFilterDesc) SetSyncFilterSize(val *bool) { + p.SyncFilterSize = val +} var fieldIDToName_TRuntimeFilterDesc = map[int16]string{ 1: "filter_id", @@ -35408,6 +44001,10 @@ var fieldIDToName_TRuntimeFilterDesc = map[int16]string{ 10: "bitmap_target_expr", 11: "bitmap_filter_not_in", 12: "opt_remote_rf", + 13: "min_max_type", + 14: "bloom_filter_size_calculated_by_ndv", + 15: "null_aware", + 16: "sync_filter_size", } func (p *TRuntimeFilterDesc) IsSetSrcExpr() bool { @@ -35430,6 +44027,22 @@ func (p *TRuntimeFilterDesc) IsSetOptRemoteRf() bool { return p.OptRemoteRf != nil } +func (p *TRuntimeFilterDesc) IsSetMinMaxType() bool { + return p.MinMaxType != nil +} + +func (p *TRuntimeFilterDesc) IsSetBloomFilterSizeCalculatedByNdv() bool { + return 
p.BloomFilterSizeCalculatedByNdv != nil +} + +func (p *TRuntimeFilterDesc) IsSetNullAware() bool { + return p.NullAware != nil +} + +func (p *TRuntimeFilterDesc) IsSetSyncFilterSize() bool { + return p.SyncFilterSize != nil +} + func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -35463,10 +44076,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFilterId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { @@ -35474,10 +44085,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSrcExpr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -35485,10 +44094,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetExprOrder = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.MAP { @@ -35496,10 +44103,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPlanIdToTargetExpr = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { @@ -35507,10 +44112,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsBroadcastJoin = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { @@ -35518,10 +44121,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHasLocalTargets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.BOOL { @@ -35529,10 +44130,8 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHasRemoteTargets = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I32 { @@ -35540,57 +44139,78 @@ func (p *TRuntimeFilterDesc) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } 
else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.BOOL { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.BOOL { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I32 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -35657,37 +44277,42 @@ RequiredFieldNotSetError: } func (p *TRuntimeFilterDesc) ReadField1(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.FilterId = v + _field = v } + p.FilterId = _field return nil } - func (p *TRuntimeFilterDesc) ReadField2(iprot thrift.TProtocol) error { - p.SrcExpr = exprs.NewTExpr() - if err := p.SrcExpr.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.SrcExpr = _field return nil } - func (p *TRuntimeFilterDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.ExprOrder = v + _field = v } + p.ExprOrder = _field return nil } - func (p *TRuntimeFilterDesc) ReadField4(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.PlanIdToTargetExpr = make(map[types.TPlanNodeId]*exprs.TExpr, size) + _field := make(map[types.TPlanNodeId]*exprs.TExpr, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { var _key types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { @@ -35695,87 +44320,149 @@ func (p *TRuntimeFilterDesc) ReadField4(iprot thrift.TProtocol) error { } else { _key = v } - _val := exprs.NewTExpr() + + _val := &values[i] + _val.InitDefault() if err := _val.Read(iprot); err != nil { return err } - p.PlanIdToTargetExpr[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.PlanIdToTargetExpr = _field return nil } - func (p *TRuntimeFilterDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsBroadcastJoin = v + _field = v } + p.IsBroadcastJoin = _field return nil } - func (p *TRuntimeFilterDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - 
p.HasLocalTargets = v + _field = v } + p.HasLocalTargets = _field return nil } - func (p *TRuntimeFilterDesc) ReadField7(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasRemoteTargets = v + _field = v } + p.HasRemoteTargets = _field return nil } - func (p *TRuntimeFilterDesc) ReadField8(iprot thrift.TProtocol) error { + + var _field TRuntimeFilterType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TRuntimeFilterType(v) + _field = TRuntimeFilterType(v) } + p.Type = _field return nil } - func (p *TRuntimeFilterDesc) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BloomFilterSizeBytes = &v + _field = &v } + p.BloomFilterSizeBytes = _field return nil } - func (p *TRuntimeFilterDesc) ReadField10(iprot thrift.TProtocol) error { - p.BitmapTargetExpr = exprs.NewTExpr() - if err := p.BitmapTargetExpr.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.BitmapTargetExpr = _field return nil } - func (p *TRuntimeFilterDesc) ReadField11(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.BitmapFilterNotIn = &v + _field = &v } + p.BitmapFilterNotIn = _field return nil } - func (p *TRuntimeFilterDesc) ReadField12(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.OptRemoteRf = _field + return nil +} +func (p *TRuntimeFilterDesc) ReadField13(iprot thrift.TProtocol) error { + + var _field *TMinMaxRuntimeFilterType + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + tmp := TMinMaxRuntimeFilterType(v) + _field = &tmp + } + p.MinMaxType = _field + return nil +} +func (p *TRuntimeFilterDesc) ReadField14(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.BloomFilterSizeCalculatedByNdv = _field + return nil +} +func (p *TRuntimeFilterDesc) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.NullAware = _field + return nil +} +func (p *TRuntimeFilterDesc) ReadField16(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.OptRemoteRf = &v + _field = &v } + p.SyncFilterSize = _field return nil } @@ -35833,7 +44520,22 @@ func (p *TRuntimeFilterDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } - + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -35911,11 +44613,9 @@ func (p *TRuntimeFilterDesc) writeField4(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.PlanIdToTargetExpr { - if err := oprot.WriteI32(k); err != nil { return err } - if err := v.Write(oprot); err != nil { return err } @@ -36077,11 +44777,88 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), 
err) } +func (p *TRuntimeFilterDesc) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetMinMaxType() { + if err = oprot.WriteFieldBegin("min_max_type", thrift.I32, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.MinMaxType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TRuntimeFilterDesc) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetBloomFilterSizeCalculatedByNdv() { + if err = oprot.WriteFieldBegin("bloom_filter_size_calculated_by_ndv", thrift.BOOL, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.BloomFilterSizeCalculatedByNdv); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TRuntimeFilterDesc) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetNullAware() { + if err = oprot.WriteFieldBegin("null_aware", thrift.BOOL, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.NullAware); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TRuntimeFilterDesc) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetSyncFilterSize() { + if err = oprot.WriteFieldBegin("sync_filter_size", thrift.BOOL, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.SyncFilterSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + func (p *TRuntimeFilterDesc) String() string { if p == nil { return "" } return fmt.Sprintf("TRuntimeFilterDesc(%+v)", *p) + } func (p *TRuntimeFilterDesc) DeepEqual(ano *TRuntimeFilterDesc) bool { @@ -36126,6 +44903,18 @@ func (p *TRuntimeFilterDesc) DeepEqual(ano *TRuntimeFilterDesc) bool { if !p.Field12DeepEqual(ano.OptRemoteRf) { return false } + if !p.Field13DeepEqual(ano.MinMaxType) { + return false + } + if !p.Field14DeepEqual(ano.BloomFilterSizeCalculatedByNdv) { + return false + } + if !p.Field15DeepEqual(ano.NullAware) { + return false + } + if !p.Field16DeepEqual(ano.SyncFilterSize) { + return false + } return true } @@ -36234,6 +45023,54 @@ func (p *TRuntimeFilterDesc) Field12DeepEqual(src *bool) bool { } return true } +func (p *TRuntimeFilterDesc) Field13DeepEqual(src *TMinMaxRuntimeFilterType) bool { + + if p.MinMaxType == src { + return true + } else if p.MinMaxType == nil || src == nil { + return false + } + if *p.MinMaxType != *src { + return false + } + return true +} +func (p *TRuntimeFilterDesc) Field14DeepEqual(src 
*bool) bool { + + if p.BloomFilterSizeCalculatedByNdv == src { + return true + } else if p.BloomFilterSizeCalculatedByNdv == nil || src == nil { + return false + } + if *p.BloomFilterSizeCalculatedByNdv != *src { + return false + } + return true +} +func (p *TRuntimeFilterDesc) Field15DeepEqual(src *bool) bool { + + if p.NullAware == src { + return true + } else if p.NullAware == nil || src == nil { + return false + } + if *p.NullAware != *src { + return false + } + return true +} +func (p *TRuntimeFilterDesc) Field16DeepEqual(src *bool) bool { + + if p.SyncFilterSize == src { + return true + } else if p.SyncFilterSize == nil || src == nil { + return false + } + if *p.SyncFilterSize != *src { + return false + } + return true +} type TDataGenScanNode struct { TupleId *types.TTupleId `thrift:"tuple_id,1,optional" frugal:"1,optional,i32" json:"tuple_id,omitempty"` @@ -36245,7 +45082,6 @@ func NewTDataGenScanNode() *TDataGenScanNode { } func (p *TDataGenScanNode) InitDefault() { - *p = TDataGenScanNode{} } var TDataGenScanNode_TupleId_DEFAULT types.TTupleId @@ -36309,27 +45145,22 @@ func (p *TDataGenScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -36355,21 +45186,26 @@ ReadStructEndError: } func (p *TDataGenScanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TupleId = &v + _field = &v } + p.TupleId = _field return nil } - func (p *TDataGenScanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field *TDataGenFunctionName if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TDataGenFunctionName(v) - p.FuncName = &tmp + _field = &tmp } + p.FuncName = _field return nil } @@ -36387,7 +45223,6 @@ func (p *TDataGenScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -36449,6 +45284,7 @@ func (p *TDataGenScanNode) String() string { return "" } return fmt.Sprintf("TDataGenScanNode(%+v)", *p) + } func (p *TDataGenScanNode) DeepEqual(ano *TDataGenScanNode) bool { @@ -36500,7 +45336,6 @@ func NewTGroupCommitScanNode() *TGroupCommitScanNode { } func (p *TGroupCommitScanNode) InitDefault() { - *p = TGroupCommitScanNode{} } var TGroupCommitScanNode_TableId_DEFAULT int64 @@ -36547,17 +45382,14 @@ func (p *TGroupCommitScanNode) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -36583,11 +45415,14 @@ ReadStructEndError: } func (p *TGroupCommitScanNode) 
ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TableId = &v + _field = &v } + p.TableId = _field return nil } @@ -36601,7 +45436,6 @@ func (p *TGroupCommitScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -36644,6 +45478,7 @@ func (p *TGroupCommitScanNode) String() string { return "" } return fmt.Sprintf("TGroupCommitScanNode(%+v)", *p) + } func (p *TGroupCommitScanNode) DeepEqual(ano *TGroupCommitScanNode) bool { @@ -36672,52 +45507,58 @@ func (p *TGroupCommitScanNode) Field1DeepEqual(src *int64) bool { } type TPlanNode struct { - NodeId types.TPlanNodeId `thrift:"node_id,1,required" frugal:"1,required,i32" json:"node_id"` - NodeType TPlanNodeType `thrift:"node_type,2,required" frugal:"2,required,TPlanNodeType" json:"node_type"` - NumChildren int32 `thrift:"num_children,3,required" frugal:"3,required,i32" json:"num_children"` - Limit int64 `thrift:"limit,4,required" frugal:"4,required,i64" json:"limit"` - RowTuples []types.TTupleId `thrift:"row_tuples,5,required" frugal:"5,required,list" json:"row_tuples"` - NullableTuples []bool `thrift:"nullable_tuples,6,required" frugal:"6,required,list" json:"nullable_tuples"` - Conjuncts []*exprs.TExpr `thrift:"conjuncts,7,optional" frugal:"7,optional,list" json:"conjuncts,omitempty"` - CompactData bool `thrift:"compact_data,8,required" frugal:"8,required,bool" json:"compact_data"` - HashJoinNode *THashJoinNode `thrift:"hash_join_node,11,optional" frugal:"11,optional,THashJoinNode" json:"hash_join_node,omitempty"` - AggNode *TAggregationNode `thrift:"agg_node,12,optional" frugal:"12,optional,TAggregationNode" json:"agg_node,omitempty"` - SortNode *TSortNode `thrift:"sort_node,13,optional" frugal:"13,optional,TSortNode" json:"sort_node,omitempty"` - MergeNode *TMergeNode `thrift:"merge_node,14,optional" frugal:"14,optional,TMergeNode" json:"merge_node,omitempty"` - ExchangeNode *TExchangeNode `thrift:"exchange_node,15,optional" frugal:"15,optional,TExchangeNode" json:"exchange_node,omitempty"` - MysqlScanNode *TMySQLScanNode `thrift:"mysql_scan_node,17,optional" frugal:"17,optional,TMySQLScanNode" json:"mysql_scan_node,omitempty"` - OlapScanNode *TOlapScanNode `thrift:"olap_scan_node,18,optional" frugal:"18,optional,TOlapScanNode" json:"olap_scan_node,omitempty"` - CsvScanNode *TCsvScanNode `thrift:"csv_scan_node,19,optional" frugal:"19,optional,TCsvScanNode" json:"csv_scan_node,omitempty"` - BrokerScanNode *TBrokerScanNode `thrift:"broker_scan_node,20,optional" frugal:"20,optional,TBrokerScanNode" json:"broker_scan_node,omitempty"` - PreAggNode *TPreAggregationNode `thrift:"pre_agg_node,21,optional" frugal:"21,optional,TPreAggregationNode" json:"pre_agg_node,omitempty"` - SchemaScanNode *TSchemaScanNode `thrift:"schema_scan_node,22,optional" frugal:"22,optional,TSchemaScanNode" json:"schema_scan_node,omitempty"` - MergeJoinNode *TMergeJoinNode `thrift:"merge_join_node,23,optional" frugal:"23,optional,TMergeJoinNode" json:"merge_join_node,omitempty"` - MetaScanNode *TMetaScanNode `thrift:"meta_scan_node,24,optional" frugal:"24,optional,TMetaScanNode" json:"meta_scan_node,omitempty"` - AnalyticNode *TAnalyticNode `thrift:"analytic_node,25,optional" frugal:"25,optional,TAnalyticNode" json:"analytic_node,omitempty"` - OlapRewriteNode *TOlapRewriteNode `thrift:"olap_rewrite_node,26,optional" frugal:"26,optional,TOlapRewriteNode" 
json:"olap_rewrite_node,omitempty"` - UnionNode *TUnionNode `thrift:"union_node,28,optional" frugal:"28,optional,TUnionNode" json:"union_node,omitempty"` - ResourceProfile *TBackendResourceProfile `thrift:"resource_profile,29,optional" frugal:"29,optional,TBackendResourceProfile" json:"resource_profile,omitempty"` - EsScanNode *TEsScanNode `thrift:"es_scan_node,30,optional" frugal:"30,optional,TEsScanNode" json:"es_scan_node,omitempty"` - RepeatNode *TRepeatNode `thrift:"repeat_node,31,optional" frugal:"31,optional,TRepeatNode" json:"repeat_node,omitempty"` - AssertNumRowsNode *TAssertNumRowsNode `thrift:"assert_num_rows_node,32,optional" frugal:"32,optional,TAssertNumRowsNode" json:"assert_num_rows_node,omitempty"` - IntersectNode *TIntersectNode `thrift:"intersect_node,33,optional" frugal:"33,optional,TIntersectNode" json:"intersect_node,omitempty"` - ExceptNode *TExceptNode `thrift:"except_node,34,optional" frugal:"34,optional,TExceptNode" json:"except_node,omitempty"` - OdbcScanNode *TOdbcScanNode `thrift:"odbc_scan_node,35,optional" frugal:"35,optional,TOdbcScanNode" json:"odbc_scan_node,omitempty"` - RuntimeFilters []*TRuntimeFilterDesc `thrift:"runtime_filters,36,optional" frugal:"36,optional,list" json:"runtime_filters,omitempty"` - GroupCommitScanNode *TGroupCommitScanNode `thrift:"group_commit_scan_node,37,optional" frugal:"37,optional,TGroupCommitScanNode" json:"group_commit_scan_node,omitempty"` - Vconjunct *exprs.TExpr `thrift:"vconjunct,40,optional" frugal:"40,optional,exprs.TExpr" json:"vconjunct,omitempty"` - TableFunctionNode *TTableFunctionNode `thrift:"table_function_node,41,optional" frugal:"41,optional,TTableFunctionNode" json:"table_function_node,omitempty"` - OutputSlotIds []types.TSlotId `thrift:"output_slot_ids,42,optional" frugal:"42,optional,list" json:"output_slot_ids,omitempty"` - DataGenScanNode *TDataGenScanNode `thrift:"data_gen_scan_node,43,optional" frugal:"43,optional,TDataGenScanNode" json:"data_gen_scan_node,omitempty"` - FileScanNode *TFileScanNode `thrift:"file_scan_node,44,optional" frugal:"44,optional,TFileScanNode" json:"file_scan_node,omitempty"` - JdbcScanNode *TJdbcScanNode `thrift:"jdbc_scan_node,45,optional" frugal:"45,optional,TJdbcScanNode" json:"jdbc_scan_node,omitempty"` - NestedLoopJoinNode *TNestedLoopJoinNode `thrift:"nested_loop_join_node,46,optional" frugal:"46,optional,TNestedLoopJoinNode" json:"nested_loop_join_node,omitempty"` - TestExternalScanNode *TTestExternalScanNode `thrift:"test_external_scan_node,47,optional" frugal:"47,optional,TTestExternalScanNode" json:"test_external_scan_node,omitempty"` - PushDownAggTypeOpt *TPushAggOp `thrift:"push_down_agg_type_opt,48,optional" frugal:"48,optional,TPushAggOp" json:"push_down_agg_type_opt,omitempty"` - PushDownCount *int64 `thrift:"push_down_count,49,optional" frugal:"49,optional,i64" json:"push_down_count,omitempty"` - Projections []*exprs.TExpr `thrift:"projections,101,optional" frugal:"101,optional,list" json:"projections,omitempty"` - OutputTupleId *types.TTupleId `thrift:"output_tuple_id,102,optional" frugal:"102,optional,i32" json:"output_tuple_id,omitempty"` - PartitionSortNode *TPartitionSortNode `thrift:"partition_sort_node,103,optional" frugal:"103,optional,TPartitionSortNode" json:"partition_sort_node,omitempty"` + NodeId types.TPlanNodeId `thrift:"node_id,1,required" frugal:"1,required,i32" json:"node_id"` + NodeType TPlanNodeType `thrift:"node_type,2,required" frugal:"2,required,TPlanNodeType" json:"node_type"` + NumChildren int32 `thrift:"num_children,3,required" 
frugal:"3,required,i32" json:"num_children"` + Limit int64 `thrift:"limit,4,required" frugal:"4,required,i64" json:"limit"` + RowTuples []types.TTupleId `thrift:"row_tuples,5,required" frugal:"5,required,list" json:"row_tuples"` + NullableTuples []bool `thrift:"nullable_tuples,6,required" frugal:"6,required,list" json:"nullable_tuples"` + Conjuncts []*exprs.TExpr `thrift:"conjuncts,7,optional" frugal:"7,optional,list" json:"conjuncts,omitempty"` + CompactData bool `thrift:"compact_data,8,required" frugal:"8,required,bool" json:"compact_data"` + HashJoinNode *THashJoinNode `thrift:"hash_join_node,11,optional" frugal:"11,optional,THashJoinNode" json:"hash_join_node,omitempty"` + AggNode *TAggregationNode `thrift:"agg_node,12,optional" frugal:"12,optional,TAggregationNode" json:"agg_node,omitempty"` + SortNode *TSortNode `thrift:"sort_node,13,optional" frugal:"13,optional,TSortNode" json:"sort_node,omitempty"` + MergeNode *TMergeNode `thrift:"merge_node,14,optional" frugal:"14,optional,TMergeNode" json:"merge_node,omitempty"` + ExchangeNode *TExchangeNode `thrift:"exchange_node,15,optional" frugal:"15,optional,TExchangeNode" json:"exchange_node,omitempty"` + MysqlScanNode *TMySQLScanNode `thrift:"mysql_scan_node,17,optional" frugal:"17,optional,TMySQLScanNode" json:"mysql_scan_node,omitempty"` + OlapScanNode *TOlapScanNode `thrift:"olap_scan_node,18,optional" frugal:"18,optional,TOlapScanNode" json:"olap_scan_node,omitempty"` + CsvScanNode *TCsvScanNode `thrift:"csv_scan_node,19,optional" frugal:"19,optional,TCsvScanNode" json:"csv_scan_node,omitempty"` + BrokerScanNode *TBrokerScanNode `thrift:"broker_scan_node,20,optional" frugal:"20,optional,TBrokerScanNode" json:"broker_scan_node,omitempty"` + PreAggNode *TPreAggregationNode `thrift:"pre_agg_node,21,optional" frugal:"21,optional,TPreAggregationNode" json:"pre_agg_node,omitempty"` + SchemaScanNode *TSchemaScanNode `thrift:"schema_scan_node,22,optional" frugal:"22,optional,TSchemaScanNode" json:"schema_scan_node,omitempty"` + MergeJoinNode *TMergeJoinNode `thrift:"merge_join_node,23,optional" frugal:"23,optional,TMergeJoinNode" json:"merge_join_node,omitempty"` + MetaScanNode *TMetaScanNode `thrift:"meta_scan_node,24,optional" frugal:"24,optional,TMetaScanNode" json:"meta_scan_node,omitempty"` + AnalyticNode *TAnalyticNode `thrift:"analytic_node,25,optional" frugal:"25,optional,TAnalyticNode" json:"analytic_node,omitempty"` + OlapRewriteNode *TOlapRewriteNode `thrift:"olap_rewrite_node,26,optional" frugal:"26,optional,TOlapRewriteNode" json:"olap_rewrite_node,omitempty"` + UnionNode *TUnionNode `thrift:"union_node,28,optional" frugal:"28,optional,TUnionNode" json:"union_node,omitempty"` + ResourceProfile *TBackendResourceProfile `thrift:"resource_profile,29,optional" frugal:"29,optional,TBackendResourceProfile" json:"resource_profile,omitempty"` + EsScanNode *TEsScanNode `thrift:"es_scan_node,30,optional" frugal:"30,optional,TEsScanNode" json:"es_scan_node,omitempty"` + RepeatNode *TRepeatNode `thrift:"repeat_node,31,optional" frugal:"31,optional,TRepeatNode" json:"repeat_node,omitempty"` + AssertNumRowsNode *TAssertNumRowsNode `thrift:"assert_num_rows_node,32,optional" frugal:"32,optional,TAssertNumRowsNode" json:"assert_num_rows_node,omitempty"` + IntersectNode *TIntersectNode `thrift:"intersect_node,33,optional" frugal:"33,optional,TIntersectNode" json:"intersect_node,omitempty"` + ExceptNode *TExceptNode `thrift:"except_node,34,optional" frugal:"34,optional,TExceptNode" json:"except_node,omitempty"` + OdbcScanNode *TOdbcScanNode 
`thrift:"odbc_scan_node,35,optional" frugal:"35,optional,TOdbcScanNode" json:"odbc_scan_node,omitempty"` + RuntimeFilters []*TRuntimeFilterDesc `thrift:"runtime_filters,36,optional" frugal:"36,optional,list" json:"runtime_filters,omitempty"` + GroupCommitScanNode *TGroupCommitScanNode `thrift:"group_commit_scan_node,37,optional" frugal:"37,optional,TGroupCommitScanNode" json:"group_commit_scan_node,omitempty"` + Vconjunct *exprs.TExpr `thrift:"vconjunct,40,optional" frugal:"40,optional,exprs.TExpr" json:"vconjunct,omitempty"` + TableFunctionNode *TTableFunctionNode `thrift:"table_function_node,41,optional" frugal:"41,optional,TTableFunctionNode" json:"table_function_node,omitempty"` + OutputSlotIds []types.TSlotId `thrift:"output_slot_ids,42,optional" frugal:"42,optional,list" json:"output_slot_ids,omitempty"` + DataGenScanNode *TDataGenScanNode `thrift:"data_gen_scan_node,43,optional" frugal:"43,optional,TDataGenScanNode" json:"data_gen_scan_node,omitempty"` + FileScanNode *TFileScanNode `thrift:"file_scan_node,44,optional" frugal:"44,optional,TFileScanNode" json:"file_scan_node,omitempty"` + JdbcScanNode *TJdbcScanNode `thrift:"jdbc_scan_node,45,optional" frugal:"45,optional,TJdbcScanNode" json:"jdbc_scan_node,omitempty"` + NestedLoopJoinNode *TNestedLoopJoinNode `thrift:"nested_loop_join_node,46,optional" frugal:"46,optional,TNestedLoopJoinNode" json:"nested_loop_join_node,omitempty"` + TestExternalScanNode *TTestExternalScanNode `thrift:"test_external_scan_node,47,optional" frugal:"47,optional,TTestExternalScanNode" json:"test_external_scan_node,omitempty"` + PushDownAggTypeOpt *TPushAggOp `thrift:"push_down_agg_type_opt,48,optional" frugal:"48,optional,TPushAggOp" json:"push_down_agg_type_opt,omitempty"` + PushDownCount *int64 `thrift:"push_down_count,49,optional" frugal:"49,optional,i64" json:"push_down_count,omitempty"` + DistributeExprLists [][]*exprs.TExpr `thrift:"distribute_expr_lists,50,optional" frugal:"50,optional,list>" json:"distribute_expr_lists,omitempty"` + IsSerialOperator *bool `thrift:"is_serial_operator,51,optional" frugal:"51,optional,bool" json:"is_serial_operator,omitempty"` + Projections []*exprs.TExpr `thrift:"projections,101,optional" frugal:"101,optional,list" json:"projections,omitempty"` + OutputTupleId *types.TTupleId `thrift:"output_tuple_id,102,optional" frugal:"102,optional,i32" json:"output_tuple_id,omitempty"` + PartitionSortNode *TPartitionSortNode `thrift:"partition_sort_node,103,optional" frugal:"103,optional,TPartitionSortNode" json:"partition_sort_node,omitempty"` + IntermediateProjectionsList [][]*exprs.TExpr `thrift:"intermediate_projections_list,104,optional" frugal:"104,optional,list>" json:"intermediate_projections_list,omitempty"` + IntermediateOutputTupleIdList []types.TTupleId `thrift:"intermediate_output_tuple_id_list,105,optional" frugal:"105,optional,list" json:"intermediate_output_tuple_id_list,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,106,optional" frugal:"106,optional,list" json:"topn_filter_source_node_ids,omitempty"` + NereidsId *int32 `thrift:"nereids_id,107,optional" frugal:"107,optional,i32" json:"nereids_id,omitempty"` } func NewTPlanNode() *TPlanNode { @@ -36725,7 +45566,6 @@ func NewTPlanNode() *TPlanNode { } func (p *TPlanNode) InitDefault() { - *p = TPlanNode{} } func (p *TPlanNode) GetNodeId() (v types.TPlanNodeId) { @@ -37080,6 +45920,24 @@ func (p *TPlanNode) GetPushDownCount() (v int64) { return *p.PushDownCount } +var TPlanNode_DistributeExprLists_DEFAULT [][]*exprs.TExpr + 
+func (p *TPlanNode) GetDistributeExprLists() (v [][]*exprs.TExpr) { + if !p.IsSetDistributeExprLists() { + return TPlanNode_DistributeExprLists_DEFAULT + } + return p.DistributeExprLists +} + +var TPlanNode_IsSerialOperator_DEFAULT bool + +func (p *TPlanNode) GetIsSerialOperator() (v bool) { + if !p.IsSetIsSerialOperator() { + return TPlanNode_IsSerialOperator_DEFAULT + } + return *p.IsSerialOperator +} + var TPlanNode_Projections_DEFAULT []*exprs.TExpr func (p *TPlanNode) GetProjections() (v []*exprs.TExpr) { @@ -37106,6 +45964,42 @@ func (p *TPlanNode) GetPartitionSortNode() (v *TPartitionSortNode) { } return p.PartitionSortNode } + +var TPlanNode_IntermediateProjectionsList_DEFAULT [][]*exprs.TExpr + +func (p *TPlanNode) GetIntermediateProjectionsList() (v [][]*exprs.TExpr) { + if !p.IsSetIntermediateProjectionsList() { + return TPlanNode_IntermediateProjectionsList_DEFAULT + } + return p.IntermediateProjectionsList +} + +var TPlanNode_IntermediateOutputTupleIdList_DEFAULT []types.TTupleId + +func (p *TPlanNode) GetIntermediateOutputTupleIdList() (v []types.TTupleId) { + if !p.IsSetIntermediateOutputTupleIdList() { + return TPlanNode_IntermediateOutputTupleIdList_DEFAULT + } + return p.IntermediateOutputTupleIdList +} + +var TPlanNode_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPlanNode) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPlanNode_TopnFilterSourceNodeIds_DEFAULT + } + return p.TopnFilterSourceNodeIds +} + +var TPlanNode_NereidsId_DEFAULT int32 + +func (p *TPlanNode) GetNereidsId() (v int32) { + if !p.IsSetNereidsId() { + return TPlanNode_NereidsId_DEFAULT + } + return *p.NereidsId +} func (p *TPlanNode) SetNodeId(val types.TPlanNodeId) { p.NodeId = val } @@ -37235,6 +46129,12 @@ func (p *TPlanNode) SetPushDownAggTypeOpt(val *TPushAggOp) { func (p *TPlanNode) SetPushDownCount(val *int64) { p.PushDownCount = val } +func (p *TPlanNode) SetDistributeExprLists(val [][]*exprs.TExpr) { + p.DistributeExprLists = val +} +func (p *TPlanNode) SetIsSerialOperator(val *bool) { + p.IsSerialOperator = val +} func (p *TPlanNode) SetProjections(val []*exprs.TExpr) { p.Projections = val } @@ -37244,6 +46144,18 @@ func (p *TPlanNode) SetOutputTupleId(val *types.TTupleId) { func (p *TPlanNode) SetPartitionSortNode(val *TPartitionSortNode) { p.PartitionSortNode = val } +func (p *TPlanNode) SetIntermediateProjectionsList(val [][]*exprs.TExpr) { + p.IntermediateProjectionsList = val +} +func (p *TPlanNode) SetIntermediateOutputTupleIdList(val []types.TTupleId) { + p.IntermediateOutputTupleIdList = val +} +func (p *TPlanNode) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} +func (p *TPlanNode) SetNereidsId(val *int32) { + p.NereidsId = val +} var fieldIDToName_TPlanNode = map[int16]string{ 1: "node_id", @@ -37289,9 +46201,15 @@ var fieldIDToName_TPlanNode = map[int16]string{ 47: "test_external_scan_node", 48: "push_down_agg_type_opt", 49: "push_down_count", + 50: "distribute_expr_lists", + 51: "is_serial_operator", 101: "projections", 102: "output_tuple_id", 103: "partition_sort_node", + 104: "intermediate_projections_list", + 105: "intermediate_output_tuple_id_list", + 106: "topn_filter_source_node_ids", + 107: "nereids_id", } func (p *TPlanNode) IsSetConjuncts() bool { @@ -37438,6 +46356,14 @@ func (p *TPlanNode) IsSetPushDownCount() bool { return p.PushDownCount != nil } +func (p *TPlanNode) IsSetDistributeExprLists() bool { + return p.DistributeExprLists != nil +} + +func (p *TPlanNode) 
IsSetIsSerialOperator() bool { + return p.IsSerialOperator != nil +} + func (p *TPlanNode) IsSetProjections() bool { return p.Projections != nil } @@ -37450,6 +46376,22 @@ func (p *TPlanNode) IsSetPartitionSortNode() bool { return p.PartitionSortNode != nil } +func (p *TPlanNode) IsSetIntermediateProjectionsList() bool { + return p.IntermediateProjectionsList != nil +} + +func (p *TPlanNode) IsSetIntermediateOutputTupleIdList() bool { + return p.IntermediateOutputTupleIdList != nil +} + +func (p *TPlanNode) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} + +func (p *TPlanNode) IsSetNereidsId() bool { + return p.NereidsId != nil +} + func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -37482,10 +46424,8 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodeId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -37493,10 +46433,8 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodeType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -37504,10 +46442,8 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumChildren = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -37515,10 +46451,8 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLimit = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { @@ -37526,10 +46460,8 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRowTuples = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.LIST { @@ -37537,20 +46469,16 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNullableTuples = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.BOOL { @@ -37558,397 +46486,366 @@ func (p *TPlanNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCompactData = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRUCT { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto 
SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRUCT { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.STRUCT { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.STRUCT { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.STRUCT { if err = p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 17: if fieldTypeId == thrift.STRUCT { if err = p.ReadField17(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 18: if fieldTypeId == thrift.STRUCT { if err = p.ReadField18(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 19: if fieldTypeId == thrift.STRUCT { if err = p.ReadField19(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 20: if fieldTypeId == thrift.STRUCT { if err = p.ReadField20(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 21: if fieldTypeId == thrift.STRUCT { if err = p.ReadField21(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 22: if fieldTypeId == thrift.STRUCT { if err = p.ReadField22(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 23: if fieldTypeId == thrift.STRUCT { if err = p.ReadField23(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 24: if fieldTypeId == thrift.STRUCT { if err = p.ReadField24(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 25: if fieldTypeId == thrift.STRUCT { if err = p.ReadField25(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); 
err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 26: if fieldTypeId == thrift.STRUCT { if err = p.ReadField26(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 28: if fieldTypeId == thrift.STRUCT { if err = p.ReadField28(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 29: if fieldTypeId == thrift.STRUCT { if err = p.ReadField29(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 30: if fieldTypeId == thrift.STRUCT { if err = p.ReadField30(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 31: if fieldTypeId == thrift.STRUCT { if err = p.ReadField31(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 32: if fieldTypeId == thrift.STRUCT { if err = p.ReadField32(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 33: if fieldTypeId == thrift.STRUCT { if err = p.ReadField33(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 34: if fieldTypeId == thrift.STRUCT { if err = p.ReadField34(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 35: if fieldTypeId == thrift.STRUCT { if err = p.ReadField35(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 36: if fieldTypeId == thrift.LIST { if err = p.ReadField36(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 37: if fieldTypeId == thrift.STRUCT { if err = p.ReadField37(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 40: if fieldTypeId == thrift.STRUCT { if err = p.ReadField40(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 41: if fieldTypeId == thrift.STRUCT { if err = p.ReadField41(iprot); err != nil { goto ReadFieldError } - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 42: if fieldTypeId == thrift.LIST { if err = p.ReadField42(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 43: if fieldTypeId == thrift.STRUCT { if err = p.ReadField43(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 44: if fieldTypeId == thrift.STRUCT { if err = p.ReadField44(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 45: if fieldTypeId == thrift.STRUCT { if err = p.ReadField45(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 46: if fieldTypeId == thrift.STRUCT { if err = p.ReadField46(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 47: if fieldTypeId == thrift.STRUCT { if err = p.ReadField47(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 48: if fieldTypeId == thrift.I32 { if err = p.ReadField48(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 49: if fieldTypeId == thrift.I64 { if err = p.ReadField49(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 50: + if fieldTypeId == thrift.LIST { + if err = p.ReadField50(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 51: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField51(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 101: if fieldTypeId == thrift.LIST { if err = p.ReadField101(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 102: if fieldTypeId == thrift.I32 { if err = p.ReadField102(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 103: if fieldTypeId == thrift.STRUCT { if err = p.ReadField103(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + 
case 104: + if fieldTypeId == thrift.LIST { + if err = p.ReadField104(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 105: + if fieldTypeId == thrift.LIST { + if err = p.ReadField105(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 106: + if fieldTypeId == thrift.LIST { + if err = p.ReadField106(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 107: + if fieldTypeId == thrift.I32 { + if err = p.ReadField107(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -38010,48 +46907,57 @@ RequiredFieldNotSetError: } func (p *TPlanNode) ReadField1(iprot thrift.TProtocol) error { + + var _field types.TPlanNodeId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NodeId = v + _field = v } + p.NodeId = _field return nil } - func (p *TPlanNode) ReadField2(iprot thrift.TProtocol) error { + + var _field TPlanNodeType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NodeType = TPlanNodeType(v) + _field = TPlanNodeType(v) } + p.NodeType = _field return nil } - func (p *TPlanNode) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumChildren = v + _field = v } + p.NumChildren = _field return nil } - func (p *TPlanNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Limit = v + _field = v } + p.Limit = _field return nil } - func (p *TPlanNode) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RowTuples = make([]types.TTupleId, 0, size) + _field := make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { + var _elem types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err @@ -38059,21 +46965,22 @@ func (p *TPlanNode) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.RowTuples = append(p.RowTuples, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RowTuples = _field return nil } - func (p *TPlanNode) ReadField6(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.NullableTuples = make([]bool, 0, size) + _field := make([]bool, 0, size) for i := 0; i < size; i++ { + var _elem bool if v, err := iprot.ReadBool(); err != nil { return err @@ -38081,278 +46988,287 @@ func (p *TPlanNode) ReadField6(iprot thrift.TProtocol) error { _elem = v } - p.NullableTuples = append(p.NullableTuples, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.NullableTuples = _field return nil } - func (p *TPlanNode) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Conjuncts = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Conjuncts = 
append(p.Conjuncts, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Conjuncts = _field return nil } - func (p *TPlanNode) ReadField8(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.CompactData = v + _field = v } + p.CompactData = _field return nil } - func (p *TPlanNode) ReadField11(iprot thrift.TProtocol) error { - p.HashJoinNode = NewTHashJoinNode() - if err := p.HashJoinNode.Read(iprot); err != nil { + _field := NewTHashJoinNode() + if err := _field.Read(iprot); err != nil { return err } + p.HashJoinNode = _field return nil } - func (p *TPlanNode) ReadField12(iprot thrift.TProtocol) error { - p.AggNode = NewTAggregationNode() - if err := p.AggNode.Read(iprot); err != nil { + _field := NewTAggregationNode() + if err := _field.Read(iprot); err != nil { return err } + p.AggNode = _field return nil } - func (p *TPlanNode) ReadField13(iprot thrift.TProtocol) error { - p.SortNode = NewTSortNode() - if err := p.SortNode.Read(iprot); err != nil { + _field := NewTSortNode() + if err := _field.Read(iprot); err != nil { return err } + p.SortNode = _field return nil } - func (p *TPlanNode) ReadField14(iprot thrift.TProtocol) error { - p.MergeNode = NewTMergeNode() - if err := p.MergeNode.Read(iprot); err != nil { + _field := NewTMergeNode() + if err := _field.Read(iprot); err != nil { return err } + p.MergeNode = _field return nil } - func (p *TPlanNode) ReadField15(iprot thrift.TProtocol) error { - p.ExchangeNode = NewTExchangeNode() - if err := p.ExchangeNode.Read(iprot); err != nil { + _field := NewTExchangeNode() + if err := _field.Read(iprot); err != nil { return err } + p.ExchangeNode = _field return nil } - func (p *TPlanNode) ReadField17(iprot thrift.TProtocol) error { - p.MysqlScanNode = NewTMySQLScanNode() - if err := p.MysqlScanNode.Read(iprot); err != nil { + _field := NewTMySQLScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.MysqlScanNode = _field return nil } - func (p *TPlanNode) ReadField18(iprot thrift.TProtocol) error { - p.OlapScanNode = NewTOlapScanNode() - if err := p.OlapScanNode.Read(iprot); err != nil { + _field := NewTOlapScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.OlapScanNode = _field return nil } - func (p *TPlanNode) ReadField19(iprot thrift.TProtocol) error { - p.CsvScanNode = NewTCsvScanNode() - if err := p.CsvScanNode.Read(iprot); err != nil { + _field := NewTCsvScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.CsvScanNode = _field return nil } - func (p *TPlanNode) ReadField20(iprot thrift.TProtocol) error { - p.BrokerScanNode = NewTBrokerScanNode() - if err := p.BrokerScanNode.Read(iprot); err != nil { + _field := NewTBrokerScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.BrokerScanNode = _field return nil } - func (p *TPlanNode) ReadField21(iprot thrift.TProtocol) error { - p.PreAggNode = NewTPreAggregationNode() - if err := p.PreAggNode.Read(iprot); err != nil { + _field := NewTPreAggregationNode() + if err := _field.Read(iprot); err != nil { return err } + p.PreAggNode = _field return nil } - func (p *TPlanNode) ReadField22(iprot thrift.TProtocol) error { - p.SchemaScanNode = NewTSchemaScanNode() - if err := p.SchemaScanNode.Read(iprot); err != nil { + _field := NewTSchemaScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.SchemaScanNode = _field return nil } - func (p *TPlanNode) ReadField23(iprot thrift.TProtocol) 
error { - p.MergeJoinNode = NewTMergeJoinNode() - if err := p.MergeJoinNode.Read(iprot); err != nil { + _field := NewTMergeJoinNode() + if err := _field.Read(iprot); err != nil { return err } + p.MergeJoinNode = _field return nil } - func (p *TPlanNode) ReadField24(iprot thrift.TProtocol) error { - p.MetaScanNode = NewTMetaScanNode() - if err := p.MetaScanNode.Read(iprot); err != nil { + _field := NewTMetaScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.MetaScanNode = _field return nil } - func (p *TPlanNode) ReadField25(iprot thrift.TProtocol) error { - p.AnalyticNode = NewTAnalyticNode() - if err := p.AnalyticNode.Read(iprot); err != nil { + _field := NewTAnalyticNode() + if err := _field.Read(iprot); err != nil { return err } + p.AnalyticNode = _field return nil } - func (p *TPlanNode) ReadField26(iprot thrift.TProtocol) error { - p.OlapRewriteNode = NewTOlapRewriteNode() - if err := p.OlapRewriteNode.Read(iprot); err != nil { + _field := NewTOlapRewriteNode() + if err := _field.Read(iprot); err != nil { return err } + p.OlapRewriteNode = _field return nil } - func (p *TPlanNode) ReadField28(iprot thrift.TProtocol) error { - p.UnionNode = NewTUnionNode() - if err := p.UnionNode.Read(iprot); err != nil { + _field := NewTUnionNode() + if err := _field.Read(iprot); err != nil { return err } + p.UnionNode = _field return nil } - func (p *TPlanNode) ReadField29(iprot thrift.TProtocol) error { - p.ResourceProfile = NewTBackendResourceProfile() - if err := p.ResourceProfile.Read(iprot); err != nil { + _field := NewTBackendResourceProfile() + if err := _field.Read(iprot); err != nil { return err } + p.ResourceProfile = _field return nil } - func (p *TPlanNode) ReadField30(iprot thrift.TProtocol) error { - p.EsScanNode = NewTEsScanNode() - if err := p.EsScanNode.Read(iprot); err != nil { + _field := NewTEsScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.EsScanNode = _field return nil } - func (p *TPlanNode) ReadField31(iprot thrift.TProtocol) error { - p.RepeatNode = NewTRepeatNode() - if err := p.RepeatNode.Read(iprot); err != nil { + _field := NewTRepeatNode() + if err := _field.Read(iprot); err != nil { return err } + p.RepeatNode = _field return nil } - func (p *TPlanNode) ReadField32(iprot thrift.TProtocol) error { - p.AssertNumRowsNode = NewTAssertNumRowsNode() - if err := p.AssertNumRowsNode.Read(iprot); err != nil { + _field := NewTAssertNumRowsNode() + if err := _field.Read(iprot); err != nil { return err } + p.AssertNumRowsNode = _field return nil } - func (p *TPlanNode) ReadField33(iprot thrift.TProtocol) error { - p.IntersectNode = NewTIntersectNode() - if err := p.IntersectNode.Read(iprot); err != nil { + _field := NewTIntersectNode() + if err := _field.Read(iprot); err != nil { return err } + p.IntersectNode = _field return nil } - func (p *TPlanNode) ReadField34(iprot thrift.TProtocol) error { - p.ExceptNode = NewTExceptNode() - if err := p.ExceptNode.Read(iprot); err != nil { + _field := NewTExceptNode() + if err := _field.Read(iprot); err != nil { return err } + p.ExceptNode = _field return nil } - func (p *TPlanNode) ReadField35(iprot thrift.TProtocol) error { - p.OdbcScanNode = NewTOdbcScanNode() - if err := p.OdbcScanNode.Read(iprot); err != nil { + _field := NewTOdbcScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.OdbcScanNode = _field return nil } - func (p *TPlanNode) ReadField36(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.RuntimeFilters = 
make([]*TRuntimeFilterDesc, 0, size) + _field := make([]*TRuntimeFilterDesc, 0, size) + values := make([]TRuntimeFilterDesc, size) for i := 0; i < size; i++ { - _elem := NewTRuntimeFilterDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.RuntimeFilters = append(p.RuntimeFilters, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.RuntimeFilters = _field return nil } - func (p *TPlanNode) ReadField37(iprot thrift.TProtocol) error { - p.GroupCommitScanNode = NewTGroupCommitScanNode() - if err := p.GroupCommitScanNode.Read(iprot); err != nil { + _field := NewTGroupCommitScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.GroupCommitScanNode = _field return nil } - func (p *TPlanNode) ReadField40(iprot thrift.TProtocol) error { - p.Vconjunct = exprs.NewTExpr() - if err := p.Vconjunct.Read(iprot); err != nil { + _field := exprs.NewTExpr() + if err := _field.Read(iprot); err != nil { return err } + p.Vconjunct = _field return nil } - func (p *TPlanNode) ReadField41(iprot thrift.TProtocol) error { - p.TableFunctionNode = NewTTableFunctionNode() - if err := p.TableFunctionNode.Read(iprot); err != nil { + _field := NewTTableFunctionNode() + if err := _field.Read(iprot); err != nil { return err } + p.TableFunctionNode = _field return nil } - func (p *TPlanNode) ReadField42(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.OutputSlotIds = make([]types.TSlotId, 0, size) + _field := make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { + var _elem types.TSlotId if v, err := iprot.ReadI32(); err != nil { return err @@ -38360,107 +47276,255 @@ func (p *TPlanNode) ReadField42(iprot thrift.TProtocol) error { _elem = v } - p.OutputSlotIds = append(p.OutputSlotIds, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.OutputSlotIds = _field return nil } - func (p *TPlanNode) ReadField43(iprot thrift.TProtocol) error { - p.DataGenScanNode = NewTDataGenScanNode() - if err := p.DataGenScanNode.Read(iprot); err != nil { + _field := NewTDataGenScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.DataGenScanNode = _field return nil } - func (p *TPlanNode) ReadField44(iprot thrift.TProtocol) error { - p.FileScanNode = NewTFileScanNode() - if err := p.FileScanNode.Read(iprot); err != nil { + _field := NewTFileScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.FileScanNode = _field return nil } - func (p *TPlanNode) ReadField45(iprot thrift.TProtocol) error { - p.JdbcScanNode = NewTJdbcScanNode() - if err := p.JdbcScanNode.Read(iprot); err != nil { + _field := NewTJdbcScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.JdbcScanNode = _field return nil } - func (p *TPlanNode) ReadField46(iprot thrift.TProtocol) error { - p.NestedLoopJoinNode = NewTNestedLoopJoinNode() - if err := p.NestedLoopJoinNode.Read(iprot); err != nil { + _field := NewTNestedLoopJoinNode() + if err := _field.Read(iprot); err != nil { return err } + p.NestedLoopJoinNode = _field return nil } - func (p *TPlanNode) ReadField47(iprot thrift.TProtocol) error { - p.TestExternalScanNode = NewTTestExternalScanNode() - if err := p.TestExternalScanNode.Read(iprot); err != nil { + _field := NewTTestExternalScanNode() + if err := _field.Read(iprot); err != nil { return err } + p.TestExternalScanNode = _field return nil } - func (p *TPlanNode) ReadField48(iprot 
thrift.TProtocol) error { + + var _field *TPushAggOp if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TPushAggOp(v) - p.PushDownAggTypeOpt = &tmp + _field = &tmp } + p.PushDownAggTypeOpt = _field return nil } - func (p *TPlanNode) ReadField49(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PushDownCount = &v + _field = &v } + p.PushDownCount = _field return nil } +func (p *TPlanNode) ReadField50(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([][]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem1 := &values[i] + _elem1.InitDefault() + + if err := _elem1.Read(iprot); err != nil { + return err + } + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DistributeExprLists = _field + return nil +} +func (p *TPlanNode) ReadField51(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsSerialOperator = _field + return nil +} func (p *TPlanNode) ReadField101(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Projections = make([]*exprs.TExpr, 0, size) + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Projections = append(p.Projections, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Projections = _field return nil } - func (p *TPlanNode) ReadField102(iprot thrift.TProtocol) error { + + var _field *types.TTupleId if v, err := iprot.ReadI32(); err != nil { return err } else { - p.OutputTupleId = &v + _field = &v } + p.OutputTupleId = _field return nil } - func (p *TPlanNode) ReadField103(iprot thrift.TProtocol) error { - p.PartitionSortNode = NewTPartitionSortNode() - if err := p.PartitionSortNode.Read(iprot); err != nil { + _field := NewTPartitionSortNode() + if err := _field.Read(iprot); err != nil { return err } + p.PartitionSortNode = _field + return nil +} +func (p *TPlanNode) ReadField104(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([][]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _elem := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem1 := &values[i] + _elem1.InitDefault() + + if err := _elem1.Read(iprot); err != nil { + return err + } + + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.IntermediateProjectionsList = _field + return nil +} +func (p *TPlanNode) ReadField105(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]types.TTupleId, 0, size) + for i := 0; 
i < size; i++ { + + var _elem types.TTupleId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.IntermediateOutputTupleIdList = _field + return nil +} +func (p *TPlanNode) ReadField106(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field + return nil +} +func (p *TPlanNode) ReadField107(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.NereidsId = _field return nil } @@ -38642,6 +47706,14 @@ func (p *TPlanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 49 goto WriteFieldError } + if err = p.writeField50(oprot); err != nil { + fieldId = 50 + goto WriteFieldError + } + if err = p.writeField51(oprot); err != nil { + fieldId = 51 + goto WriteFieldError + } if err = p.writeField101(oprot); err != nil { fieldId = 101 goto WriteFieldError @@ -38654,7 +47726,22 @@ func (p *TPlanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 103 goto WriteFieldError } - + if err = p.writeField104(oprot); err != nil { + fieldId = 104 + goto WriteFieldError + } + if err = p.writeField105(oprot); err != nil { + fieldId = 105 + goto WriteFieldError + } + if err = p.writeField106(oprot); err != nil { + fieldId = 106 + goto WriteFieldError + } + if err = p.writeField107(oprot); err != nil { + fieldId = 107 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -39516,6 +48603,60 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 49 end error: ", p), err) } +func (p *TPlanNode) writeField50(oprot thrift.TProtocol) (err error) { + if p.IsSetDistributeExprLists() { + if err = oprot.WriteFieldBegin("distribute_expr_lists", thrift.LIST, 50); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.LIST, len(p.DistributeExprLists)); err != nil { + return err + } + for _, v := range p.DistributeExprLists { + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 50 end error: ", p), err) +} + +func (p *TPlanNode) writeField51(oprot thrift.TProtocol) (err error) { + if p.IsSetIsSerialOperator() { + if err = oprot.WriteFieldBegin("is_serial_operator", thrift.BOOL, 51); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsSerialOperator); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 51 end error: ", p), err) +} + func (p *TPlanNode) writeField101(oprot thrift.TProtocol) (err error) { if p.IsSetProjections() { if err = oprot.WriteFieldBegin("projections", thrift.LIST, 101); err != nil { @@ -39581,11 +48722,120 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 103 end error: ", p), err) } +func (p *TPlanNode) writeField104(oprot thrift.TProtocol) (err error) { + if p.IsSetIntermediateProjectionsList() { + if err = oprot.WriteFieldBegin("intermediate_projections_list", thrift.LIST, 104); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.LIST, len(p.IntermediateProjectionsList)); err != nil { + return err + } + for _, v := range p.IntermediateProjectionsList { + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return err + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 104 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 104 end error: ", p), err) +} + +func (p *TPlanNode) writeField105(oprot thrift.TProtocol) (err error) { + if p.IsSetIntermediateOutputTupleIdList() { + if err = oprot.WriteFieldBegin("intermediate_output_tuple_id_list", thrift.LIST, 105); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.IntermediateOutputTupleIdList)); err != nil { + return err + } + for _, v := range p.IntermediateOutputTupleIdList { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 105 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 105 end error: ", p), err) +} + +func (p *TPlanNode) writeField106(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 106); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 106 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 106 end error: ", p), err) +} + +func (p *TPlanNode) writeField107(oprot thrift.TProtocol) (err error) { + if p.IsSetNereidsId() { + if err = oprot.WriteFieldBegin("nereids_id", thrift.I32, 107); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NereidsId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 107 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 107 end error: ", p), err) +} + func (p *TPlanNode) String() string { if p == nil { return "" } return fmt.Sprintf("TPlanNode(%+v)", *p) + } func (p *TPlanNode) DeepEqual(ano *TPlanNode) bool { @@ -39723,6 +48973,12 @@ func (p *TPlanNode) DeepEqual(ano *TPlanNode) bool { if !p.Field49DeepEqual(ano.PushDownCount) { return false } + if !p.Field50DeepEqual(ano.DistributeExprLists) { + return false + } + if !p.Field51DeepEqual(ano.IsSerialOperator) { + return false + } if !p.Field101DeepEqual(ano.Projections) { return false } @@ -39732,6 +48988,18 @@ func (p *TPlanNode) DeepEqual(ano *TPlanNode) bool { if !p.Field103DeepEqual(ano.PartitionSortNode) { return false } + if !p.Field104DeepEqual(ano.IntermediateProjectionsList) { + return false + } + if !p.Field105DeepEqual(ano.IntermediateOutputTupleIdList) { + return false + } + if !p.Field106DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } + if !p.Field107DeepEqual(ano.NereidsId) { + return false + } return true } @@ -40076,6 +49344,37 @@ func (p *TPlanNode) Field49DeepEqual(src *int64) bool { } return true } +func (p *TPlanNode) Field50DeepEqual(src [][]*exprs.TExpr) bool { + + if len(p.DistributeExprLists) != len(src) { + return false + } + for i, v := range p.DistributeExprLists { + _src := src[i] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true +} +func (p *TPlanNode) Field51DeepEqual(src *bool) bool { + + if p.IsSerialOperator == src { + return true + } else if p.IsSerialOperator == nil || src == nil { + return false + } + if *p.IsSerialOperator != *src { + return false + } + return true +} func (p *TPlanNode) Field101DeepEqual(src []*exprs.TExpr) bool { if len(p.Projections) != len(src) { @@ -40108,6 +49407,63 @@ func (p *TPlanNode) Field103DeepEqual(src *TPartitionSortNode) bool { } return true } +func (p *TPlanNode) Field104DeepEqual(src [][]*exprs.TExpr) bool { + + if len(p.IntermediateProjectionsList) != len(src) { + return false + } + for i, v := range p.IntermediateProjectionsList { + _src := src[i] + if len(v) != len(_src) { + return false + } + for i, v := range v { + _src1 := _src[i] + if !v.DeepEqual(_src1) { + return false + } + } + } + return true +} +func (p *TPlanNode) Field105DeepEqual(src []types.TTupleId) bool { + + if len(p.IntermediateOutputTupleIdList) != len(src) { + return false + } + for i, v := range p.IntermediateOutputTupleIdList { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TPlanNode) Field106DeepEqual(src []int32) bool { + + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false + } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TPlanNode) Field107DeepEqual(src *int32) bool { + + if p.NereidsId == src { + return true + } else if p.NereidsId == nil || src == nil { + return false + } + if *p.NereidsId != *src { + return false + } + return true +} type TPlan struct { Nodes []*TPlanNode `thrift:"nodes,1,required" frugal:"1,required,list" json:"nodes"` @@ -40118,7 +49474,6 @@ func NewTPlan() *TPlan { } func (p *TPlan) InitDefault() { - *p = TPlan{} } func (p *TPlan) GetNodes() (v []*TPlanNode) { @@ -40158,17 +49513,14 @@ func (p *TPlan) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodes = true 
- } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -40204,18 +49556,22 @@ func (p *TPlan) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Nodes = make([]*TPlanNode, 0, size) + _field := make([]*TPlanNode, 0, size) + values := make([]TPlanNode, size) for i := 0; i < size; i++ { - _elem := NewTPlanNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Nodes = append(p.Nodes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Nodes = _field return nil } @@ -40229,7 +49585,6 @@ func (p *TPlan) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -40278,6 +49633,7 @@ func (p *TPlan) String() string { return "" } return fmt.Sprintf("TPlan(%+v)", *p) + } func (p *TPlan) DeepEqual(ano *TPlan) bool { diff --git a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go index 52274d9b..4dfc203e 100644 --- a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package plannodes @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs" "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/opcodes" @@ -1234,6 +1235,20 @@ func (p *THdfsParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1348,6 +1363,19 @@ func (p *THdfsParams) FastReadField5(buf []byte) (int, error) { return offset, nil } +func (p *THdfsParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RootPath = &v + + } + return offset, nil +} + // for compatibility func (p *THdfsParams) FastWrite(buf []byte) int { return 0 @@ -1362,6 +1390,7 @@ func (p *THdfsParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWri offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -1377,6 +1406,7 @@ func (p *THdfsParams) BLength() int { l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1445,6 +1475,17 @@ func (p *THdfsParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWri return offset 
} +func (p *THdfsParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRootPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "root_path", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RootPath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *THdfsParams) field1Length() int { l := 0 if p.IsSetFsName() { @@ -1503,6 +1544,17 @@ func (p *THdfsParams) field5Length() int { return l } +func (p *THdfsParams) field6Length() int { + l := 0 + if p.IsSetRootPath() { + l += bthrift.Binary.FieldBeginLength("root_path", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.RootPath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TBrokerRangeDesc) FastRead(buf []byte) (int, error) { var err error var offset int @@ -4422,6 +4474,20 @@ func (p *TFileTextScanRangeParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4535,6 +4601,19 @@ func (p *TFileTextScanRangeParams) FastReadField6(buf []byte) (int, error) { return offset, nil } +func (p *TFileTextScanRangeParams) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NullFormat = &v + + } + return offset, nil +} + // for compatibility func (p *TFileTextScanRangeParams) FastWrite(buf []byte) int { return 0 @@ -4550,6 +4629,7 @@ func (p *TFileTextScanRangeParams) FastWriteNocopy(buf []byte, binaryWriter bthr offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -4566,6 +4646,7 @@ func (p *TFileTextScanRangeParams) BLength() int { l += p.field4Length() l += p.field5Length() l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4638,6 +4719,17 @@ func (p *TFileTextScanRangeParams) fastWriteField6(buf []byte, binaryWriter bthr return offset } +func (p *TFileTextScanRangeParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNullFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "null_format", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.NullFormat) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TFileTextScanRangeParams) field1Length() int { l := 0 if p.IsSetColumnSeparator() { @@ -4704,6 +4796,17 @@ func (p *TFileTextScanRangeParams) field6Length() int { return l } +func (p *TFileTextScanRangeParams) field7Length() int { + l := 0 + if p.IsSetNullFormat() { + l += bthrift.Binary.FieldBeginLength("null_format", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.NullFormat) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TFileScanSlotInfo) FastRead(buf 
[]byte) (int, error) { var err error var offset int @@ -5064,6 +5167,20 @@ func (p *TFileAttributes) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 1001: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1001(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -5242,6 +5359,19 @@ func (p *TFileAttributes) FastReadField11(buf []byte) (int, error) { return offset, nil } +func (p *TFileAttributes) FastReadField1001(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IgnoreCsvRedundantCol = &v + + } + return offset, nil +} + // for compatibility func (p *TFileAttributes) FastWrite(buf []byte) int { return 0 @@ -5258,6 +5388,7 @@ func (p *TFileAttributes) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField1001(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) @@ -5283,6 +5414,7 @@ func (p *TFileAttributes) BLength() int { l += p.field9Length() l += p.field10Length() l += p.field11Length() + l += p.field1001Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5409,6 +5541,17 @@ func (p *TFileAttributes) fastWriteField11(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TFileAttributes) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIgnoreCsvRedundantCol() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ignore_csv_redundant_col", thrift.BOOL, 1001) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IgnoreCsvRedundantCol) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TFileAttributes) field1Length() int { l := 0 if p.IsSetTextParams() { @@ -5529,6 +5672,17 @@ func (p *TFileAttributes) field11Length() int { return l } +func (p *TFileAttributes) field1001Length() int { + l := 0 + if p.IsSetIgnoreCsvRedundantCol() { + l += bthrift.Binary.FieldBeginLength("ignore_csv_redundant_col", thrift.BOOL, 1001) + l += bthrift.Binary.BoolLength(*p.IgnoreCsvRedundantCol) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TIcebergDeleteFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int @@ -5607,6 +5761,20 @@ func (p *TIcebergDeleteFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -5711,6 +5879,19 @@ func (p *TIcebergDeleteFileDesc) FastReadField4(buf []byte) (int, error) { return offset, nil } +func (p *TIcebergDeleteFileDesc) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); 
err != nil { + return offset, err + } else { + offset += l + p.Content = &v + + } + return offset, nil +} + // for compatibility func (p *TIcebergDeleteFileDesc) FastWrite(buf []byte) int { return 0 @@ -5722,6 +5903,7 @@ func (p *TIcebergDeleteFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrif if p != nil { offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) } @@ -5738,6 +5920,7 @@ func (p *TIcebergDeleteFileDesc) BLength() int { l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -5796,6 +5979,17 @@ func (p *TIcebergDeleteFileDesc) fastWriteField4(buf []byte, binaryWriter bthrif return offset } +func (p *TIcebergDeleteFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetContent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "content", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.Content) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TIcebergDeleteFileDesc) field1Length() int { l := 0 if p.IsSetPath() { @@ -5842,6 +6036,17 @@ func (p *TIcebergDeleteFileDesc) field4Length() int { return l } +func (p *TIcebergDeleteFileDesc) field5Length() int { + l := 0 + if p.IsSetContent() { + l += bthrift.Binary.FieldBeginLength("content", thrift.I32, 5) + l += bthrift.Binary.I32Length(*p.Content) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TIcebergFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int @@ -5934,6 +6139,34 @@ func (p *TIcebergFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6048,6 +6281,32 @@ func (p *TIcebergFileDesc) FastReadField5(buf []byte) (int, error) { return offset, nil } +func (p *TIcebergFileDesc) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.OriginalFilePath = &v + + } + return offset, nil +} + +func (p *TIcebergFileDesc) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RowCount = &v + + } + return offset, nil +} + // for compatibility func (p *TIcebergFileDesc) FastWrite(buf []byte) int { return 0 @@ -6060,8 +6319,10 @@ func (p *TIcebergFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], 
binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -6077,6 +6338,8 @@ func (p *TIcebergFileDesc) BLength() int { l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -6144,6 +6407,28 @@ func (p *TIcebergFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TIcebergFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOriginalFilePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "original_file_path", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.OriginalFilePath) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRowCount() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_count", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowCount) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TIcebergFileDesc) field1Length() int { l := 0 if p.IsSetFormatVersion() { @@ -6201,7 +6486,29 @@ func (p *TIcebergFileDesc) field5Length() int { return l } -func (p *TPaimonFileDesc) FastRead(buf []byte) (int, error) { +func (p *TIcebergFileDesc) field6Length() int { + l := 0 + if p.IsSetOriginalFilePath() { + l += bthrift.Binary.FieldBeginLength("original_file_path", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.OriginalFilePath) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergFileDesc) field7Length() int { + l := 0 + if p.IsSetRowCount() { + l += bthrift.Binary.FieldBeginLength("row_count", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.RowCount) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPaimonDeletionFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6238,7 +6545,7 @@ func (p *TPaimonFileDesc) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -6252,7 +6559,7 @@ func (p *TPaimonFileDesc) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -6265,48 +6572,6 @@ func (p *TPaimonFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId 
== thrift.MAP { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6333,7 +6598,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonFileDesc[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonDeletionFileDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -6342,299 +6607,143 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TPaimonFileDesc) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.PaimonSplit = &v - - } - return offset, nil -} - -func (p *TPaimonFileDesc) FastReadField2(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.PaimonColumnNames = &v - - } - return offset, nil -} - -func (p *TPaimonFileDesc) FastReadField3(buf []byte) (int, error) { +func (p *TPaimonDeletionFileDesc) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DbName = &v + p.Path = &v } return offset, nil } -func (p *TPaimonFileDesc) FastReadField4(buf []byte) (int, error) { +func (p *TPaimonDeletionFileDesc) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableName = &v + p.Offset = &v } return offset, nil } -func (p *TPaimonFileDesc) FastReadField5(buf []byte) (int, error) { +func (p *TPaimonDeletionFileDesc) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PaimonPredicate = &v - - } - return offset, nil -} - -func (p *TPaimonFileDesc) FastReadField6(buf []byte) (int, error) { - offset := 0 - - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PaimonOptions = make(map[string]string, size) - for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _val = v - - } + p.Length = &v - p.PaimonOptions[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } return offset, nil } // for compatibility -func (p *TPaimonFileDesc) 
FastWrite(buf []byte) int { +func (p *TPaimonDeletionFileDesc) FastWrite(buf []byte) int { return 0 } -func (p *TPaimonFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonDeletionFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPaimonFileDesc") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPaimonDeletionFileDesc") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPaimonFileDesc) BLength() int { +func (p *TPaimonDeletionFileDesc) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPaimonFileDesc") + l += bthrift.Binary.StructBeginLength("TPaimonDeletionFileDesc") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPaimonFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPaimonSplit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_split", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonSplit) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPaimonFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPaimonColumnNames() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_column_names", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonColumnNames) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPaimonFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDbName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPaimonFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonDeletionFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + if p.IsSetPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "path", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Path) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPaimonFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonDeletionFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) 
int { offset := 0 - if p.IsSetPaimonPredicate() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_predicate", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonPredicate) + if p.IsSetOffset() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "offset", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Offset) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPaimonFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonDeletionFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPaimonOptions() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_options", thrift.MAP, 6) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) - var length int - for k, v := range p.PaimonOptions { - length++ - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "length", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Length) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPaimonFileDesc) field1Length() int { - l := 0 - if p.IsSetPaimonSplit() { - l += bthrift.Binary.FieldBeginLength("paimon_split", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.PaimonSplit) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPaimonFileDesc) field2Length() int { - l := 0 - if p.IsSetPaimonColumnNames() { - l += bthrift.Binary.FieldBeginLength("paimon_column_names", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.PaimonColumnNames) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPaimonFileDesc) field3Length() int { - l := 0 - if p.IsSetDbName() { - l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.DbName) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPaimonFileDesc) field4Length() int { +func (p *TPaimonDeletionFileDesc) field1Length() int { l := 0 - if p.IsSetTableName() { - l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.TableName) + if p.IsSetPath() { + l += bthrift.Binary.FieldBeginLength("path", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Path) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPaimonFileDesc) field5Length() int { +func (p *TPaimonDeletionFileDesc) field2Length() int { l := 0 - if p.IsSetPaimonPredicate() { - l += bthrift.Binary.FieldBeginLength("paimon_predicate", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.PaimonPredicate) + if p.IsSetOffset() { + l += bthrift.Binary.FieldBeginLength("offset", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.Offset) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPaimonFileDesc) field6Length() int { +func (p *TPaimonDeletionFileDesc) field3Length() int { l := 0 - if p.IsSetPaimonOptions() { - l += bthrift.Binary.FieldBeginLength("paimon_options", thrift.MAP, 6) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 
len(p.PaimonOptions)) - for k, v := range p.PaimonOptions { - - l += bthrift.Binary.StringLengthNocopy(k) - - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetLength() { + l += bthrift.Binary.FieldBeginLength("length", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Length) - } - l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -6727,7 +6836,7 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.MAP { l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { @@ -6741,7 +6850,7 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { } } case 7: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { @@ -6755,7 +6864,7 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { } } case 8: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { @@ -6769,7 +6878,7 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { } } case 9: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { @@ -6783,7 +6892,7 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { } } case 10: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { @@ -6796,6 +6905,62 @@ func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -6822,7 +6987,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THudiFileDesc[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPaimonFileDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) ReadFieldEndError: @@ -6831,107 +6996,104 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *THudiFileDesc) FastReadField1(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.InstantTime = &v + p.PaimonSplit = &v } return offset, nil } -func (p *THudiFileDesc) FastReadField2(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField2(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Serde = &v + p.PaimonColumnNames = &v } return offset, nil } -func (p *THudiFileDesc) FastReadField3(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.InputFormat = &v + p.DbName = &v } return offset, nil } -func (p *THudiFileDesc) FastReadField4(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField4(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BasePath = &v + p.TableName = &v } return offset, nil } -func (p *THudiFileDesc) FastReadField5(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField5(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DataFilePath = &v - - } - return offset, nil -} - -func (p *THudiFileDesc) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.DataFileLength = &v + p.PaimonPredicate = &v } return offset, nil } -func (p *THudiFileDesc) FastReadField7(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField6(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.DeltaLogs = make([]string, 0, size) + p.PaimonOptions = make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _elem = v + _key = v } - p.DeltaLogs = append(p.DeltaLogs, _elem) + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.PaimonOptions[_key] = _val } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -6939,124 +7101,169 @@ func (p *THudiFileDesc) FastReadField7(buf []byte) (int, error) { return offset, nil } -func (p *THudiFileDesc) FastReadField8(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField7(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err + } else { + offset += l + p.CtlId = &v + } 
- p.ColumnNames = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset, nil +} - _elem = v +func (p *TPaimonFileDesc) FastReadField8(buf []byte) (int, error) { + offset := 0 - } + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbId = &v - p.ColumnNames = append(p.ColumnNames, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TPaimonFileDesc) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + p.TblId = &v + } return offset, nil } -func (p *THudiFileDesc) FastReadField9(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField10(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err + } else { + offset += l + p.LastUpdateTime = &v + } - p.ColumnTypes = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset, nil +} - _elem = v +func (p *TPaimonFileDesc) FastReadField11(buf []byte) (int, error) { + offset := 0 - } + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FileFormat = &v - p.ColumnTypes = append(p.ColumnTypes, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TPaimonFileDesc) FastReadField12(buf []byte) (int, error) { + offset := 0 + + tmp := NewTPaimonDeletionFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } + p.DeletionFile = tmp return offset, nil } -func (p *THudiFileDesc) FastReadField10(buf []byte) (int, error) { +func (p *TPaimonFileDesc) FastReadField13(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.NestedFields = make([]string, 0, size) + p.HadoopConf = make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _elem = v + _key = v } - p.NestedFields = append(p.NestedFields, _elem) + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.HadoopConf[_key] = _val } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPaimonFileDesc) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.PaimonTable = &v + } return offset, nil } // for compatibility -func (p *THudiFileDesc) FastWrite(buf []byte) int { +func (p *TPaimonFileDesc) FastWrite(buf []byte) int { 
return 0 } -func (p *THudiFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THudiFileDesc") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPaimonFileDesc") if p != nil { - offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *THudiFileDesc) BLength() int { +func (p *TPaimonFileDesc) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("THudiFileDesc") + l += bthrift.Binary.StructBeginLength("TPaimonFileDesc") if p != nil { l += p.field1Length() l += p.field2Length() @@ -7068,281 +7275,359 @@ func (p *THudiFileDesc) BLength() int { l += p.field8Length() l += p.field9Length() l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *THudiFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetInstantTime() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "instant_time", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.InstantTime) + if p.IsSetPaimonSplit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_split", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonSplit) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSerde() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "serde", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Serde) + if p.IsSetPaimonColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_column_names", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonColumnNames) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } 
return offset } -func (p *THudiFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetInputFormat() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "input_format", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.InputFormat) + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetBasePath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_path", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BasePath) + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDataFilePath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_file_path", thrift.STRING, 5) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DataFilePath) + if p.IsSetPaimonPredicate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_predicate", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonPredicate) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDataFileLength() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_file_length", thrift.I64, 6) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DataFileLength) + if p.IsSetPaimonOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_options", thrift.MAP, 6) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.PaimonOptions { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDeltaLogs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delta_logs", thrift.LIST, 7) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for 
_, v := range p.DeltaLogs { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetCtlId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ctl_id", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CtlId) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnNames() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_names", thrift.LIST, 8) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ColumnNames { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetDbId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_id", thrift.I64, 8) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DbId) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetColumnTypes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_types", thrift.LIST, 9) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ColumnTypes { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + if p.IsSetTblId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tbl_id", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TblId) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPaimonFileDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNestedFields() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nested_fields", thrift.LIST, 10) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + if p.IsSetLastUpdateTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_update_time", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.LastUpdateTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPaimonFileDesc) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_format", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FileFormat) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPaimonFileDesc) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDeletionFile() { + 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "deletion_file", thrift.STRUCT, 12) + offset += p.DeletionFile.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPaimonFileDesc) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHadoopConf() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hadoop_conf", thrift.MAP, 13) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) var length int - for _, v := range p.NestedFields { + for k, v := range p.HadoopConf { length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *THudiFileDesc) field1Length() int { +func (p *TPaimonFileDesc) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPaimonTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_table", thrift.STRING, 14) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PaimonTable) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPaimonFileDesc) field1Length() int { l := 0 - if p.IsSetInstantTime() { - l += bthrift.Binary.FieldBeginLength("instant_time", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.InstantTime) + if p.IsSetPaimonSplit() { + l += bthrift.Binary.FieldBeginLength("paimon_split", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.PaimonSplit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field2Length() int { +func (p *TPaimonFileDesc) field2Length() int { l := 0 - if p.IsSetSerde() { - l += bthrift.Binary.FieldBeginLength("serde", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Serde) + if p.IsSetPaimonColumnNames() { + l += bthrift.Binary.FieldBeginLength("paimon_column_names", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.PaimonColumnNames) l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field3Length() int { +func (p *TPaimonFileDesc) field3Length() int { l := 0 - if p.IsSetInputFormat() { - l += bthrift.Binary.FieldBeginLength("input_format", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.InputFormat) + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field4Length() int { +func (p *TPaimonFileDesc) field4Length() int { l := 0 - if p.IsSetBasePath() { - l += bthrift.Binary.FieldBeginLength("base_path", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.BasePath) + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field5Length() int { +func (p *TPaimonFileDesc) field5Length() int { l := 0 - if p.IsSetDataFilePath() { - l += 
bthrift.Binary.FieldBeginLength("data_file_path", thrift.STRING, 5) - l += bthrift.Binary.StringLengthNocopy(*p.DataFilePath) + if p.IsSetPaimonPredicate() { + l += bthrift.Binary.FieldBeginLength("paimon_predicate", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.PaimonPredicate) l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field6Length() int { +func (p *TPaimonFileDesc) field6Length() int { l := 0 - if p.IsSetDataFileLength() { - l += bthrift.Binary.FieldBeginLength("data_file_length", thrift.I64, 6) - l += bthrift.Binary.I64Length(*p.DataFileLength) + if p.IsSetPaimonOptions() { + l += bthrift.Binary.FieldBeginLength("paimon_options", thrift.MAP, 6) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.PaimonOptions)) + for k, v := range p.PaimonOptions { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field7Length() int { +func (p *TPaimonFileDesc) field7Length() int { l := 0 - if p.IsSetDeltaLogs() { - l += bthrift.Binary.FieldBeginLength("delta_logs", thrift.LIST, 7) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.DeltaLogs)) - for _, v := range p.DeltaLogs { - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetCtlId() { + l += bthrift.Binary.FieldBeginLength("ctl_id", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.CtlId) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field8Length() int { +func (p *TPaimonFileDesc) field8Length() int { l := 0 - if p.IsSetColumnNames() { - l += bthrift.Binary.FieldBeginLength("column_names", thrift.LIST, 8) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnNames)) - for _, v := range p.ColumnNames { - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetDbId() { + l += bthrift.Binary.FieldBeginLength("db_id", thrift.I64, 8) + l += bthrift.Binary.I64Length(*p.DbId) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field9Length() int { +func (p *TPaimonFileDesc) field9Length() int { l := 0 - if p.IsSetColumnTypes() { - l += bthrift.Binary.FieldBeginLength("column_types", thrift.LIST, 9) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnTypes)) - for _, v := range p.ColumnTypes { - l += bthrift.Binary.StringLengthNocopy(v) + if p.IsSetTblId() { + l += bthrift.Binary.FieldBeginLength("tbl_id", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.TblId) - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *THudiFileDesc) field10Length() int { +func (p *TPaimonFileDesc) field10Length() int { l := 0 - if p.IsSetNestedFields() { - l += bthrift.Binary.FieldBeginLength("nested_fields", thrift.LIST, 10) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.NestedFields)) - for _, v := range p.NestedFields { + if p.IsSetLastUpdateTime() { + l += bthrift.Binary.FieldBeginLength("last_update_time", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.LastUpdateTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPaimonFileDesc) field11Length() int { + l := 0 + if p.IsSetFileFormat() { + l += bthrift.Binary.FieldBeginLength("file_format", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.FileFormat) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p 
*TPaimonFileDesc) field12Length() int { + l := 0 + if p.IsSetDeletionFile() { + l += bthrift.Binary.FieldBeginLength("deletion_file", thrift.STRUCT, 12) + l += p.DeletionFile.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPaimonFileDesc) field13Length() int { + l := 0 + if p.IsSetHadoopConf() { + l += bthrift.Binary.FieldBeginLength("hadoop_conf", thrift.MAP, 13) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.HadoopConf)) + for k, v := range p.HadoopConf { + + l += bthrift.Binary.StringLengthNocopy(k) + l += bthrift.Binary.StringLengthNocopy(v) } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.MapEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTransactionalHiveDeleteDeltaDesc) FastRead(buf []byte) (int, error) { +func (p *TPaimonFileDesc) field14Length() int { + l := 0 + if p.IsSetPaimonTable() { + l += bthrift.Binary.FieldBeginLength("paimon_table", thrift.STRING, 14) + l += bthrift.Binary.StringLengthNocopy(*p.PaimonTable) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7379,7 +7664,7 @@ func (p *TTransactionalHiveDeleteDeltaDesc) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -7392,17 +7677,143 @@ func (p *TTransactionalHiveDeleteDeltaDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField9(buf[offset:]) + 
offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { goto ReadFieldEndError } } @@ -7418,7 +7829,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDeleteDeltaDesc[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTrinoConnectorFileDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7427,42 +7838,78 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTransactionalHiveDeleteDeltaDesc) FastReadField1(buf []byte) (int, error) { +func (p *TTrinoConnectorFileDesc) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DirectoryLocation = &v + p.CatalogName = &v } return offset, nil } -func (p *TTransactionalHiveDeleteDeltaDesc) FastReadField2(buf []byte) (int, error) { +func (p *TTrinoConnectorFileDesc) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DbName = &v + + } + return offset, nil +} + +func (p *TTrinoConnectorFileDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableName = &v + + } + return offset, nil +} + +func (p *TTrinoConnectorFileDesc) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.FileNames = make([]string, 0, size) + p.TrinoConnectorOptions = make(map[string]string, size) for i := 0; i < size; i++ { - var _elem string + var _key string if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _elem = v + _key = v } - p.FileNames = append(p.FileNames, _elem) + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.TrinoConnectorOptions[_key] = _val } - if l, err := 
bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -7470,300 +7917,405 @@ func (p *TTransactionalHiveDeleteDeltaDesc) FastReadField2(buf []byte) (int, err return offset, nil } -// for compatibility -func (p *TTransactionalHiveDeleteDeltaDesc) FastWrite(buf []byte) int { - return 0 -} - -func (p *TTransactionalHiveDeleteDeltaDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) FastReadField5(buf []byte) (int, error) { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTransactionalHiveDeleteDeltaDesc") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TrinoConnectorTableHandle = &v + } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + return offset, nil } -func (p *TTransactionalHiveDeleteDeltaDesc) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TTransactionalHiveDeleteDeltaDesc") - if p != nil { - l += p.field1Length() - l += p.field2Length() +func (p *TTrinoConnectorFileDesc) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TrinoConnectorColumnHandles = &v + } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l + return offset, nil } -func (p *TTransactionalHiveDeleteDeltaDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) FastReadField7(buf []byte) (int, error) { offset := 0 - if p.IsSetDirectoryLocation() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "directory_location", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DirectoryLocation) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TrinoConnectorColumnMetadata = &v + } - return offset + return offset, nil } -func (p *TTransactionalHiveDeleteDeltaDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) FastReadField8(buf []byte) (int, error) { offset := 0 - if p.IsSetFileNames() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_names", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.FileNames { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TrinoConnectorColumnNames = &v + } - return offset + return offset, nil } -func (p *TTransactionalHiveDeleteDeltaDesc) field1Length() int { - l := 0 - if p.IsSetDirectoryLocation() { - l += bthrift.Binary.FieldBeginLength("directory_location", thrift.STRING, 1) - l += 
bthrift.Binary.StringLengthNocopy(*p.DirectoryLocation) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TTransactionalHiveDeleteDeltaDesc) field2Length() int { - l := 0 - if p.IsSetFileNames() { - l += bthrift.Binary.FieldBeginLength("file_names", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.FileNames)) - for _, v := range p.FileNames { - l += bthrift.Binary.StringLengthNocopy(v) - - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TTransactionalHiveDesc) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } +func (p *TTrinoConnectorFileDesc) FastReadField9(buf []byte) (int, error) { + offset := 0 - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 2: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } + p.TrinoConnectorSplit = &v - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDesc[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTransactionalHiveDesc) FastReadField1(buf []byte) (int, error) { +func (p *TTrinoConnectorFileDesc) FastReadField10(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Partition = &v + p.TrinoConnectorPredicate = &v } return offset, nil } -func (p *TTransactionalHiveDesc) FastReadField2(buf []byte) (int, error) { +func (p *TTrinoConnectorFileDesc) FastReadField11(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { 
- return offset, err - } - p.DeleteDeltas = make([]*TTransactionalHiveDeleteDeltaDesc, 0, size) - for i := 0; i < size; i++ { - _elem := NewTTransactionalHiveDeleteDeltaDesc() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.DeleteDeltas = append(p.DeleteDeltas, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.TrinoConnectorTrascationHandle = &v + } return offset, nil } // for compatibility -func (p *TTransactionalHiveDesc) FastWrite(buf []byte) int { +func (p *TTrinoConnectorFileDesc) FastWrite(buf []byte) int { return 0 } -func (p *TTransactionalHiveDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTransactionalHiveDesc") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTrinoConnectorFileDesc") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTransactionalHiveDesc) BLength() int { +func (p *TTrinoConnectorFileDesc) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTransactionalHiveDesc") + l += bthrift.Binary.StructBeginLength("TTrinoConnectorFileDesc") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTransactionalHiveDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartition() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Partition) + if p.IsSetCatalogName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CatalogName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTransactionalHiveDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTrinoConnectorFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDeleteDeltas() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delete_deltas", thrift.LIST, 2) - 
listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + if p.IsSetDbName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "db_name", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DbName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_name", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_options", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) var length int - for _, v := range p.DeleteDeltas { + for k, v := range p.TrinoConnectorOptions { length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTransactionalHiveDesc) field1Length() int { +func (p *TTrinoConnectorFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorTableHandle() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_table_handle", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorTableHandle) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorColumnHandles() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_column_handles", thrift.STRING, 6) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorColumnHandles) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorColumnMetadata() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_column_metadata", thrift.STRING, 7) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorColumnMetadata) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_column_names", thrift.STRING, 8) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], 
binaryWriter, *p.TrinoConnectorColumnNames) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorSplit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_split", thrift.STRING, 9) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorSplit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorPredicate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_predicate", thrift.STRING, 10) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorPredicate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorTrascationHandle() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_trascation_handle", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TrinoConnectorTrascationHandle) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTrinoConnectorFileDesc) field1Length() int { l := 0 - if p.IsSetPartition() { - l += bthrift.Binary.FieldBeginLength("partition", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.Partition) + if p.IsSetCatalogName() { + l += bthrift.Binary.FieldBeginLength("catalog_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.CatalogName) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTransactionalHiveDesc) field2Length() int { +func (p *TTrinoConnectorFileDesc) field2Length() int { l := 0 - if p.IsSetDeleteDeltas() { - l += bthrift.Binary.FieldBeginLength("delete_deltas", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DeleteDeltas)) - for _, v := range p.DeleteDeltas { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetDbName() { + l += bthrift.Binary.FieldBeginLength("db_name", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.DbName) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableFormatFileDesc) FastRead(buf []byte) (int, error) { +func (p *TTrinoConnectorFileDesc) field3Length() int { + l := 0 + if p.IsSetTableName() { + l += bthrift.Binary.FieldBeginLength("table_name", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.TableName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field4Length() int { + l := 0 + if p.IsSetTrinoConnectorOptions() { + l += bthrift.Binary.FieldBeginLength("trino_connector_options", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.TrinoConnectorOptions)) + for k, v := range p.TrinoConnectorOptions { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field5Length() int { + l := 0 + if p.IsSetTrinoConnectorTableHandle() { + l += bthrift.Binary.FieldBeginLength("trino_connector_table_handle", thrift.STRING, 5) + l += 
bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorTableHandle) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field6Length() int { + l := 0 + if p.IsSetTrinoConnectorColumnHandles() { + l += bthrift.Binary.FieldBeginLength("trino_connector_column_handles", thrift.STRING, 6) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorColumnHandles) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field7Length() int { + l := 0 + if p.IsSetTrinoConnectorColumnMetadata() { + l += bthrift.Binary.FieldBeginLength("trino_connector_column_metadata", thrift.STRING, 7) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorColumnMetadata) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field8Length() int { + l := 0 + if p.IsSetTrinoConnectorColumnNames() { + l += bthrift.Binary.FieldBeginLength("trino_connector_column_names", thrift.STRING, 8) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorColumnNames) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field9Length() int { + l := 0 + if p.IsSetTrinoConnectorSplit() { + l += bthrift.Binary.FieldBeginLength("trino_connector_split", thrift.STRING, 9) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorSplit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field10Length() int { + l := 0 + if p.IsSetTrinoConnectorPredicate() { + l += bthrift.Binary.FieldBeginLength("trino_connector_predicate", thrift.STRING, 10) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorPredicate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTrinoConnectorFileDesc) field11Length() int { + l := 0 + if p.IsSetTrinoConnectorTrascationHandle() { + l += bthrift.Binary.FieldBeginLength("trino_connector_trascation_handle", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.TrinoConnectorTrascationHandle) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMaxComputeFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -7800,7 +8352,7 @@ func (p *TTableFormatFileDesc) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -7814,7 +8366,7 @@ func (p *TTableFormatFileDesc) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -7827,34 +8379,6 @@ func (p *TTableFormatFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -7881,7 +8405,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read 
field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFormatFileDesc[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMaxComputeFileDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -7890,209 +8414,143 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTableFormatFileDesc) FastReadField1(buf []byte) (int, error) { +func (p *TMaxComputeFileDesc) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TableFormatType = &v + p.PartitionSpec = &v } return offset, nil } -func (p *TTableFormatFileDesc) FastReadField2(buf []byte) (int, error) { +func (p *TMaxComputeFileDesc) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := NewTIcebergFileDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - } - p.IcebergParams = tmp - return offset, nil -} - -func (p *TTableFormatFileDesc) FastReadField3(buf []byte) (int, error) { - offset := 0 + p.SessionId = &v - tmp := NewTHudiFileDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.HudiParams = tmp return offset, nil } -func (p *TTableFormatFileDesc) FastReadField4(buf []byte) (int, error) { +func (p *TMaxComputeFileDesc) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := NewTPaimonFileDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - } - p.PaimonParams = tmp - return offset, nil -} - -func (p *TTableFormatFileDesc) FastReadField5(buf []byte) (int, error) { - offset := 0 + p.TableBatchReadSession = &v - tmp := NewTTransactionalHiveDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.TransactionalHiveParams = tmp return offset, nil } // for compatibility -func (p *TTableFormatFileDesc) FastWrite(buf []byte) int { +func (p *TMaxComputeFileDesc) FastWrite(buf []byte) int { return 0 } -func (p *TTableFormatFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMaxComputeFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableFormatFileDesc") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMaxComputeFileDesc") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTableFormatFileDesc) BLength() int { +func (p *TMaxComputeFileDesc) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTableFormatFileDesc") + l += 
bthrift.Binary.StructBeginLength("TMaxComputeFileDesc") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTableFormatFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMaxComputeFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTableFormatType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_type", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableFormatType) + if p.IsSetPartitionSpec() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_spec", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.PartitionSpec) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableFormatFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMaxComputeFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIcebergParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_params", thrift.STRUCT, 2) - offset += p.IcebergParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if p.IsSetSessionId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "session_id", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SessionId) -func (p *TTableFormatFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetHudiParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hudi_params", thrift.STRUCT, 3) - offset += p.HudiParams.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableFormatFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMaxComputeFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPaimonParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_params", thrift.STRUCT, 4) - offset += p.PaimonParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} + if p.IsSetTableBatchReadSession() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_batch_read_session", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableBatchReadSession) -func (p *TTableFormatFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTransactionalHiveParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transactional_hive_params", thrift.STRUCT, 5) - offset += p.TransactionalHiveParams.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableFormatFileDesc) field1Length() int { +func (p *TMaxComputeFileDesc) field1Length() int { l := 0 - if p.IsSetTableFormatType() { - l += bthrift.Binary.FieldBeginLength("table_format_type", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.TableFormatType) + if p.IsSetPartitionSpec() { + l += 
bthrift.Binary.FieldBeginLength("partition_spec", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.PartitionSpec) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableFormatFileDesc) field2Length() int { +func (p *TMaxComputeFileDesc) field2Length() int { l := 0 - if p.IsSetIcebergParams() { - l += bthrift.Binary.FieldBeginLength("iceberg_params", thrift.STRUCT, 2) - l += p.IcebergParams.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} + if p.IsSetSessionId() { + l += bthrift.Binary.FieldBeginLength("session_id", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.SessionId) -func (p *TTableFormatFileDesc) field3Length() int { - l := 0 - if p.IsSetHudiParams() { - l += bthrift.Binary.FieldBeginLength("hudi_params", thrift.STRUCT, 3) - l += p.HudiParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableFormatFileDesc) field4Length() int { +func (p *TMaxComputeFileDesc) field3Length() int { l := 0 - if p.IsSetPaimonParams() { - l += bthrift.Binary.FieldBeginLength("paimon_params", thrift.STRUCT, 4) - l += p.PaimonParams.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} + if p.IsSetTableBatchReadSession() { + l += bthrift.Binary.FieldBeginLength("table_batch_read_session", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.TableBatchReadSession) -func (p *TTableFormatFileDesc) field5Length() int { - l := 0 - if p.IsSetTransactionalHiveParams() { - l += bthrift.Binary.FieldBeginLength("transactional_hive_params", thrift.STRUCT, 5) - l += p.TransactionalHiveParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { +func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -8115,7 +8573,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -8129,7 +8587,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -8143,7 +8601,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -8157,7 +8615,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -8171,7 +8629,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 5: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { @@ -8185,7 +8643,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 6: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { @@ -8213,7 +8671,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 8: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { @@ -8227,7 +8685,7 @@ func (p *TFileScanRangeParams) 
FastRead(buf []byte) (int, error) { } } case 9: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField9(buf[offset:]) offset += l if err != nil { @@ -8241,7 +8699,7 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { } } case 10: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { @@ -8254,179 +8712,11 @@ func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 12: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField12(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 13: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField13(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 14: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField14(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 15: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField15(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 16: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField16(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 17: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField17(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 18: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField18(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 19: - if fieldTypeId == thrift.MAP { - l, err = p.FastReadField19(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 20: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField20(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 21: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField21(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != 
nil { - goto SkipFieldError - } - } - case 22: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField22(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } } @@ -8448,7 +8738,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRangeParams[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THudiFileDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -8457,91 +8747,85 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) FastReadField1(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := types.TFileType(v) - p.FileType = &tmp + p.InstantTime = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField2(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TFileFormatType(v) - p.FormatType = &tmp + p.Serde = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField3(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TFileCompressType(v) - p.CompressType = &tmp + p.InputFormat = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField4(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SrcTupleId = &v + p.BasePath = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField5(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DestTupleId = &v + p.DataFilePath = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField6(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err 
:= bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.NumOfColumnsFromFile = &v + p.DataFileLength = &v } return offset, nil } -func (p *TFileScanRangeParams) FastReadField7(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField7(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -8549,16 +8833,19 @@ func (p *TFileScanRangeParams) FastReadField7(buf []byte) (int, error) { if err != nil { return offset, err } - p.RequiredSlots = make([]*TFileScanSlotInfo, 0, size) + p.DeltaLogs = make([]string, 0, size) for i := 0; i < size; i++ { - _elem := NewTFileScanSlotInfo() - if l, err := _elem.FastRead(buf[offset:]); err != nil { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + + _elem = v + } - p.RequiredSlots = append(p.RequiredSlots, _elem) + p.DeltaLogs = append(p.DeltaLogs, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -8568,52 +8855,29 @@ func (p *TFileScanRangeParams) FastReadField7(buf []byte) (int, error) { return offset, nil } -func (p *TFileScanRangeParams) FastReadField8(buf []byte) (int, error) { - offset := 0 - - tmp := NewTHdfsParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.HdfsParams = tmp - return offset, nil -} - -func (p *TFileScanRangeParams) FastReadField9(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField8(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.Properties = make(map[string]string, size) + p.ColumnNames = make([]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val string + var _elem string if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _val = v + _elem = v } - p.Properties[_key] = _val + p.ColumnNames = append(p.ColumnNames, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -8621,35 +8885,29 @@ func (p *TFileScanRangeParams) FastReadField9(buf []byte) (int, error) { return offset, nil } -func (p *TFileScanRangeParams) FastReadField10(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField9(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.ExprOfDestSlot = make(map[types.TSlotId]*exprs.TExpr, size) + p.ColumnTypes = make([]string, 0, size) for i := 0; i < size; i++ { - var _key types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _key = v + _elem = v } - _val := exprs.NewTExpr() - if l, err := _val.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.ExprOfDestSlot[_key] = 
_val + p.ColumnTypes = append(p.ColumnTypes, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -8657,35 +8915,29 @@ func (p *TFileScanRangeParams) FastReadField10(buf []byte) (int, error) { return offset, nil } -func (p *TFileScanRangeParams) FastReadField11(buf []byte) (int, error) { +func (p *THudiFileDesc) FastReadField10(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.DefaultValueOfSrcSlot = make(map[types.TSlotId]*exprs.TExpr, size) + p.NestedFields = make([]string, 0, size) for i := 0; i < size; i++ { - var _key types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - _key = v + _elem = v } - _val := exprs.NewTExpr() - if l, err := _val.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.DefaultValueOfSrcSlot[_key] = _val + p.NestedFields = append(p.NestedFields, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -8693,607 +8945,4380 @@ func (p *TFileScanRangeParams) FastReadField11(buf []byte) (int, error) { return offset, nil } -func (p *TFileScanRangeParams) FastReadField12(buf []byte) (int, error) { - offset := 0 +// for compatibility +func (p *THudiFileDesc) FastWrite(buf []byte) int { + return 0 +} - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err +func (p *THudiFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THudiFileDesc") + if p != nil { + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) } - p.DestSidToSrcSidWithoutTrans = make(map[types.TSlotId]types.TSlotId, size) - for i := 0; i < size; i++ { - var _key types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _val = v - - } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} - p.DestSidToSrcSidWithoutTrans[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l +func (p *THudiFileDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("THudiFileDesc") + if p != nil { + l += p.field1Length() + 
l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TFileScanRangeParams) FastReadField13(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInstantTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "instant_time", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.InstantTime) - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.StrictMode = &v - + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField14(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetSerde() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "serde", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Serde) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) - for i := 0; i < size; i++ { - _elem := types.NewTNetworkAddress() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.BrokerAddresses = append(p.BrokerAddresses, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField15(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetInputFormat() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "input_format", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.InputFormat) - tmp := NewTFileAttributes() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.FileAttributes = tmp - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField16(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetBasePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_path", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BasePath) - tmp := exprs.NewTExpr() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.PreFilterExprs = tmp - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField17(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetDataFilePath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_file_path", thrift.STRING, 5) + offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DataFilePath) - tmp := NewTTableFormatFileDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.TableFormatParams = tmp - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField18(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetDataFileLength() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "data_file_length", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DataFileLength) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.ColumnIdxs = make([]int32, 0, size) - for i := 0; i < size; i++ { - var _elem int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset +} - _elem = v +func (p *THudiFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDeltaLogs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delta_logs", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.DeltaLogs { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) } - - p.ColumnIdxs = append(p.ColumnIdxs, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField19(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_names", thrift.LIST, 8) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ColumnNames { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.SlotNameToSchemaPos = make(map[string]int32, size) - for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _key = v - - } - - var _val int32 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + return offset +} - _val = v +func (p *THudiFileDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnTypes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_types", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := 
range p.ColumnTypes { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) } - - p.SlotNameToSchemaPos[_key] = _val - } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField20(buf []byte) (int, error) { +func (p *THudiFileDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetNestedFields() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nested_fields", thrift.LIST, 10) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.NestedFields { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PreFilterExprsList = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - - p.PreFilterExprsList = append(p.PreFilterExprsList, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset, nil + return offset } -func (p *TFileScanRangeParams) FastReadField21(buf []byte) (int, error) { - offset := 0 +func (p *THudiFileDesc) field1Length() int { + l := 0 + if p.IsSetInstantTime() { + l += bthrift.Binary.FieldBeginLength("instant_time", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.InstantTime) - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - p.LoadId = tmp - return offset, nil + return l } -func (p *TFileScanRangeParams) FastReadField22(buf []byte) (int, error) { - offset := 0 +func (p *THudiFileDesc) field2Length() int { + l := 0 + if p.IsSetSerde() { + l += bthrift.Binary.FieldBeginLength("serde", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Serde) - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() + } + return l +} - tmp := TTextSerdeType(v) - p.TextSerdeType = &tmp +func (p *THudiFileDesc) field3Length() int { + l := 0 + if p.IsSetInputFormat() { + l += bthrift.Binary.FieldBeginLength("input_format", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.InputFormat) + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -// for compatibility -func (p *TFileScanRangeParams) FastWrite(buf []byte) int { - return 0 -} +func (p *THudiFileDesc) field4Length() int { + l := 0 + if p.IsSetBasePath() { + l += bthrift.Binary.FieldBeginLength("base_path", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.BasePath) -func (p *TFileScanRangeParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += 
bthrift.Binary.WriteStructBegin(buf[offset:], "TFileScanRangeParams") - if p != nil { - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField13(buf[offset:], binaryWriter) - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) - offset += p.fastWriteField14(buf[offset:], binaryWriter) - offset += p.fastWriteField15(buf[offset:], binaryWriter) - offset += p.fastWriteField16(buf[offset:], binaryWriter) - offset += p.fastWriteField17(buf[offset:], binaryWriter) - offset += p.fastWriteField18(buf[offset:], binaryWriter) - offset += p.fastWriteField19(buf[offset:], binaryWriter) - offset += p.fastWriteField20(buf[offset:], binaryWriter) - offset += p.fastWriteField21(buf[offset:], binaryWriter) - offset += p.fastWriteField22(buf[offset:], binaryWriter) + l += bthrift.Binary.FieldEndLength() } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset + return l } -func (p *TFileScanRangeParams) BLength() int { +func (p *THudiFileDesc) field5Length() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFileScanRangeParams") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() - l += p.field13Length() - l += p.field14Length() - l += p.field15Length() - l += p.field16Length() - l += p.field17Length() - l += p.field18Length() - l += p.field19Length() - l += p.field20Length() - l += p.field21Length() - l += p.field22Length() + if p.IsSetDataFilePath() { + l += bthrift.Binary.FieldBeginLength("data_file_path", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.DataFilePath) + + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() return l } -func (p *TFileScanRangeParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFileType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) +func (p *THudiFileDesc) field6Length() int { + l := 0 + if p.IsSetDataFileLength() { + l += bthrift.Binary.FieldBeginLength("data_file_length", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.DataFileLength) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFormatType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "format_type", thrift.I32, 2) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FormatType)) +func (p *THudiFileDesc) field7Length() int { + 
l := 0 + if p.IsSetDeltaLogs() { + l += bthrift.Binary.FieldBeginLength("delta_logs", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.DeltaLogs)) + for _, v := range p.DeltaLogs { + l += bthrift.Binary.StringLengthNocopy(v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCompressType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) +func (p *THudiFileDesc) field8Length() int { + l := 0 + if p.IsSetColumnNames() { + l += bthrift.Binary.FieldBeginLength("column_names", thrift.LIST, 8) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnNames)) + for _, v := range p.ColumnNames { + l += bthrift.Binary.StringLengthNocopy(v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSrcTupleId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src_tuple_id", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.SrcTupleId) +func (p *THudiFileDesc) field9Length() int { + l := 0 + if p.IsSetColumnTypes() { + l += bthrift.Binary.FieldBeginLength("column_types", thrift.LIST, 9) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnTypes)) + for _, v := range p.ColumnTypes { + l += bthrift.Binary.StringLengthNocopy(v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDestTupleId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_tuple_id", thrift.I32, 5) +func (p *THudiFileDesc) field10Length() int { + l := 0 + if p.IsSetNestedFields() { + l += bthrift.Binary.FieldBeginLength("nested_fields", thrift.LIST, 10) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.NestedFields)) + for _, v := range p.NestedFields { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulFileDesc) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TLakeSoulFileDesc[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TLakeSoulFileDesc) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FilePaths = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.FilePaths = append(p.FilePaths, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLakeSoulFileDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PrimaryKeys = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.PrimaryKeys = append(p.PrimaryKeys, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLakeSoulFileDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + 
p.PartitionDescs = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.PartitionDescs = append(p.PartitionDescs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TLakeSoulFileDesc) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableSchema = &v + + } + return offset, nil +} + +func (p *TLakeSoulFileDesc) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Options = &v + + } + return offset, nil +} + +// for compatibility +func (p *TLakeSoulFileDesc) FastWrite(buf []byte) int { + return 0 +} + +func (p *TLakeSoulFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TLakeSoulFileDesc") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TLakeSoulFileDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TLakeSoulFileDesc") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TLakeSoulFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFilePaths() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_paths", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.FilePaths { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLakeSoulFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPrimaryKeys() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "primary_keys", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.PrimaryKeys { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLakeSoulFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionDescs() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_descs", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.PartitionDescs { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLakeSoulFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableSchema() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_schema", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableSchema) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLakeSoulFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "options", thrift.STRING, 5) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Options) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLakeSoulFileDesc) field1Length() int { + l := 0 + if p.IsSetFilePaths() { + l += bthrift.Binary.FieldBeginLength("file_paths", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.FilePaths)) + for _, v := range p.FilePaths { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulFileDesc) field2Length() int { + l := 0 + if p.IsSetPrimaryKeys() { + l += bthrift.Binary.FieldBeginLength("primary_keys", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.PrimaryKeys)) + for _, v := range p.PrimaryKeys { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulFileDesc) field3Length() int { + l := 0 + if p.IsSetPartitionDescs() { + l += bthrift.Binary.FieldBeginLength("partition_descs", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.PartitionDescs)) + for _, v := range p.PartitionDescs { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulFileDesc) field4Length() int { + l := 0 + if p.IsSetTableSchema() { + l += bthrift.Binary.FieldBeginLength("table_schema", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.TableSchema) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLakeSoulFileDesc) field5Length() int { + l := 0 + if p.IsSetOptions() { + l += bthrift.Binary.FieldBeginLength("options", thrift.STRING, 5) + l += bthrift.Binary.StringLengthNocopy(*p.Options) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTransactionalHiveDeleteDeltaDesc) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } 
+ if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDeleteDeltaDesc[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTransactionalHiveDeleteDeltaDesc) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DirectoryLocation = &v + + } + return offset, nil +} + +func (p *TTransactionalHiveDeleteDeltaDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FileNames = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.FileNames = append(p.FileNames, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TTransactionalHiveDeleteDeltaDesc) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTransactionalHiveDeleteDeltaDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTransactionalHiveDeleteDeltaDesc") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTransactionalHiveDeleteDeltaDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTransactionalHiveDeleteDeltaDesc") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() 
+ l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTransactionalHiveDeleteDeltaDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDirectoryLocation() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "directory_location", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.DirectoryLocation) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTransactionalHiveDeleteDeltaDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_names", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.FileNames { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTransactionalHiveDeleteDeltaDesc) field1Length() int { + l := 0 + if p.IsSetDirectoryLocation() { + l += bthrift.Binary.FieldBeginLength("directory_location", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.DirectoryLocation) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTransactionalHiveDeleteDeltaDesc) field2Length() int { + l := 0 + if p.IsSetFileNames() { + l += bthrift.Binary.FieldBeginLength("file_names", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.FileNames)) + for _, v := range p.FileNames { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTransactionalHiveDesc) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d 
begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTransactionalHiveDesc[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTransactionalHiveDesc) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Partition = &v + + } + return offset, nil +} + +func (p *TTransactionalHiveDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DeleteDeltas = make([]*TTransactionalHiveDeleteDeltaDesc, 0, size) + for i := 0; i < size; i++ { + _elem := NewTTransactionalHiveDeleteDeltaDesc() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.DeleteDeltas = append(p.DeleteDeltas, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TTransactionalHiveDesc) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTransactionalHiveDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTransactionalHiveDesc") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTransactionalHiveDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTransactionalHiveDesc") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTransactionalHiveDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartition() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Partition) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTransactionalHiveDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDeleteDeltas() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "delete_deltas", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.DeleteDeltas { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTransactionalHiveDesc) field1Length() int { + l := 0 + if p.IsSetPartition() { + l += bthrift.Binary.FieldBeginLength("partition", 
thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Partition) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTransactionalHiveDesc) field2Length() int { + l := 0 + if p.IsSetDeleteDeltas() { + l += bthrift.Binary.FieldBeginLength("delete_deltas", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.DeleteDeltas)) + for _, v := range p.DeleteDeltas { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + 
offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFormatFileDesc[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TTableFormatFileDesc) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableFormatType = &v + + } + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := NewTIcebergFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.IcebergParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHudiFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.HudiParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := NewTPaimonFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PaimonParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTransactionalHiveDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TransactionalHiveParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField6(buf []byte) (int, error) { + offset := 0 + + tmp := NewTMaxComputeFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MaxComputeParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField7(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTrinoConnectorFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TrinoConnectorParams = tmp + return offset, nil +} + +func (p *TTableFormatFileDesc) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTLakeSoulFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LakesoulParams = tmp + return offset, nil +} + +// for compatibility +func (p *TTableFormatFileDesc) FastWrite(buf []byte) int { + return 0 +} + +func (p *TTableFormatFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableFormatFileDesc") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + 
offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TTableFormatFileDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TTableFormatFileDesc") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TTableFormatFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableFormatType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_type", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableFormatType) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIcebergParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_params", thrift.STRUCT, 2) + offset += p.IcebergParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHudiParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hudi_params", thrift.STRUCT, 3) + offset += p.HudiParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPaimonParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "paimon_params", thrift.STRUCT, 4) + offset += p.PaimonParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTransactionalHiveParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transactional_hive_params", thrift.STRUCT, 5) + offset += p.TransactionalHiveParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxComputeParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_compute_params", thrift.STRUCT, 6) + offset += p.MaxComputeParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTrinoConnectorParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "trino_connector_params", thrift.STRUCT, 7) + offset += 
p.TrinoConnectorParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLakesoulParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "lakesoul_params", thrift.STRUCT, 8) + offset += p.LakesoulParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTableFormatFileDesc) field1Length() int { + l := 0 + if p.IsSetTableFormatType() { + l += bthrift.Binary.FieldBeginLength("table_format_type", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.TableFormatType) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field2Length() int { + l := 0 + if p.IsSetIcebergParams() { + l += bthrift.Binary.FieldBeginLength("iceberg_params", thrift.STRUCT, 2) + l += p.IcebergParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field3Length() int { + l := 0 + if p.IsSetHudiParams() { + l += bthrift.Binary.FieldBeginLength("hudi_params", thrift.STRUCT, 3) + l += p.HudiParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field4Length() int { + l := 0 + if p.IsSetPaimonParams() { + l += bthrift.Binary.FieldBeginLength("paimon_params", thrift.STRUCT, 4) + l += p.PaimonParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field5Length() int { + l := 0 + if p.IsSetTransactionalHiveParams() { + l += bthrift.Binary.FieldBeginLength("transactional_hive_params", thrift.STRUCT, 5) + l += p.TransactionalHiveParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field6Length() int { + l := 0 + if p.IsSetMaxComputeParams() { + l += bthrift.Binary.FieldBeginLength("max_compute_params", thrift.STRUCT, 6) + l += p.MaxComputeParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field7Length() int { + l := 0 + if p.IsSetTrinoConnectorParams() { + l += bthrift.Binary.FieldBeginLength("trino_connector_params", thrift.STRUCT, 7) + l += p.TrinoConnectorParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTableFormatFileDesc) field8Length() int { + l := 0 + if p.IsSetLakesoulParams() { + l += bthrift.Binary.FieldBeginLength("lakesoul_params", thrift.STRUCT, 8) + l += p.LakesoulParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = 
p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == 
thrift.STRUCT { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 17: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField17(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 18: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 21: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField21(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 22: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField22(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 23: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField23(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 24: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField24(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRangeParams[fieldId]), err) +SkipFieldError: 
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFileScanRangeParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TFileType(v) + p.FileType = &tmp + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TFileFormatType(v) + p.FormatType = &tmp + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TFileCompressType(v) + p.CompressType = &tmp + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SrcTupleId = &v + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DestTupleId = &v + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumOfColumnsFromFile = &v + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.RequiredSlots = make([]*TFileScanSlotInfo, 0, size) + for i := 0; i < size; i++ { + _elem := NewTFileScanSlotInfo() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.RequiredSlots = append(p.RequiredSlots, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTHdfsParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.HdfsParams = tmp + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.Properties = make(map[string]string, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.Properties[_key] = _val + } + if l, err := 
bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField10(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ExprOfDestSlot = make(map[types.TSlotId]*exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := exprs.NewTExpr() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.ExprOfDestSlot[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField11(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DefaultValueOfSrcSlot = make(map[types.TSlotId]*exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := exprs.NewTExpr() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.DefaultValueOfSrcSlot[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField12(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DestSidToSrcSidWithoutTrans = make(map[types.TSlotId]types.TSlotId, size) + for i := 0; i < size; i++ { + var _key types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.DestSidToSrcSidWithoutTrans[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StrictMode = &v + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField14(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTNetworkAddress() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.BrokerAddresses = append(p.BrokerAddresses, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField15(buf []byte) (int, error) { + offset := 0 
+ + tmp := NewTFileAttributes() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.FileAttributes = tmp + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField16(buf []byte) (int, error) { + offset := 0 + + tmp := exprs.NewTExpr() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PreFilterExprs = tmp + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField17(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTableFormatFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TableFormatParams = tmp + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField18(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnIdxs = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnIdxs = append(p.ColumnIdxs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField19(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.SlotNameToSchemaPos = make(map[string]int32, size) + for i := 0; i < size; i++ { + var _key string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.SlotNameToSchemaPos[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField20(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PreFilterExprsList = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PreFilterExprsList = append(p.PreFilterExprsList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField21(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadId = tmp + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField22(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TTextSerdeType(v) + p.TextSerdeType = &tmp + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField23(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + 
return offset, err + } else { + offset += l + p.SequenceMapCol = &v + + } + return offset, nil +} + +func (p *TFileScanRangeParams) FastReadField24(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SerializedTable = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFileScanRangeParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFileScanRangeParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFileScanRangeParams") + if p != nil { + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField17(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) + offset += p.fastWriteField21(buf[offset:], binaryWriter) + offset += p.fastWriteField22(buf[offset:], binaryWriter) + offset += p.fastWriteField23(buf[offset:], binaryWriter) + offset += p.fastWriteField24(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFileScanRangeParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFileScanRangeParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() + l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() + l += p.field21Length() + l += p.field22Length() + l += p.field23Length() + l += p.field24Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFileScanRangeParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetFormatType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "format_type", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FormatType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSrcTupleId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src_tuple_id", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.SrcTupleId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDestTupleId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_tuple_id", thrift.I32, 5) offset += bthrift.Binary.WriteI32(buf[offset:], *p.DestTupleId) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset +} + +func (p *TFileScanRangeParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumOfColumnsFromFile() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_of_columns_from_file", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumOfColumnsFromFile) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRequiredSlots() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "required_slots", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.RequiredSlots { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHdfsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hdfs_params", thrift.STRUCT, 8) + offset += p.HdfsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetProperties() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 9) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + var length int + for k, v := range p.Properties { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) + offset += 
bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExprOfDestSlot() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expr_of_dest_slot", thrift.MAP, 10) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.ExprOfDestSlot { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDefaultValueOfSrcSlot() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "default_value_of_src_slot", thrift.MAP, 11) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.DefaultValueOfSrcSlot { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDestSidToSrcSidWithoutTrans() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_sid_to_src_sid_without_trans", thrift.MAP, 12) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.DestSidToSrcSidWithoutTrans { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStrictMode() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.StrictMode) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBrokerAddresses() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "broker_addresses", thrift.LIST, 14) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.BrokerAddresses { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField15(buf []byte, binaryWriter 
bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileAttributes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_attributes", thrift.STRUCT, 15) + offset += p.FileAttributes.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPreFilterExprs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pre_filter_exprs", thrift.STRUCT, 16) + offset += p.PreFilterExprs.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableFormatParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_params", thrift.STRUCT, 17) + offset += p.TableFormatParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnIdxs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_idxs", thrift.LIST, 18) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.ColumnIdxs { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSlotNameToSchemaPos() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "slot_name_to_schema_pos", thrift.MAP, 19) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I32, 0) + var length int + for k, v := range p.SlotNameToSchemaPos { + length++ + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPreFilterExprsList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pre_filter_exprs_list", thrift.LIST, 20) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.PreFilterExprsList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 21) + offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTextSerdeType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "text_serde_type", thrift.I32, 22) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.TextSerdeType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSequenceMapCol() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sequence_map_col", thrift.STRING, 23) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SequenceMapCol) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) fastWriteField24(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSerializedTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "serialized_table", thrift.STRING, 24) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SerializedTable) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileScanRangeParams) field1Length() int { + l := 0 + if p.IsSetFileType() { + l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.FileType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field2Length() int { + l := 0 + if p.IsSetFormatType() { + l += bthrift.Binary.FieldBeginLength("format_type", thrift.I32, 2) + l += bthrift.Binary.I32Length(int32(*p.FormatType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field3Length() int { + l := 0 + if p.IsSetCompressType() { + l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.CompressType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field4Length() int { + l := 0 + if p.IsSetSrcTupleId() { + l += bthrift.Binary.FieldBeginLength("src_tuple_id", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.SrcTupleId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field5Length() int { + l := 0 + if p.IsSetDestTupleId() { + l += bthrift.Binary.FieldBeginLength("dest_tuple_id", thrift.I32, 5) + l += bthrift.Binary.I32Length(*p.DestTupleId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field6Length() int { + l := 0 + if p.IsSetNumOfColumnsFromFile() { + l += bthrift.Binary.FieldBeginLength("num_of_columns_from_file", thrift.I32, 6) + l += bthrift.Binary.I32Length(*p.NumOfColumnsFromFile) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field7Length() int { + l := 0 + if p.IsSetRequiredSlots() { + l += bthrift.Binary.FieldBeginLength("required_slots", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.RequiredSlots)) + for _, v := range p.RequiredSlots { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field8Length() int { + l := 0 + if p.IsSetHdfsParams() { + l += bthrift.Binary.FieldBeginLength("hdfs_params", thrift.STRUCT, 8) + l += p.HdfsParams.BLength() + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field9Length() int { + l := 0 + if p.IsSetProperties() { + l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 9) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) + for k, v := range p.Properties { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field10Length() int { + l := 0 + if p.IsSetExprOfDestSlot() { + l += bthrift.Binary.FieldBeginLength("expr_of_dest_slot", thrift.MAP, 10) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.ExprOfDestSlot)) + for k, v := range p.ExprOfDestSlot { + + l += bthrift.Binary.I32Length(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field11Length() int { + l := 0 + if p.IsSetDefaultValueOfSrcSlot() { + l += bthrift.Binary.FieldBeginLength("default_value_of_src_slot", thrift.MAP, 11) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.DefaultValueOfSrcSlot)) + for k, v := range p.DefaultValueOfSrcSlot { + + l += bthrift.Binary.I32Length(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field12Length() int { + l := 0 + if p.IsSetDestSidToSrcSidWithoutTrans() { + l += bthrift.Binary.FieldBeginLength("dest_sid_to_src_sid_without_trans", thrift.MAP, 12) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.DestSidToSrcSidWithoutTrans)) + var tmpK types.TSlotId + var tmpV types.TSlotId + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.DestSidToSrcSidWithoutTrans) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field13Length() int { + l := 0 + if p.IsSetStrictMode() { + l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.StrictMode) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field14Length() int { + l := 0 + if p.IsSetBrokerAddresses() { + l += bthrift.Binary.FieldBeginLength("broker_addresses", thrift.LIST, 14) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.BrokerAddresses)) + for _, v := range p.BrokerAddresses { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field15Length() int { + l := 0 + if p.IsSetFileAttributes() { + l += bthrift.Binary.FieldBeginLength("file_attributes", thrift.STRUCT, 15) + l += p.FileAttributes.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field16Length() int { + l := 0 + if p.IsSetPreFilterExprs() { + l += bthrift.Binary.FieldBeginLength("pre_filter_exprs", thrift.STRUCT, 16) + l += p.PreFilterExprs.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field17Length() int { + l := 0 + if p.IsSetTableFormatParams() { + l += bthrift.Binary.FieldBeginLength("table_format_params", thrift.STRUCT, 17) + l += p.TableFormatParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) 
field18Length() int { + l := 0 + if p.IsSetColumnIdxs() { + l += bthrift.Binary.FieldBeginLength("column_idxs", thrift.LIST, 18) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.ColumnIdxs)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.ColumnIdxs) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field19Length() int { + l := 0 + if p.IsSetSlotNameToSchemaPos() { + l += bthrift.Binary.FieldBeginLength("slot_name_to_schema_pos", thrift.MAP, 19) + l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I32, len(p.SlotNameToSchemaPos)) + for k, v := range p.SlotNameToSchemaPos { + + l += bthrift.Binary.StringLengthNocopy(k) + + l += bthrift.Binary.I32Length(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field20Length() int { + l := 0 + if p.IsSetPreFilterExprsList() { + l += bthrift.Binary.FieldBeginLength("pre_filter_exprs_list", thrift.LIST, 20) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PreFilterExprsList)) + for _, v := range p.PreFilterExprsList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field21Length() int { + l := 0 + if p.IsSetLoadId() { + l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 21) + l += p.LoadId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field22Length() int { + l := 0 + if p.IsSetTextSerdeType() { + l += bthrift.Binary.FieldBeginLength("text_serde_type", thrift.I32, 22) + l += bthrift.Binary.I32Length(int32(*p.TextSerdeType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field23Length() int { + l := 0 + if p.IsSetSequenceMapCol() { + l += bthrift.Binary.FieldBeginLength("sequence_map_col", thrift.STRING, 23) + l += bthrift.Binary.StringLengthNocopy(*p.SequenceMapCol) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileScanRangeParams) field24Length() int { + l := 0 + if p.IsSetSerializedTable() { + l += bthrift.Binary.FieldBeginLength("serialized_table", thrift.STRING, 24) + l += bthrift.Binary.StringLengthNocopy(*p.SerializedTable) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += 
l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileRangeDesc[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, 
fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TFileRangeDesc) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.LoadId = tmp + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Path = &v + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.StartOffset = &v + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Size = &v + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FileSize = v + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnsFromPath = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnsFromPath = append(p.ColumnsFromPath, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnsFromPathKeys = make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.ColumnsFromPathKeys = append(p.ColumnsFromPathKeys, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTableFormatFileDesc() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TableFormatParams = tmp + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ModificationTime = &v + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := 
types.TFileType(v) + p.FileType = &tmp + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TFileCompressType(v) + p.CompressType = &tmp + + } + return offset, nil +} + +func (p *TFileRangeDesc) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FsName = &v + + } + return offset, nil +} + +// for compatibility +func (p *TFileRangeDesc) FastWrite(buf []byte) int { + return 0 +} + +func (p *TFileRangeDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFileRangeDesc") + if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TFileRangeDesc) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFileRangeDesc") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFileRangeDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLoadId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 1) + offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "path", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Path) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetStartOffset() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "start_offset", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.StartOffset) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], 
"size", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Size) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], p.FileSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnsFromPath() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_from_path", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ColumnsFromPath { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetColumnsFromPathKeys() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_from_path_keys", thrift.LIST, 7) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range p.ColumnsFromPathKeys { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableFormatParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_params", thrift.STRUCT, 8) + offset += p.TableFormatParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetModificationTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "modification_time", thrift.I64, 9) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ModificationTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCompressType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + 
offset := 0 + if p.IsSetFsName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fs_name", thrift.STRING, 12) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FsName) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFileRangeDesc) field1Length() int { + l := 0 + if p.IsSetLoadId() { + l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 1) + l += p.LoadId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field2Length() int { + l := 0 + if p.IsSetPath() { + l += bthrift.Binary.FieldBeginLength("path", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Path) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field3Length() int { + l := 0 + if p.IsSetStartOffset() { + l += bthrift.Binary.FieldBeginLength("start_offset", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.StartOffset) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field4Length() int { + l := 0 + if p.IsSetSize() { + l += bthrift.Binary.FieldBeginLength("size", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.Size) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field5Length() int { + l := 0 + if p.IsSetFileSize() { + l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 5) + l += bthrift.Binary.I64Length(p.FileSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field6Length() int { + l := 0 + if p.IsSetColumnsFromPath() { + l += bthrift.Binary.FieldBeginLength("columns_from_path", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsFromPath)) + for _, v := range p.ColumnsFromPath { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field7Length() int { + l := 0 + if p.IsSetColumnsFromPathKeys() { + l += bthrift.Binary.FieldBeginLength("columns_from_path_keys", thrift.LIST, 7) + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsFromPathKeys)) + for _, v := range p.ColumnsFromPathKeys { + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field8Length() int { + l := 0 + if p.IsSetTableFormatParams() { + l += bthrift.Binary.FieldBeginLength("table_format_params", thrift.STRUCT, 8) + l += p.TableFormatParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field9Length() int { + l := 0 + if p.IsSetModificationTime() { + l += bthrift.Binary.FieldBeginLength("modification_time", thrift.I64, 9) + l += bthrift.Binary.I64Length(*p.ModificationTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field10Length() int { + l := 0 + if p.IsSetFileType() { + l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 10) + l += bthrift.Binary.I32Length(int32(*p.FileType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field11Length() int { + l := 0 + if p.IsSetCompressType() { + l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 11) + l += bthrift.Binary.I32Length(int32(*p.CompressType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFileRangeDesc) field12Length() int { + l := 0 + if 
p.IsSetFsName() { + l += bthrift.Binary.FieldBeginLength("fs_name", thrift.STRING, 12) + l += bthrift.Binary.StringLengthNocopy(*p.FsName) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSplitSource) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSplitSource[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSplitSource) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetNumOfColumnsFromFile() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_of_columns_from_file", thrift.I32, 6) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumOfColumnsFromFile) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SplitSourceId = &v + } - return offset + return offset, nil } -func (p *TFileScanRangeParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSplitSource) FastReadField2(buf []byte) (int, error) { offset := 0 - if p.IsSetRequiredSlots() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "required_slots", thrift.LIST, 7) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.RequiredSlots { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - 
bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NumSplits = &v + } - return offset + return offset, nil } -func (p *TFileScanRangeParams) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { +// for compatibility +func (p *TSplitSource) FastWrite(buf []byte) int { + return 0 +} + +func (p *TSplitSource) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetHdfsParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hdfs_params", thrift.STRUCT, 8) - offset += p.HdfsParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSplitSource") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFileScanRangeParams) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetProperties() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "properties", thrift.MAP, 9) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) - var length int - for k, v := range p.Properties { - length++ - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TSplitSource) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TSplitSource") + if p != nil { + l += p.field1Length() + l += p.field2Length() } - return offset + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TFileScanRangeParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSplitSource) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetExprOfDestSlot() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expr_of_dest_slot", thrift.MAP, 10) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) - var length int - for k, v := range p.ExprOfDestSlot { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) + if p.IsSetSplitSourceId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "split_source_id", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.SplitSourceId) - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRangeParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSplitSource) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDefaultValueOfSrcSlot() { - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "default_value_of_src_slot", thrift.MAP, 11) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) - var length int - for k, v := range p.DefaultValueOfSrcSlot { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) + if p.IsSetNumSplits() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "num_splits", thrift.I32, 2) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NumSplits) - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRangeParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDestSidToSrcSidWithoutTrans() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dest_sid_to_src_sid_without_trans", thrift.MAP, 12) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) - var length int - for k, v := range p.DestSidToSrcSidWithoutTrans { - length++ - - offset += bthrift.Binary.WriteI32(buf[offset:], k) - - offset += bthrift.Binary.WriteI32(buf[offset:], v) +func (p *TSplitSource) field1Length() int { + l := 0 + if p.IsSetSplitSourceId() { + l += bthrift.Binary.FieldBeginLength("split_source_id", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.SplitSourceId) - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStrictMode() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 13) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.StrictMode) +func (p *TSplitSource) field2Length() int { + l := 0 + if p.IsSetNumSplits() { + l += bthrift.Binary.FieldBeginLength("num_splits", thrift.I32, 2) + l += bthrift.Binary.I32Length(*p.NumSplits) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TFileScanRangeParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBrokerAddresses() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "broker_addresses", thrift.LIST, 14) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.BrokerAddresses { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) +func (p *TFileScanRange) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRange[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) FastReadField1(buf []byte) (int, error) { offset := 0 - if p.IsSetFileAttributes() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_attributes", thrift.STRUCT, 15) - offset += p.FileAttributes.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} -func (p *TFileScanRangeParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPreFilterExprs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pre_filter_exprs", thrift.STRUCT, 16) - offset += p.PreFilterExprs.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err } - return offset -} + p.Ranges = make([]*TFileRangeDesc, 0, size) + for i := 0; i < size; i++ { + _elem := NewTFileRangeDesc() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } -func (p *TFileScanRangeParams) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableFormatParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_params", thrift.STRUCT, 17) - offset += p.TableFormatParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + p.Ranges = append(p.Ranges, _elem) } - return offset + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil } -func (p *TFileScanRangeParams) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) FastReadField2(buf []byte) (int, error) { offset := 0 - if p.IsSetColumnIdxs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_idxs", thrift.LIST, 18) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) - var length int - for _, v := range p.ColumnIdxs { - length++ - offset += bthrift.Binary.WriteI32(buf[offset:], v) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + tmp := NewTFileScanRangeParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return offset + p.Params = tmp + return offset, nil } -func (p *TFileScanRangeParams) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) FastReadField3(buf []byte) (int, error) { offset := 0 - if p.IsSetSlotNameToSchemaPos() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "slot_name_to_schema_pos", thrift.MAP, 19) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I32, 0) - var length int - for k, v := range p.SlotNameToSchemaPos { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) + tmp := NewTSplitSource() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SplitSource = tmp + return offset, nil +} - offset += bthrift.Binary.WriteI32(buf[offset:], v) +// for compatibility +func (p *TFileScanRange) FastWrite(buf []byte) int { + return 0 +} - } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.I32, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TFileScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFileScanRange") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFileScanRangeParams) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TFileScanRange") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TFileScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPreFilterExprsList() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pre_filter_exprs_list", thrift.LIST, 20) + if p.IsSetRanges() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ranges", thrift.LIST, 1) listBeginOffset := offset offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var 
length int - for _, v := range p.PreFilterExprsList { + for _, v := range p.Ranges { length++ offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } @@ -9304,305 +13329,427 @@ func (p *TFileScanRangeParams) fastWriteField20(buf []byte, binaryWriter bthrift return offset } -func (p *TFileScanRangeParams) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetLoadId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 21) - offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 2) + offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRangeParams) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFileScanRange) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTextSerdeType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "text_serde_type", thrift.I32, 22) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.TextSerdeType)) - + if p.IsSetSplitSource() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "split_source", thrift.STRUCT, 3) + offset += p.SplitSource.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRangeParams) field1Length() int { +func (p *TFileScanRange) field1Length() int { l := 0 - if p.IsSetFileType() { - l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.FileType)) - + if p.IsSetRanges() { + l += bthrift.Binary.FieldBeginLength("ranges", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Ranges)) + for _, v := range p.Ranges { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field2Length() int { +func (p *TFileScanRange) field2Length() int { l := 0 - if p.IsSetFormatType() { - l += bthrift.Binary.FieldBeginLength("format_type", thrift.I32, 2) - l += bthrift.Binary.I32Length(int32(*p.FormatType)) - + if p.IsSetParams() { + l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 2) + l += p.Params.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field3Length() int { +func (p *TFileScanRange) field3Length() int { l := 0 - if p.IsSetCompressType() { - l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 3) - l += bthrift.Binary.I32Length(int32(*p.CompressType)) - + if p.IsSetSplitSource() { + l += bthrift.Binary.FieldBeginLength("split_source", thrift.STRUCT, 3) + l += p.SplitSource.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field4Length() int { - l := 0 - if p.IsSetSrcTupleId() { - l += bthrift.Binary.FieldBeginLength("src_tuple_id", thrift.I32, 4) - l += bthrift.Binary.I32Length(*p.SrcTupleId) +func (p *TExternalScanRange) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.FieldEndLength() + for { + _, fieldTypeId, fieldId, l, 
err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExternalScanRange[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) field5Length() int { - l := 0 - if p.IsSetDestTupleId() { - l += bthrift.Binary.FieldBeginLength("dest_tuple_id", thrift.I32, 5) - l += bthrift.Binary.I32Length(*p.DestTupleId) +func (p *TExternalScanRange) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.FieldEndLength() + tmp := NewTFileScanRange() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + p.FileScanRange = tmp + return offset, nil +} + +// for compatibility +func (p *TExternalScanRange) FastWrite(buf []byte) int { + return 0 } -func (p *TFileScanRangeParams) field6Length() int { - l := 0 - if p.IsSetNumOfColumnsFromFile() { - l += bthrift.Binary.FieldBeginLength("num_of_columns_from_file", thrift.I32, 6) - l += bthrift.Binary.I32Length(*p.NumOfColumnsFromFile) - - l += bthrift.Binary.FieldEndLength() +func (p *TExternalScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExternalScanRange") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TFileScanRangeParams) field7Length() int { +func (p *TExternalScanRange) BLength() int { l := 0 - if p.IsSetRequiredSlots() { - l += bthrift.Binary.FieldBeginLength("required_slots", thrift.LIST, 7) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.RequiredSlots)) - for _, v := range p.RequiredSlots { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TExternalScanRange") + if p != nil { + l += p.field1Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p 
*TFileScanRangeParams) field8Length() int { - l := 0 - if p.IsSetHdfsParams() { - l += bthrift.Binary.FieldBeginLength("hdfs_params", thrift.STRUCT, 8) - l += p.HdfsParams.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TExternalScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileScanRange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_range", thrift.STRUCT, 1) + offset += p.FileScanRange.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TFileScanRangeParams) field9Length() int { +func (p *TExternalScanRange) field1Length() int { l := 0 - if p.IsSetProperties() { - l += bthrift.Binary.FieldBeginLength("properties", thrift.MAP, 9) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Properties)) - for k, v := range p.Properties { - - l += bthrift.Binary.StringLengthNocopy(k) - - l += bthrift.Binary.StringLengthNocopy(v) - - } - l += bthrift.Binary.MapEndLength() + if p.IsSetFileScanRange() { + l += bthrift.Binary.FieldBeginLength("file_scan_range", thrift.STRUCT, 1) + l += p.FileScanRange.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field10Length() int { - l := 0 - if p.IsSetExprOfDestSlot() { - l += bthrift.Binary.FieldBeginLength("expr_of_dest_slot", thrift.MAP, 10) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.ExprOfDestSlot)) - for k, v := range p.ExprOfDestSlot { +func (p *TTVFNumbersScanRange) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } - l += bthrift.Binary.I32Length(k) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - l += v.BLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() } - return l + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTVFNumbersScanRange[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRangeParams) field11Length() int { - l := 0 - if p.IsSetDefaultValueOfSrcSlot() { - l += bthrift.Binary.FieldBeginLength("default_value_of_src_slot", thrift.MAP, 11) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.DefaultValueOfSrcSlot)) - for k, v := range p.DefaultValueOfSrcSlot { +func (p *TTVFNumbersScanRange) FastReadField1(buf []byte) (int, error) { + offset := 0 - l += bthrift.Binary.I32Length(k) + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TotalNumbers = &v - l += v.BLength() - } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TFileScanRangeParams) field12Length() int { - l := 0 - if p.IsSetDestSidToSrcSidWithoutTrans() { - l += bthrift.Binary.FieldBeginLength("dest_sid_to_src_sid_without_trans", thrift.MAP, 12) - l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.DestSidToSrcSidWithoutTrans)) - var tmpK types.TSlotId - var tmpV types.TSlotId - l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.DestSidToSrcSidWithoutTrans) - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} +func (p *TTVFNumbersScanRange) FastReadField2(buf []byte) (int, error) { + offset := 0 -func (p *TFileScanRangeParams) field13Length() int { - l := 0 - if p.IsSetStrictMode() { - l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 13) - l += bthrift.Binary.BoolLength(*p.StrictMode) + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UseConst = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TFileScanRangeParams) field14Length() int { - l := 0 - if p.IsSetBrokerAddresses() { - l += bthrift.Binary.FieldBeginLength("broker_addresses", thrift.LIST, 14) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.BrokerAddresses)) - for _, v := range p.BrokerAddresses { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TTVFNumbersScanRange) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConstValue = &v + } - return l + return offset, nil } -func (p *TFileScanRangeParams) field15Length() int { - l := 0 - if p.IsSetFileAttributes() { - l += bthrift.Binary.FieldBeginLength("file_attributes", thrift.STRUCT, 15) - l += p.FileAttributes.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l +// for compatibility +func (p *TTVFNumbersScanRange) FastWrite(buf []byte) int { + return 0 } -func (p *TFileScanRangeParams) field16Length() int { - l := 0 - if p.IsSetPreFilterExprs() { - l += bthrift.Binary.FieldBeginLength("pre_filter_exprs", 
thrift.STRUCT, 16) - l += p.PreFilterExprs.BLength() - l += bthrift.Binary.FieldEndLength() +func (p *TTVFNumbersScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTVFNumbersScanRange") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TFileScanRangeParams) field17Length() int { +func (p *TTVFNumbersScanRange) BLength() int { l := 0 - if p.IsSetTableFormatParams() { - l += bthrift.Binary.FieldBeginLength("table_format_params", thrift.STRUCT, 17) - l += p.TableFormatParams.BLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TTVFNumbersScanRange") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TFileScanRangeParams) field18Length() int { - l := 0 - if p.IsSetColumnIdxs() { - l += bthrift.Binary.FieldBeginLength("column_idxs", thrift.LIST, 18) - l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.ColumnIdxs)) - var tmpV int32 - l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.ColumnIdxs) - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TTVFNumbersScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTotalNumbers() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "totalNumbers", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalNumbers) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TFileScanRangeParams) field19Length() int { - l := 0 - if p.IsSetSlotNameToSchemaPos() { - l += bthrift.Binary.FieldBeginLength("slot_name_to_schema_pos", thrift.MAP, 19) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.I32, len(p.SlotNameToSchemaPos)) - for k, v := range p.SlotNameToSchemaPos { +func (p *TTVFNumbersScanRange) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUseConst() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "useConst", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UseConst) - l += bthrift.Binary.StringLengthNocopy(k) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - l += bthrift.Binary.I32Length(v) +func (p *TTVFNumbersScanRange) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConstValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "constValue", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ConstValue) - } - l += bthrift.Binary.MapEndLength() - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TFileScanRangeParams) field20Length() int { +func (p *TTVFNumbersScanRange) field1Length() int { l := 0 - if p.IsSetPreFilterExprsList() { - l += bthrift.Binary.FieldBeginLength("pre_filter_exprs_list", thrift.LIST, 20) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PreFilterExprsList)) - for _, v := range p.PreFilterExprsList { - l += v.BLength() - } - l += 
bthrift.Binary.ListEndLength() + if p.IsSetTotalNumbers() { + l += bthrift.Binary.FieldBeginLength("totalNumbers", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.TotalNumbers) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field21Length() int { +func (p *TTVFNumbersScanRange) field2Length() int { l := 0 - if p.IsSetLoadId() { - l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 21) - l += p.LoadId.BLength() + if p.IsSetUseConst() { + l += bthrift.Binary.FieldBeginLength("useConst", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(*p.UseConst) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRangeParams) field22Length() int { +func (p *TTVFNumbersScanRange) field3Length() int { l := 0 - if p.IsSetTextSerdeType() { - l += bthrift.Binary.FieldBeginLength("text_serde_type", thrift.I32, 22) - l += bthrift.Binary.I32Length(int32(*p.TextSerdeType)) + if p.IsSetConstValue() { + l += bthrift.Binary.FieldBeginLength("constValue", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.ConstValue) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { +func (p *TDataGenScanRange) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -9638,79 +13785,126 @@ func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField2(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 7: - if fieldTypeId == thrift.LIST { - l, err = p.FastReadField7(buf[offset:]) + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read 
field %d '%s' error: ", p, fieldId, fieldIDToName_TDataGenScanRange[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TDataGenScanRange) FastReadField1(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTVFNumbersScanRange() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.NumbersParams = tmp + return offset, nil +} + +// for compatibility +func (p *TDataGenScanRange) FastWrite(buf []byte) int { + return 0 +} + +func (p *TDataGenScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDataGenScanRange") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TDataGenScanRange) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TDataGenScanRange") + if p != nil { + l += p.field1Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TDataGenScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNumbersParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "numbers_params", thrift.STRUCT, 1) + offset += p.NumbersParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TDataGenScanRange) field1Length() int { + l := 0 + if p.IsSetNumbersParams() { + l += bthrift.Binary.FieldBeginLength("numbers_params", thrift.STRUCT, 1) + l += p.NumbersParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -9722,9 +13916,9 @@ func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 8: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField8(buf[offset:]) + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -9736,9 +13930,9 @@ func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 9: - if fieldTypeId == thrift.I64 { - l, err = p.FastReadField9(buf[offset:]) + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -9750,9 +13944,9 @@ func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 10: - if fieldTypeId == thrift.I32 { - 
l, err = p.FastReadField10(buf[offset:]) + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -9764,23 +13958,241 @@ func (p *TFileRangeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 11: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField11(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError } - case 12: + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergMetadataParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TIcebergMetadataParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := types.TIcebergQueryType(v) + p.IcebergQueryType = &tmp + + } + return offset, nil +} + +func (p *TIcebergMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Catalog = &v + + } + return offset, nil +} + +func (p *TIcebergMetadataParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Database = &v + + } + return offset, nil +} + +func (p *TIcebergMetadataParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Table = &v + + } + return offset, nil +} + +// for compatibility +func (p *TIcebergMetadataParams) FastWrite(buf []byte) int { + return 0 +} + +func (p *TIcebergMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIcebergMetadataParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TIcebergMetadataParams) BLength() int { + l := 0 + l += 
bthrift.Binary.StructBeginLength("TIcebergMetadataParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TIcebergMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIcebergQueryType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_query_type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.IcebergQueryType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Catalog) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergMetadataParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDatabase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "database", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Database) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergMetadataParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 4) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIcebergMetadataParams) field1Length() int { + l := 0 + if p.IsSetIcebergQueryType() { + l += bthrift.Binary.FieldBeginLength("iceberg_query_type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.IcebergQueryType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergMetadataParams) field2Length() int { + l := 0 + if p.IsSetCatalog() { + l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Catalog) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergMetadataParams) field3Length() int { + l := 0 + if p.IsSetDatabase() { + l += bthrift.Binary.FieldBeginLength("database", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Database) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TIcebergMetadataParams) field4Length() int { + l := 0 + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 4) + l += bthrift.Binary.StringLengthNocopy(*p.Table) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackendsMetadataParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: if fieldTypeId == thrift.STRING { - l, err = p.FastReadField12(buf[offset:]) + l, err = 
p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -9818,7 +14230,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileRangeDesc[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendsMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -9827,535 +14239,384 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) FastReadField1(buf []byte) (int, error) { - offset := 0 - - tmp := types.NewTUniqueId() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.LoadId = tmp - return offset, nil -} - -func (p *TFileRangeDesc) FastReadField2(buf []byte) (int, error) { +func (p *TBackendsMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Path = &v + p.ClusterName = &v } return offset, nil } -func (p *TFileRangeDesc) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.StartOffset = &v - - } - return offset, nil +// for compatibility +func (p *TBackendsMetadataParams) FastWrite(buf []byte) int { + return 0 } -func (p *TFileRangeDesc) FastReadField4(buf []byte) (int, error) { +func (p *TBackendsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.Size = &v - + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBackendsMetadataParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) } - return offset, nil + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TFileRangeDesc) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.FileSize = v - +func (p *TBackendsMetadataParams) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TBackendsMetadataParams") + if p != nil { + l += p.field1Length() } - return offset, nil + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l } -func (p *TFileRangeDesc) FastReadField6(buf []byte) (int, error) { +func (p *TBackendsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 + if p.IsSetClusterName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClusterName) - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - p.ColumnsFromPath = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem 
string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v + return offset +} - } +func (p *TBackendsMetadataParams) field1Length() int { + l := 0 + if p.IsSetClusterName() { + l += bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.ClusterName) - p.ColumnsFromPath = append(p.ColumnsFromPath, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + l += bthrift.Binary.FieldEndLength() } - return offset, nil + return l } -func (p *TFileRangeDesc) FastReadField7(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) +func (p *TFrontendsMetadataParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { - return offset, err - } - p.ColumnsFromPathKeys = make([]string, 0, size) - for i := 0; i < size; i++ { - var _elem string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v - - } - - p.ColumnsFromPathKeys = append(p.ColumnsFromPathKeys, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + goto ReadStructBeginError } - return offset, nil -} -func (p *TFileRangeDesc) FastReadField8(buf []byte) (int, error) { - offset := 0 - - tmp := NewTTableFormatFileDesc() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) offset += l - } - p.TableFormatParams = tmp - return offset, nil -} - -func (p *TFileRangeDesc) FastReadField9(buf []byte) (int, error) { - offset := 0 + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { - return offset, err - } else { + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) offset += l - p.ModificationTime = &v - + if err != nil { + goto ReadFieldEndError + } } - return offset, nil -} - -func (p *TFileRangeDesc) FastReadField10(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := types.TFileType(v) - p.FileType = &tmp - + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return offset, nil -} - -func (p *TFileRangeDesc) FastReadField11(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := TFileCompressType(v) - p.CompressType = &tmp - } return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) 
+ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendsMetadataParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) FastReadField12(buf []byte) (int, error) { +func (p *TFrontendsMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FsName = &v + p.ClusterName = &v } return offset, nil } // for compatibility -func (p *TFileRangeDesc) FastWrite(buf []byte) int { +func (p *TFrontendsMetadataParams) FastWrite(buf []byte) int { return 0 } -func (p *TFileRangeDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFrontendsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFileRangeDesc") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendsMetadataParams") if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField10(buf[offset:], binaryWriter) - offset += p.fastWriteField11(buf[offset:], binaryWriter) - offset += p.fastWriteField12(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFileRangeDesc) BLength() int { +func (p *TFrontendsMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFileRangeDesc") + l += bthrift.Binary.StructBeginLength("TFrontendsMetadataParams") if p != nil { l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() - l += p.field10Length() - l += p.field11Length() - l += p.field12Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFileRangeDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetLoadId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 1) - offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "path", thrift.STRING, 2) - offset += 
bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Path) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetStartOffset() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "start_offset", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.StartOffset) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "size", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Size) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFileSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_size", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], p.FileSize) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetColumnsFromPath() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_from_path", thrift.LIST, 6) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ColumnsFromPath { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetColumnsFromPathKeys() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns_from_path_keys", thrift.LIST, 7) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) - var length int - for _, v := range p.ColumnsFromPathKeys { - length++ - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTableFormatParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_format_params", thrift.STRUCT, 8) - offset += p.TableFormatParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetModificationTime() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "modification_time", thrift.I64, 9) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.ModificationTime) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFileType() { - offset 
+= bthrift.Binary.WriteFieldBegin(buf[offset:], "file_type", thrift.I32, 10) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.FileType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCompressType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "compress_type", thrift.I32, 11) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.CompressType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFileRangeDesc) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TFrontendsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFsName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fs_name", thrift.STRING, 12) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FsName) + if p.IsSetClusterName() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClusterName) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileRangeDesc) field1Length() int { +func (p *TFrontendsMetadataParams) field1Length() int { l := 0 - if p.IsSetLoadId() { - l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 1) - l += p.LoadId.BLength() + if p.IsSetClusterName() { + l += bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.ClusterName) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileRangeDesc) field2Length() int { - l := 0 - if p.IsSetPath() { - l += bthrift.Binary.FieldBeginLength("path", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Path) - - l += bthrift.Binary.FieldEndLength() +func (p *TMaterializedViewsMetadataParams) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError } - return l -} -func (p *TFileRangeDesc) field3Length() int { - l := 0 - if p.IsSetStartOffset() { - l += bthrift.Binary.FieldBeginLength("start_offset", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.StartOffset) + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } } - return l -} - -func 
(p *TFileRangeDesc) field4Length() int { - l := 0 - if p.IsSetSize() { - l += bthrift.Binary.FieldBeginLength("size", thrift.I64, 4) - l += bthrift.Binary.I64Length(*p.Size) - - l += bthrift.Binary.FieldEndLength() + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError } - return l + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMaterializedViewsMetadataParams[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileRangeDesc) field5Length() int { - l := 0 - if p.IsSetFileSize() { - l += bthrift.Binary.FieldBeginLength("file_size", thrift.I64, 5) - l += bthrift.Binary.I64Length(p.FileSize) +func (p *TMaterializedViewsMetadataParams) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Database = &v - l += bthrift.Binary.FieldEndLength() } - return l + return offset, nil } -func (p *TFileRangeDesc) field6Length() int { - l := 0 - if p.IsSetColumnsFromPath() { - l += bthrift.Binary.FieldBeginLength("columns_from_path", thrift.LIST, 6) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsFromPath)) - for _, v := range p.ColumnsFromPath { - l += bthrift.Binary.StringLengthNocopy(v) +func (p *TMaterializedViewsMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } - return l + p.CurrentUserIdent = tmp + return offset, nil } -func (p *TFileRangeDesc) field7Length() int { - l := 0 - if p.IsSetColumnsFromPathKeys() { - l += bthrift.Binary.FieldBeginLength("columns_from_path_keys", thrift.LIST, 7) - l += bthrift.Binary.ListBeginLength(thrift.STRING, len(p.ColumnsFromPathKeys)) - for _, v := range p.ColumnsFromPathKeys { - l += bthrift.Binary.StringLengthNocopy(v) +// for compatibility +func (p *TMaterializedViewsMetadataParams) FastWrite(buf []byte) int { + return 0 +} - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() +func (p *TMaterializedViewsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMaterializedViewsMetadataParams") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } - return l + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset } -func (p *TFileRangeDesc) field8Length() int { +func (p *TMaterializedViewsMetadataParams) BLength() int { l := 0 - if p.IsSetTableFormatParams() { - l += 
bthrift.Binary.FieldBeginLength("table_format_params", thrift.STRUCT, 8) - l += p.TableFormatParams.BLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.StructBeginLength("TMaterializedViewsMetadataParams") + if p != nil { + l += p.field1Length() + l += p.field2Length() } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() return l } -func (p *TFileRangeDesc) field9Length() int { - l := 0 - if p.IsSetModificationTime() { - l += bthrift.Binary.FieldBeginLength("modification_time", thrift.I64, 9) - l += bthrift.Binary.I64Length(*p.ModificationTime) +func (p *TMaterializedViewsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDatabase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "database", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Database) - l += bthrift.Binary.FieldEndLength() + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TFileRangeDesc) field10Length() int { - l := 0 - if p.IsSetFileType() { - l += bthrift.Binary.FieldBeginLength("file_type", thrift.I32, 10) - l += bthrift.Binary.I32Length(int32(*p.FileType)) - - l += bthrift.Binary.FieldEndLength() +func (p *TMaterializedViewsMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 2) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return l + return offset } -func (p *TFileRangeDesc) field11Length() int { +func (p *TMaterializedViewsMetadataParams) field1Length() int { l := 0 - if p.IsSetCompressType() { - l += bthrift.Binary.FieldBeginLength("compress_type", thrift.I32, 11) - l += bthrift.Binary.I32Length(int32(*p.CompressType)) + if p.IsSetDatabase() { + l += bthrift.Binary.FieldBeginLength("database", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Database) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileRangeDesc) field12Length() int { +func (p *TMaterializedViewsMetadataParams) field2Length() int { l := 0 - if p.IsSetFsName() { - l += bthrift.Binary.FieldBeginLength("fs_name", thrift.STRING, 12) - l += bthrift.Binary.StringLengthNocopy(*p.FsName) - + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 2) + l += p.CurrentUserIdent.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRange) FastRead(buf []byte) (int, error) { +func (p *TPartitionsMetadataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10378,7 +14639,7 @@ func (p *TFileScanRange) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -10392,7 +14653,7 @@ func (p *TFileScanRange) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -10405,6 +14666,20 @@ func (p *TFileScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10431,7 +14706,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFileScanRange[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionsMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -10440,128 +14715,143 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFileScanRange) FastReadField1(buf []byte) (int, error) { +func (p *TPartitionsMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err - } - p.Ranges = make([]*TFileRangeDesc, 0, size) - for i := 0; i < size; i++ { - _elem := NewTFileRangeDesc() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } else { + offset += l + p.Catalog = &v - p.Ranges = append(p.Ranges, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TPartitionsMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Database = &v + } return offset, nil } -func (p *TFileScanRange) FastReadField2(buf []byte) (int, error) { +func (p *TPartitionsMetadataParams) FastReadField3(buf []byte) (int, error) { offset := 0 - tmp := NewTFileScanRangeParams() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Table = &v + } - p.Params = tmp return offset, nil } // for compatibility -func (p *TFileScanRange) FastWrite(buf []byte) int { +func (p *TPartitionsMetadataParams) FastWrite(buf []byte) int { return 0 } -func (p *TFileScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFileScanRange") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionsMetadataParams") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFileScanRange) BLength() int { +func (p *TPartitionsMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFileScanRange") + l += bthrift.Binary.StructBeginLength("TPartitionsMetadataParams") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() } l += 
bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFileScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRanges() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ranges", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Ranges { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Catalog) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRange) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionsMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 2) - offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetDatabase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "database", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Database) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TFileScanRange) field1Length() int { +func (p *TPartitionsMetadataParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPartitionsMetadataParams) field1Length() int { l := 0 - if p.IsSetRanges() { - l += bthrift.Binary.FieldBeginLength("ranges", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Ranges)) - for _, v := range p.Ranges { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetCatalog() { + l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Catalog) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TFileScanRange) field2Length() int { +func (p *TPartitionsMetadataParams) field2Length() int { l := 0 - if p.IsSetParams() { - l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 2) - l += p.Params.BLength() + if p.IsSetDatabase() { + l += bthrift.Binary.FieldBeginLength("database", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Database) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPartitionsMetadataParams) field3Length() int { + l := 0 + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Table) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TExternalScanRange) FastRead(buf []byte) (int, error) { +func (p *TPartitionValuesMetadataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10584,7 +14874,7 @@ func (p *TExternalScanRange) 
FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -10597,6 +14887,34 @@ func (p *TExternalScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10623,7 +14941,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExternalScanRange[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionValuesMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -10632,67 +14950,143 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TExternalScanRange) FastReadField1(buf []byte) (int, error) { +func (p *TPartitionValuesMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTFileScanRange() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l + p.Catalog = &v + + } + return offset, nil +} + +func (p *TPartitionValuesMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Database = &v + + } + return offset, nil +} + +func (p *TPartitionValuesMetadataParams) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Table = &v + } - p.FileScanRange = tmp return offset, nil } // for compatibility -func (p *TExternalScanRange) FastWrite(buf []byte) int { +func (p *TPartitionValuesMetadataParams) FastWrite(buf []byte) int { return 0 } -func (p *TExternalScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionValuesMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExternalScanRange") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionValuesMetadataParams") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p 
*TExternalScanRange) BLength() int { +func (p *TPartitionValuesMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TExternalScanRange") + l += bthrift.Binary.StructBeginLength("TPartitionValuesMetadataParams") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TExternalScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionValuesMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFileScanRange() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_range", thrift.STRUCT, 1) - offset += p.FileScanRange.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetCatalog() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Catalog) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TExternalScanRange) field1Length() int { +func (p *TPartitionValuesMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDatabase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "database", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Database) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPartitionValuesMetadataParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPartitionValuesMetadataParams) field1Length() int { l := 0 - if p.IsSetFileScanRange() { - l += bthrift.Binary.FieldBeginLength("file_scan_range", thrift.STRUCT, 1) - l += p.FileScanRange.BLength() + if p.IsSetCatalog() { + l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Catalog) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTVFNumbersScanRange) FastRead(buf []byte) (int, error) { +func (p *TPartitionValuesMetadataParams) field2Length() int { + l := 0 + if p.IsSetDatabase() { + l += bthrift.Binary.FieldBeginLength("database", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.Database) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPartitionValuesMetadataParams) field3Length() int { + l := 0 + if p.IsSetTable() { + l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Table) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJobsMetadataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10715,7 +15109,7 @@ func (p *TTVFNumbersScanRange) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -10728,6 +15122,20 @@ func (p *TTVFNumbersScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l 
+ if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10754,7 +15162,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTVFNumbersScanRange[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobsMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -10763,69 +15171,104 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TTVFNumbersScanRange) FastReadField1(buf []byte) (int, error) { +func (p *TJobsMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.TotalNumbers = &v + p.Type = &v + + } + return offset, nil +} + +func (p *TJobsMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + tmp := types.NewTUserIdentity() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } + p.CurrentUserIdent = tmp return offset, nil } // for compatibility -func (p *TTVFNumbersScanRange) FastWrite(buf []byte) int { +func (p *TJobsMetadataParams) FastWrite(buf []byte) int { return 0 } -func (p *TTVFNumbersScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TJobsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTVFNumbersScanRange") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJobsMetadataParams") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTVFNumbersScanRange) BLength() int { +func (p *TJobsMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTVFNumbersScanRange") + l += bthrift.Binary.StructBeginLength("TJobsMetadataParams") if p != nil { l += p.field1Length() + l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTVFNumbersScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TJobsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTotalNumbers() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "totalNumbers", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalNumbers) + if p.IsSetType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Type) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTVFNumbersScanRange) field1Length() int { 
+func (p *TJobsMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 2) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJobsMetadataParams) field1Length() int { l := 0 - if p.IsSetTotalNumbers() { - l += bthrift.Binary.FieldBeginLength("totalNumbers", thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.TotalNumbers) + if p.IsSetType() { + l += bthrift.Binary.FieldBeginLength("type", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Type) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TDataGenScanRange) FastRead(buf []byte) (int, error) { +func (p *TJobsMetadataParams) field2Length() int { + l := 0 + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 2) + l += p.CurrentUserIdent.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TTasksMetadataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10848,7 +15291,7 @@ func (p *TDataGenScanRange) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -10861,6 +15304,20 @@ func (p *TDataGenScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10887,7 +15344,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TDataGenScanRange[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTasksMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -10896,67 +15353,104 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TDataGenScanRange) FastReadField1(buf []byte) (int, error) { +func (p *TTasksMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTTVFNumbersScanRange() + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Type = &v + + } + return offset, nil +} + +func (p *TTasksMetadataParams) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUserIdentity() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.NumbersParams = tmp + p.CurrentUserIdent = tmp return offset, nil } // for compatibility -func (p *TDataGenScanRange) FastWrite(buf []byte) int { +func (p *TTasksMetadataParams) FastWrite(buf []byte) int { return 0 } -func (p *TDataGenScanRange) FastWriteNocopy(buf 
[]byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTasksMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TDataGenScanRange") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTasksMetadataParams") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TDataGenScanRange) BLength() int { +func (p *TTasksMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TDataGenScanRange") + l += bthrift.Binary.StructBeginLength("TTasksMetadataParams") if p != nil { l += p.field1Length() + l += p.field2Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TDataGenScanRange) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTasksMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetNumbersParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "numbers_params", thrift.STRUCT, 1) - offset += p.NumbersParams.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.STRING, 1) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Type) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TTasksMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCurrentUserIdent() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "current_user_ident", thrift.STRUCT, 2) + offset += p.CurrentUserIdent.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - return offset + return offset +} + +func (p *TTasksMetadataParams) field1Length() int { + l := 0 + if p.IsSetType() { + l += bthrift.Binary.FieldBeginLength("type", thrift.STRING, 1) + l += bthrift.Binary.StringLengthNocopy(*p.Type) + + l += bthrift.Binary.FieldEndLength() + } + return l } -func (p *TDataGenScanRange) field1Length() int { +func (p *TTasksMetadataParams) field2Length() int { l := 0 - if p.IsSetNumbersParams() { - l += bthrift.Binary.FieldBeginLength("numbers_params", thrift.STRUCT, 1) - l += p.NumbersParams.BLength() + if p.IsSetCurrentUserIdent() { + l += bthrift.Binary.FieldBeginLength("current_user_ident", thrift.STRUCT, 2) + l += p.CurrentUserIdent.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -10979,7 +15473,7 @@ func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -10993,7 +15487,7 @@ func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { @@ -11007,7 +15501,7 @@ func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == 
thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -11021,7 +15515,7 @@ func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -11034,6 +15528,48 @@ func (p *TIcebergMetadataParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -11060,7 +15596,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIcebergMetadataParams[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueriesMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -11069,293 +15605,137 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TIcebergMetadataParams) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - tmp := types.TIcebergQueryType(v) - p.IcebergQueryType = &tmp - - } - return offset, nil -} - -func (p *TIcebergMetadataParams) FastReadField2(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Catalog = &v + p.ClusterName = &v } return offset, nil } -func (p *TIcebergMetadataParams) FastReadField3(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Database = &v + p.RelayToOtherFe = &v } return offset, nil } -func (p *TIcebergMetadataParams) FastReadField4(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTMaterializedViewsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return 
offset, err } else { offset += l - p.Table = &v - } + p.MaterializedViewsParams = tmp return offset, nil -} - -// for compatibility -func (p *TIcebergMetadataParams) FastWrite(buf []byte) int { - return 0 -} - -func (p *TIcebergMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIcebergMetadataParams") - if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - } - offset += bthrift.Binary.WriteFieldStop(buf[offset:]) - offset += bthrift.Binary.WriteStructEnd(buf[offset:]) - return offset -} - -func (p *TIcebergMetadataParams) BLength() int { - l := 0 - l += bthrift.Binary.StructBeginLength("TIcebergMetadataParams") - if p != nil { - l += p.field1Length() - l += p.field2Length() - l += p.field3Length() - l += p.field4Length() - } - l += bthrift.Binary.FieldStopLength() - l += bthrift.Binary.StructEndLength() - return l -} - -func (p *TIcebergMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetIcebergQueryType() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "iceberg_query_type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.IcebergQueryType)) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIcebergMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetCatalog() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Catalog) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIcebergMetadataParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetDatabase() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "database", thrift.STRING, 3) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Database) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIcebergMetadataParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetTable() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table", thrift.STRING, 4) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Table) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TIcebergMetadataParams) field1Length() int { - l := 0 - if p.IsSetIcebergQueryType() { - l += bthrift.Binary.FieldBeginLength("iceberg_query_type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.IcebergQueryType)) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TIcebergMetadataParams) field2Length() int { - l := 0 - if p.IsSetCatalog() { - l += bthrift.Binary.FieldBeginLength("catalog", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.Catalog) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TIcebergMetadataParams) field3Length() int { - l := 0 - if p.IsSetDatabase() { - l += bthrift.Binary.FieldBeginLength("database", thrift.STRING, 3) - l += bthrift.Binary.StringLengthNocopy(*p.Database) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - 
-func (p *TIcebergMetadataParams) field4Length() int { - l := 0 - if p.IsSetTable() { - l += bthrift.Binary.FieldBeginLength("table", thrift.STRING, 4) - l += bthrift.Binary.StringLengthNocopy(*p.Table) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TBackendsMetadataParams) FastRead(buf []byte) (int, error) { - var err error - var offset int - var l int - var fieldTypeId thrift.TType - var fieldId int16 - _, l, err = bthrift.Binary.ReadStructBegin(buf) - offset += l - if err != nil { - goto ReadStructBeginError - } - - for { - _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } +} - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) +func (p *TQueriesMetadataParams) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := NewTJobsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError } + p.JobsParams = tmp + return offset, nil +} + +func (p *TQueriesMetadataParams) FastReadField5(buf []byte) (int, error) { + offset := 0 + tmp := NewTTasksMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TasksParams = tmp return offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendsMetadataParams[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TBackendsMetadataParams) FastReadField1(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + tmp := NewTPartitionsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.ClusterName = &v + } + p.PartitionsParams = tmp + return offset, nil +} + +func (p *TQueriesMetadataParams) FastReadField7(buf []byte) (int, error) { + offset := 0 + tmp := NewTPartitionValuesMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l } + p.PartitionValuesParams = tmp return offset, nil } // for compatibility -func (p *TBackendsMetadataParams) FastWrite(buf []byte) int { +func (p *TQueriesMetadataParams) 
FastWrite(buf []byte) int { return 0 } -func (p *TBackendsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueriesMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBackendsMetadataParams") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueriesMetadataParams") if p != nil { + offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TBackendsMetadataParams) BLength() int { +func (p *TQueriesMetadataParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TBackendsMetadataParams") + l += bthrift.Binary.StructBeginLength("TQueriesMetadataParams") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TBackendsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TQueriesMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetClusterName() { offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) @@ -11366,7 +15746,68 @@ func (p *TBackendsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthri return offset } -func (p *TBackendsMetadataParams) field1Length() int { +func (p *TQueriesMetadataParams) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRelayToOtherFe() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "relay_to_other_fe", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.RelayToOtherFe) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueriesMetadataParams) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaterializedViewsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "materialized_views_params", thrift.STRUCT, 3) + offset += p.MaterializedViewsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueriesMetadataParams) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jobs_params", thrift.STRUCT, 4) + offset += p.JobsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueriesMetadataParams) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTasksParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks_params", thrift.STRUCT, 5) + offset += p.TasksParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) 
+ } + return offset +} + +func (p *TQueriesMetadataParams) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions_params", thrift.STRUCT, 6) + offset += p.PartitionsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueriesMetadataParams) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionValuesParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_values_params", thrift.STRUCT, 7) + offset += p.PartitionValuesParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueriesMetadataParams) field1Length() int { l := 0 if p.IsSetClusterName() { l += bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) @@ -11377,7 +15818,68 @@ func (p *TBackendsMetadataParams) field1Length() int { return l } -func (p *TFrontendsMetadataParams) FastRead(buf []byte) (int, error) { +func (p *TQueriesMetadataParams) field2Length() int { + l := 0 + if p.IsSetRelayToOtherFe() { + l += bthrift.Binary.FieldBeginLength("relay_to_other_fe", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(*p.RelayToOtherFe) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueriesMetadataParams) field3Length() int { + l := 0 + if p.IsSetMaterializedViewsParams() { + l += bthrift.Binary.FieldBeginLength("materialized_views_params", thrift.STRUCT, 3) + l += p.MaterializedViewsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueriesMetadataParams) field4Length() int { + l := 0 + if p.IsSetJobsParams() { + l += bthrift.Binary.FieldBeginLength("jobs_params", thrift.STRUCT, 4) + l += p.JobsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueriesMetadataParams) field5Length() int { + l := 0 + if p.IsSetTasksParams() { + l += bthrift.Binary.FieldBeginLength("tasks_params", thrift.STRUCT, 5) + l += p.TasksParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueriesMetadataParams) field6Length() int { + l := 0 + if p.IsSetPartitionsParams() { + l += bthrift.Binary.FieldBeginLength("partitions_params", thrift.STRUCT, 6) + l += p.PartitionsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueriesMetadataParams) field7Length() int { + l := 0 + if p.IsSetPartitionValuesParams() { + l += bthrift.Binary.FieldBeginLength("partition_values_params", thrift.STRUCT, 7) + l += p.PartitionValuesParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetaCacheStatsParams) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -11398,27 +15900,10 @@ func (p *TFrontendsMetadataParams) FastRead(buf []byte) (int, error) { if fieldTypeId == thrift.STOP { break } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - l, err = p.FastReadField1(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err 
!= nil { + goto SkipFieldError } l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) @@ -11438,8 +15923,6 @@ ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TFrontendsMetadataParams[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -11448,68 +15931,31 @@ ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TFrontendsMetadataParams) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.ClusterName = &v - - } - return offset, nil -} - // for compatibility -func (p *TFrontendsMetadataParams) FastWrite(buf []byte) int { +func (p *TMetaCacheStatsParams) FastWrite(buf []byte) int { return 0 } -func (p *TFrontendsMetadataParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMetaCacheStatsParams) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TFrontendsMetadataParams") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMetaCacheStatsParams") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TFrontendsMetadataParams) BLength() int { +func (p *TMetaCacheStatsParams) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TFrontendsMetadataParams") + l += bthrift.Binary.StructBeginLength("TMetaCacheStatsParams") if p != nil { - l += p.field1Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TFrontendsMetadataParams) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetClusterName() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_name", thrift.STRING, 1) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ClusterName) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TFrontendsMetadataParams) field1Length() int { - l := 0 - if p.IsSetClusterName() { - l += bthrift.Binary.FieldBeginLength("cluster_name", thrift.STRING, 1) - l += bthrift.Binary.StringLengthNocopy(*p.ClusterName) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - func (p *TMetaScanRange) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11546,9 +15992,65 @@ func (p *TMetaScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField2(buf[offset:]) + l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -11560,9 +16062,9 @@ func (p *TMetaScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 3: + case 7: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField3(buf[offset:]) + l, err = p.FastReadField7(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -11574,9 +16076,51 @@ func (p *TMetaScanRange) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 4: + case 8: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField4(buf[offset:]) + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -11677,6 +16221,97 @@ func (p *TMetaScanRange) FastReadField4(buf []byte) (int, error) { return offset, nil } +func (p *TMetaScanRange) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTQueriesMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueriesParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField6(buf []byte) (int, error) { + offset := 0 + + tmp := NewTMaterializedViewsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MaterializedViewsParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField7(buf []byte) (int, error) { + offset := 0 + + tmp := NewTJobsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.JobsParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTasksMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TasksParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField9(buf []byte) (int, error) { + offset := 0 + + tmp := 
NewTPartitionsMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PartitionsParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := NewTMetaCacheStatsParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.MetaCacheStatsParams = tmp + return offset, nil +} + +func (p *TMetaScanRange) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := NewTPartitionValuesMetadataParams() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.PartitionValuesParams = tmp + return offset, nil +} + // for compatibility func (p *TMetaScanRange) FastWrite(buf []byte) int { return 0 @@ -11690,6 +16325,13 @@ func (p *TMetaScanRange) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -11704,6 +16346,13 @@ func (p *TMetaScanRange) BLength() int { l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -11731,62 +16380,202 @@ func (p *TMetaScanRange) fastWriteField2(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TMetaScanRange) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBackendsParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backends_params", thrift.STRUCT, 3) - offset += p.BackendsParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TMetaScanRange) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backends_params", thrift.STRUCT, 3) + offset += p.BackendsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFrontendsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "frontends_params", thrift.STRUCT, 4) + offset += p.FrontendsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueriesParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "queries_params", thrift.STRUCT, 5) + offset += 
p.QueriesParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaterializedViewsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "materialized_views_params", thrift.STRUCT, 6) + offset += p.MaterializedViewsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetJobsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "jobs_params", thrift.STRUCT, 7) + offset += p.JobsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTasksParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks_params", thrift.STRUCT, 8) + offset += p.TasksParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions_params", thrift.STRUCT, 9) + offset += p.PartitionsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetaCacheStatsParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta_cache_stats_params", thrift.STRUCT, 10) + offset += p.MetaCacheStatsParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionValuesParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_values_params", thrift.STRUCT, 11) + offset += p.PartitionValuesParams.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMetaScanRange) field1Length() int { + l := 0 + if p.IsSetMetadataType() { + l += bthrift.Binary.FieldBeginLength("metadata_type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(*p.MetadataType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetaScanRange) field2Length() int { + l := 0 + if p.IsSetIcebergParams() { + l += bthrift.Binary.FieldBeginLength("iceberg_params", thrift.STRUCT, 2) + l += p.IcebergParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetaScanRange) field3Length() int { + l := 0 + if p.IsSetBackendsParams() { + l += bthrift.Binary.FieldBeginLength("backends_params", thrift.STRUCT, 3) + l += p.BackendsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetaScanRange) field4Length() int { + l := 0 + if p.IsSetFrontendsParams() { + l += bthrift.Binary.FieldBeginLength("frontends_params", thrift.STRUCT, 4) + l += p.FrontendsParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return 
l +} + +func (p *TMetaScanRange) field5Length() int { + l := 0 + if p.IsSetQueriesParams() { + l += bthrift.Binary.FieldBeginLength("queries_params", thrift.STRUCT, 5) + l += p.QueriesParams.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMetaScanRange) field6Length() int { + l := 0 + if p.IsSetMaterializedViewsParams() { + l += bthrift.Binary.FieldBeginLength("materialized_views_params", thrift.STRUCT, 6) + l += p.MaterializedViewsParams.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TMetaScanRange) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetFrontendsParams() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "frontends_params", thrift.STRUCT, 4) - offset += p.FrontendsParams.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TMetaScanRange) field7Length() int { + l := 0 + if p.IsSetJobsParams() { + l += bthrift.Binary.FieldBeginLength("jobs_params", thrift.STRUCT, 7) + l += p.JobsParams.BLength() + l += bthrift.Binary.FieldEndLength() } - return offset + return l } -func (p *TMetaScanRange) field1Length() int { +func (p *TMetaScanRange) field8Length() int { l := 0 - if p.IsSetMetadataType() { - l += bthrift.Binary.FieldBeginLength("metadata_type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(*p.MetadataType)) - + if p.IsSetTasksParams() { + l += bthrift.Binary.FieldBeginLength("tasks_params", thrift.STRUCT, 8) + l += p.TasksParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetaScanRange) field2Length() int { +func (p *TMetaScanRange) field9Length() int { l := 0 - if p.IsSetIcebergParams() { - l += bthrift.Binary.FieldBeginLength("iceberg_params", thrift.STRUCT, 2) - l += p.IcebergParams.BLength() + if p.IsSetPartitionsParams() { + l += bthrift.Binary.FieldBeginLength("partitions_params", thrift.STRUCT, 9) + l += p.PartitionsParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetaScanRange) field3Length() int { +func (p *TMetaScanRange) field10Length() int { l := 0 - if p.IsSetBackendsParams() { - l += bthrift.Binary.FieldBeginLength("backends_params", thrift.STRUCT, 3) - l += p.BackendsParams.BLength() + if p.IsSetMetaCacheStatsParams() { + l += bthrift.Binary.FieldBeginLength("meta_cache_stats_params", thrift.STRUCT, 10) + l += p.MetaCacheStatsParams.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TMetaScanRange) field4Length() int { +func (p *TMetaScanRange) field11Length() int { l := 0 - if p.IsSetFrontendsParams() { - l += bthrift.Binary.FieldBeginLength("frontends_params", thrift.STRUCT, 4) - l += p.FrontendsParams.BLength() + if p.IsSetPartitionValuesParams() { + l += bthrift.Binary.FieldBeginLength("partition_values_params", thrift.STRUCT, 11) + l += p.PartitionValuesParams.BLength() l += bthrift.Binary.FieldEndLength() } return l @@ -15585,6 +20374,20 @@ func (p *TSchemaScanNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 15: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -15803,6 +20606,33 @@ func (p *TSchemaScanNode) FastReadField14(buf []byte) (int, error) { return offset, nil } +func (p 
*TSchemaScanNode) FastReadField15(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FeAddrList = make([]*types.TNetworkAddress, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTNetworkAddress() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FeAddrList = append(p.FeAddrList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TSchemaScanNode) FastWrite(buf []byte) int { return 0 @@ -15825,6 +20655,7 @@ func (p *TSchemaScanNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -15848,6 +20679,7 @@ func (p *TSchemaScanNode) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field14Length() + l += p.field15Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -15992,6 +20824,24 @@ func (p *TSchemaScanNode) fastWriteField14(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TSchemaScanNode) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFeAddrList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_addr_list", thrift.LIST, 15) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FeAddrList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TSchemaScanNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) @@ -16130,6 +20980,20 @@ func (p *TSchemaScanNode) field14Length() int { return l } +func (p *TSchemaScanNode) field15Length() int { + l := 0 + if p.IsSetFeAddrList() { + l += bthrift.Binary.FieldBeginLength("fe_addr_list", thrift.LIST, 15) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FeAddrList)) + for _, v := range p.FeAddrList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMetaScanNode) FastRead(buf []byte) (int, error) { var err error var offset int @@ -17349,6 +22213,20 @@ func (p *TOlapScanNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 18: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -17728,6 +22606,36 @@ func (p *TOlapScanNode) FastReadField17(buf []byte) (int, error) { return offset, nil } +func (p *TOlapScanNode) FastReadField18(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := 
bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TOlapScanNode) FastWrite(buf []byte) int { return 0 @@ -17754,6 +22662,7 @@ func (p *TOlapScanNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -17781,6 +22690,7 @@ func (p *TOlapScanNode) BLength() int { l += p.field15Length() l += p.field16Length() l += p.field17Length() + l += p.field18Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -18024,6 +22934,25 @@ func (p *TOlapScanNode) fastWriteField17(buf []byte, binaryWriter bthrift.Binary return offset } +func (p *TOlapScanNode) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 18) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TOlapScanNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) @@ -18233,6 +23162,19 @@ func (p *TOlapScanNode) field17Length() int { return l } +func (p *TOlapScanNode) field18Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 18) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TEqJoinCondition) FastRead(buf []byte) (int, error) { var err error var offset int @@ -18653,6 +23595,48 @@ func (p *THashJoinNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if 
fieldTypeId == thrift.BOOL { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -18896,24 +23880,79 @@ func (p *THashJoinNode) FastReadField9(buf []byte) (int, error) { func (p *THashJoinNode) FastReadField10(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsBroadcastJoin = &v + + } + return offset, nil +} + +func (p *THashJoinNode) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsMark = &v + + } + return offset, nil +} + +func (p *THashJoinNode) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TJoinDistributionType(v) + p.DistType = &tmp + + } + return offset, nil +} + +func (p *THashJoinNode) FastReadField13(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.MarkJoinConjuncts = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.MarkJoinConjuncts = append(p.MarkJoinConjuncts, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.IsBroadcastJoin = &v - } return offset, nil } -func (p *THashJoinNode) FastReadField11(buf []byte) (int, error) { +func (p *THashJoinNode) FastReadField14(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.IsMark = &v + p.UseSpecificProjections = &v } return offset, nil @@ -18932,6 +23971,7 @@ func (p *THashJoinNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -18939,6 +23979,8 @@ func (p *THashJoinNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -18960,6 +24002,9 @@ func (p *THashJoinNode) BLength() int { l += p.field9Length() l += p.field10Length() l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ 
-19119,6 +24164,46 @@ func (p *THashJoinNode) fastWriteField11(buf []byte, binaryWriter bthrift.Binary return offset } +func (p *THashJoinNode) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDistType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dist_type", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.DistType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THashJoinNode) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMarkJoinConjuncts() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mark_join_conjuncts", thrift.LIST, 13) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.MarkJoinConjuncts { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *THashJoinNode) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUseSpecificProjections() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_specific_projections", thrift.BOOL, 14) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UseSpecificProjections) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *THashJoinNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("join_op", thrift.I32, 1) @@ -19248,6 +24333,42 @@ func (p *THashJoinNode) field11Length() int { return l } +func (p *THashJoinNode) field12Length() int { + l := 0 + if p.IsSetDistType() { + l += bthrift.Binary.FieldBeginLength("dist_type", thrift.I32, 12) + l += bthrift.Binary.I32Length(int32(*p.DistType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THashJoinNode) field13Length() int { + l := 0 + if p.IsSetMarkJoinConjuncts() { + l += bthrift.Binary.FieldBeginLength("mark_join_conjuncts", thrift.LIST, 13) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.MarkJoinConjuncts)) + for _, v := range p.MarkJoinConjuncts { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *THashJoinNode) field14Length() int { + l := 0 + if p.IsSetUseSpecificProjections() { + l += bthrift.Binary.FieldBeginLength("use_specific_projections", thrift.BOOL, 14) + l += bthrift.Binary.BoolLength(*p.UseSpecificProjections) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TNestedLoopJoinNode) FastRead(buf []byte) (int, error) { var err error var offset int @@ -19384,6 +24505,34 @@ func (p *TNestedLoopJoinNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 9: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = 
bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19575,6 +24724,46 @@ func (p *TNestedLoopJoinNode) FastReadField8(buf []byte) (int, error) { return offset, nil } +func (p *TNestedLoopJoinNode) FastReadField9(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.MarkJoinConjuncts = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.MarkJoinConjuncts = append(p.MarkJoinConjuncts, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TNestedLoopJoinNode) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UseSpecificProjections = &v + + } + return offset, nil +} + // for compatibility func (p *TNestedLoopJoinNode) FastWrite(buf []byte) int { return 0 @@ -19587,11 +24776,13 @@ func (p *TNestedLoopJoinNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.B offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -19610,6 +24801,8 @@ func (p *TNestedLoopJoinNode) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -19723,6 +24916,35 @@ func (p *TNestedLoopJoinNode) fastWriteField8(buf []byte, binaryWriter bthrift.B return offset } +func (p *TNestedLoopJoinNode) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMarkJoinConjuncts() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mark_join_conjuncts", thrift.LIST, 9) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.MarkJoinConjuncts { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TNestedLoopJoinNode) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUseSpecificProjections() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_specific_projections", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UseSpecificProjections) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TNestedLoopJoinNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("join_op", thrift.I32, 
1) @@ -19816,6 +25038,31 @@ func (p *TNestedLoopJoinNode) field8Length() int { return l } +func (p *TNestedLoopJoinNode) field9Length() int { + l := 0 + if p.IsSetMarkJoinConjuncts() { + l += bthrift.Binary.FieldBeginLength("mark_join_conjuncts", thrift.LIST, 9) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.MarkJoinConjuncts)) + for _, v := range p.MarkJoinConjuncts { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TNestedLoopJoinNode) field10Length() int { + l := 0 + if p.IsSetUseSpecificProjections() { + l += bthrift.Binary.FieldBeginLength("use_specific_projections", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.UseSpecificProjections) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMergeJoinNode) FastRead(buf []byte) (int, error) { var err error var offset int @@ -20194,6 +25441,34 @@ func (p *TAggregationNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 9: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -20399,6 +25674,32 @@ func (p *TAggregationNode) FastReadField8(buf []byte) (int, error) { return offset, nil } +func (p *TAggregationNode) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsColocate = &v + + } + return offset, nil +} + +func (p *TAggregationNode) FastReadField10(buf []byte) (int, error) { + offset := 0 + + tmp := NewTSortInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.AggSortInfoByGroupKey = tmp + return offset, nil +} + // for compatibility func (p *TAggregationNode) FastWrite(buf []byte) int { return 0 @@ -20413,9 +25714,11 @@ func (p *TAggregationNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -20434,6 +25737,8 @@ func (p *TAggregationNode) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -20541,6 +25846,27 @@ func (p *TAggregationNode) fastWriteField8(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TAggregationNode) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetIsColocate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_colocate", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsColocate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAggregationNode) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAggSortInfoByGroupKey() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "agg_sort_info_by_group_key", thrift.STRUCT, 10) + offset += p.AggSortInfoByGroupKey.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TAggregationNode) field1Length() int { l := 0 if p.IsSetGroupingExprs() { @@ -20619,12 +25945,33 @@ func (p *TAggregationNode) field7Length() int { return l } -func (p *TAggregationNode) field8Length() int { +func (p *TAggregationNode) field8Length() int { + l := 0 + if p.IsSetIsFirstPhase() { + l += bthrift.Binary.FieldBeginLength("is_first_phase", thrift.BOOL, 8) + l += bthrift.Binary.BoolLength(*p.IsFirstPhase) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAggregationNode) field9Length() int { + l := 0 + if p.IsSetIsColocate() { + l += bthrift.Binary.FieldBeginLength("is_colocate", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(*p.IsColocate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAggregationNode) field10Length() int { l := 0 - if p.IsSetIsFirstPhase() { - l += bthrift.Binary.FieldBeginLength("is_first_phase", thrift.BOOL, 8) - l += bthrift.Binary.BoolLength(*p.IsFirstPhase) - + if p.IsSetAggSortInfoByGroupKey() { + l += bthrift.Binary.FieldBeginLength("agg_sort_info_by_group_key", thrift.STRUCT, 10) + l += p.AggSortInfoByGroupKey.BLength() l += bthrift.Binary.FieldEndLength() } return l @@ -21240,35 +26587,274 @@ func (p *TRepeatNode) field4Length() int { return l } -func (p *TRepeatNode) field5Length() int { +func (p *TRepeatNode) field5Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("all_slot_ids", thrift.SET, 5) + l += bthrift.Binary.SetBeginLength(thrift.I32, len(p.AllSlotIds)) + + for i := 0; i < len(p.AllSlotIds); i++ { + for j := i + 1; j < len(p.AllSlotIds); j++ { + if func(tgt, src types.TSlotId) bool { + if tgt != src { + return false + } + return true + }(p.AllSlotIds[i], p.AllSlotIds[j]) { + panic(fmt.Errorf("%T error writing set field: slice is not unique", p.AllSlotIds[i])) + } + } + } + var tmpV types.TSlotId + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.AllSlotIds) + l += bthrift.Binary.SetEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TRepeatNode) field6Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("exprs", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Exprs)) + for _, v := range p.Exprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TPreAggregationNode) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetGroupExprs bool = false + var issetAggregateExprs bool = false + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } 
+ switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetGroupExprs = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetAggregateExprs = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + if !issetGroupExprs { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetAggregateExprs { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPreAggregationNode[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPreAggregationNode[fieldId])) +} + +func (p *TPreAggregationNode) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.GroupExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.GroupExprs = append(p.GroupExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPreAggregationNode) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.AggregateExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.AggregateExprs = append(p.AggregateExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +// for compatibility +func (p *TPreAggregationNode) FastWrite(buf []byte) int { + return 0 +} + +func (p *TPreAggregationNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { 
+ offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPreAggregationNode") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TPreAggregationNode) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TPreAggregationNode") + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TPreAggregationNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_exprs", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.GroupExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPreAggregationNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "aggregate_exprs", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.AggregateExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TPreAggregationNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("all_slot_ids", thrift.SET, 5) - l += bthrift.Binary.SetBeginLength(thrift.I32, len(p.AllSlotIds)) - - for i := 0; i < len(p.AllSlotIds); i++ { - for j := i + 1; j < len(p.AllSlotIds); j++ { - if func(tgt, src types.TSlotId) bool { - if tgt != src { - return false - } - return true - }(p.AllSlotIds[i], p.AllSlotIds[j]) { - panic(fmt.Errorf("%T error writing set field: slice is not unique", p.AllSlotIds[i])) - } - } + l += bthrift.Binary.FieldBeginLength("group_exprs", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.GroupExprs)) + for _, v := range p.GroupExprs { + l += v.BLength() } - var tmpV types.TSlotId - l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.AllSlotIds) - l += bthrift.Binary.SetEndLength() + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TRepeatNode) field6Length() int { +func (p *TPreAggregationNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("exprs", thrift.LIST, 6) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Exprs)) - for _, v := range p.Exprs { + l += bthrift.Binary.FieldBeginLength("aggregate_exprs", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AggregateExprs)) + for _, v := range p.AggregateExprs { l += v.BLength() } l += bthrift.Binary.ListEndLength() @@ -21276,14 +26862,14 @@ func (p *TRepeatNode) field6Length() int { return l } -func (p *TPreAggregationNode) FastRead(buf []byte) (int, error) { +func (p *TSortNode) FastRead(buf []byte) (int, error) { var err 
error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetGroupExprs bool = false - var issetAggregateExprs bool = false + var issetSortInfo bool = false + var issetUseTopN bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -21301,13 +26887,13 @@ func (p *TPreAggregationNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetGroupExprs = true + issetSortInfo = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21316,13 +26902,111 @@ func (p *TPreAggregationNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetAggregateExprs = true + issetUseTopN = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21350,12 +27034,12 @@ func (p *TPreAggregationNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetGroupExprs { + if !issetSortInfo { fieldId = 1 goto RequiredFieldNotSetError } - if !issetAggregateExprs { + if !issetUseTopN { fieldId = 2 goto RequiredFieldNotSetError } @@ -21365,7 +27049,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T 
read field %d '%s' error: ", p, fieldId, fieldIDToName_TPreAggregationNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -21373,156 +27057,366 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPreAggregationNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSortNode[fieldId])) } -func (p *TPreAggregationNode) FastReadField1(buf []byte) (int, error) { +func (p *TSortNode) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + tmp := NewTSortInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err + } else { + offset += l } - p.GroupExprs = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + p.SortInfo = tmp + return offset, nil +} + +func (p *TSortNode) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.UseTopN = v - p.GroupExprs = append(p.GroupExprs, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TSortNode) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Offset = &v + + } + return offset, nil +} + +func (p *TSortNode) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.IsDefaultLimit = &v + } return offset, nil } -func (p *TPreAggregationNode) FastReadField2(buf []byte) (int, error) { +func (p *TSortNode) FastReadField7(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err + } else { + offset += l + p.UseTopnOpt = &v + } - p.AggregateExprs = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + return offset, nil +} + +func (p *TSortNode) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MergeByExchange = &v + + } + return offset, nil +} + +func (p *TSortNode) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsAnalyticSort = &v + + } + return offset, nil +} + +func (p *TSortNode) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, 
err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsColocate = &v - p.AggregateExprs = append(p.AggregateExprs, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TSortNode) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l + + tmp := TSortAlgorithm(v) + p.Algorithm = &tmp + } return offset, nil } // for compatibility -func (p *TPreAggregationNode) FastWrite(buf []byte) int { +func (p *TSortNode) FastWrite(buf []byte) int { return 0 } -func (p *TPreAggregationNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSortNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPreAggregationNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSortNode") if p != nil { - offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPreAggregationNode) BLength() int { +func (p *TSortNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPreAggregationNode") + l += bthrift.Binary.StructBeginLength("TSortNode") if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPreAggregationNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSortNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "group_exprs", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.GroupExprs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 1) + offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TPreAggregationNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TSortNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "aggregate_exprs", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := 
range p.AggregateExprs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_top_n", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], p.UseTopN) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TPreAggregationNode) field1Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("group_exprs", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.GroupExprs)) - for _, v := range p.GroupExprs { - l += v.BLength() +func (p *TSortNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOffset() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "offset", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Offset) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - l += bthrift.Binary.ListEndLength() + return offset +} + +func (p *TSortNode) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsDefaultLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_default_limit", thrift.BOOL, 6) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsDefaultLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUseTopnOpt() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_topn_opt", thrift.BOOL, 7) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UseTopnOpt) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMergeByExchange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "merge_by_exchange", thrift.BOOL, 8) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.MergeByExchange) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsAnalyticSort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_analytic_sort", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsAnalyticSort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsColocate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_colocate", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsColocate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetAlgorithm() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "algorithm", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Algorithm)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TSortNode) field1Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 1) + l += p.SortInfo.BLength() l += bthrift.Binary.FieldEndLength() return l } -func (p 
*TPreAggregationNode) field2Length() int { +func (p *TSortNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("aggregate_exprs", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AggregateExprs)) - for _, v := range p.AggregateExprs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldBeginLength("use_top_n", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(p.UseTopN) + l += bthrift.Binary.FieldEndLength() return l } -func (p *TSortNode) FastRead(buf []byte) (int, error) { +func (p *TSortNode) field3Length() int { + l := 0 + if p.IsSetOffset() { + l += bthrift.Binary.FieldBeginLength("offset", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Offset) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field6Length() int { + l := 0 + if p.IsSetIsDefaultLimit() { + l += bthrift.Binary.FieldBeginLength("is_default_limit", thrift.BOOL, 6) + l += bthrift.Binary.BoolLength(*p.IsDefaultLimit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field7Length() int { + l := 0 + if p.IsSetUseTopnOpt() { + l += bthrift.Binary.FieldBeginLength("use_topn_opt", thrift.BOOL, 7) + l += bthrift.Binary.BoolLength(*p.UseTopnOpt) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field8Length() int { + l := 0 + if p.IsSetMergeByExchange() { + l += bthrift.Binary.FieldBeginLength("merge_by_exchange", thrift.BOOL, 8) + l += bthrift.Binary.BoolLength(*p.MergeByExchange) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field9Length() int { + l := 0 + if p.IsSetIsAnalyticSort() { + l += bthrift.Binary.FieldBeginLength("is_analytic_sort", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(*p.IsAnalyticSort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field10Length() int { + l := 0 + if p.IsSetIsColocate() { + l += bthrift.Binary.FieldBeginLength("is_colocate", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.IsColocate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TSortNode) field11Length() int { + l := 0 + if p.IsSetAlgorithm() { + l += bthrift.Binary.FieldBeginLength("algorithm", thrift.I32, 11) + l += bthrift.Binary.I32Length(int32(*p.Algorithm)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPartitionSortNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetSortInfo bool = false - var issetUseTopN bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -21540,13 +27434,12 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetSortInfo = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21555,13 +27448,12 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.BOOL { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetUseTopN = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21570,7 +27462,7 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == 
thrift.BOOL { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -21583,9 +27475,9 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 6: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField6(buf[offset:]) + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -21597,9 +27489,23 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 7: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField7(buf[offset:]) + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -21631,33 +27537,49 @@ func (p *TSortNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetSortInfo { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetUseTopN { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionSortNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSortNode[fieldId])) } -func (p *TSortNode) FastReadField1(buf []byte) (int, error) { +func (p *TPartitionSortNode) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PartitionExprs = append(p.PartitionExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPartitionSortNode) FastReadField2(buf []byte) (int, error) { offset := 0 tmp := NewTSortInfo() @@ -21670,200 +27592,246 @@ func (p *TSortNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TSortNode) FastReadField2(buf []byte) (int, error) { +func (p *TPartitionSortNode) FastReadField3(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.UseTopN = v + p.HasGlobalLimit = &v } return offset, nil } -func 
(p *TSortNode) FastReadField3(buf []byte) (int, error) { +func (p *TPartitionSortNode) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Offset = &v + + tmp := TopNAlgorithm(v) + p.TopNAlgorithm = &tmp } return offset, nil } -func (p *TSortNode) FastReadField6(buf []byte) (int, error) { +func (p *TPartitionSortNode) FastReadField5(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.IsDefaultLimit = &v + p.PartitionInnerLimit = &v } return offset, nil } -func (p *TSortNode) FastReadField7(buf []byte) (int, error) { +func (p *TPartitionSortNode) FastReadField6(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.UseTopnOpt = &v + + tmp := TPartTopNPhase(v) + p.PtopnPhase = &tmp } return offset, nil } // for compatibility -func (p *TSortNode) FastWrite(buf []byte) int { +func (p *TPartitionSortNode) FastWrite(buf []byte) int { return 0 } -func (p *TSortNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSortNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionSortNode") if p != nil { - offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TSortNode) BLength() int { +func (p *TPartitionSortNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TSortNode") + l += bthrift.Binary.StructBeginLength("TPartitionSortNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() + l += p.field5Length() l += p.field6Length() - l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TSortNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 1) - offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + if p.IsSetPartitionExprs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_exprs", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.PartitionExprs { + length++ + offset += 
v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TSortNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_top_n", thrift.BOOL, 2) - offset += bthrift.Binary.WriteBool(buf[offset:], p.UseTopN) + if p.IsSetSortInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 2) + offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) +func (p *TPartitionSortNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHasGlobalLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "has_global_limit", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.HasGlobalLimit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TSortNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetOffset() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "offset", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Offset) + if p.IsSetTopNAlgorithm() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "top_n_algorithm", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.TopNAlgorithm)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TSortNode) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetIsDefaultLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_default_limit", thrift.BOOL, 6) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsDefaultLimit) + if p.IsSetPartitionInnerLimit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_inner_limit", thrift.I64, 5) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionInnerLimit) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TSortNode) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TPartitionSortNode) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetUseTopnOpt() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "use_topn_opt", thrift.BOOL, 7) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.UseTopnOpt) + if p.IsSetPtopnPhase() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ptopn_phase", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.PtopnPhase)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TSortNode) field1Length() int { +func (p *TPartitionSortNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 1) - l += p.SortInfo.BLength() - l += bthrift.Binary.FieldEndLength() + if p.IsSetPartitionExprs() { + l += 
bthrift.Binary.FieldBeginLength("partition_exprs", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionExprs)) + for _, v := range p.PartitionExprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TSortNode) field2Length() int { +func (p *TPartitionSortNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("use_top_n", thrift.BOOL, 2) - l += bthrift.Binary.BoolLength(p.UseTopN) + if p.IsSetSortInfo() { + l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 2) + l += p.SortInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} - l += bthrift.Binary.FieldEndLength() +func (p *TPartitionSortNode) field3Length() int { + l := 0 + if p.IsSetHasGlobalLimit() { + l += bthrift.Binary.FieldBeginLength("has_global_limit", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(*p.HasGlobalLimit) + + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TSortNode) field3Length() int { +func (p *TPartitionSortNode) field4Length() int { l := 0 - if p.IsSetOffset() { - l += bthrift.Binary.FieldBeginLength("offset", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.Offset) + if p.IsSetTopNAlgorithm() { + l += bthrift.Binary.FieldBeginLength("top_n_algorithm", thrift.I32, 4) + l += bthrift.Binary.I32Length(int32(*p.TopNAlgorithm)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TSortNode) field6Length() int { +func (p *TPartitionSortNode) field5Length() int { l := 0 - if p.IsSetIsDefaultLimit() { - l += bthrift.Binary.FieldBeginLength("is_default_limit", thrift.BOOL, 6) - l += bthrift.Binary.BoolLength(*p.IsDefaultLimit) + if p.IsSetPartitionInnerLimit() { + l += bthrift.Binary.FieldBeginLength("partition_inner_limit", thrift.I64, 5) + l += bthrift.Binary.I64Length(*p.PartitionInnerLimit) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TSortNode) field7Length() int { +func (p *TPartitionSortNode) field6Length() int { l := 0 - if p.IsSetUseTopnOpt() { - l += bthrift.Binary.FieldBeginLength("use_topn_opt", thrift.BOOL, 7) - l += bthrift.Binary.BoolLength(*p.UseTopnOpt) + if p.IsSetPtopnPhase() { + l += bthrift.Binary.FieldBeginLength("ptopn_phase", thrift.I32, 6) + l += bthrift.Binary.I32Length(int32(*p.PtopnPhase)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPartitionSortNode) FastRead(buf []byte) (int, error) { +func (p *TAnalyticWindowBoundary) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetType bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -21881,12 +27849,13 @@ func (p *TPartitionSortNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetType = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -21909,36 +27878,8 @@ func (p *TPartitionSortNode) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.BOOL { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - 
goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: if fieldTypeId == thrift.I64 { - l, err = p.FastReadField5(buf[offset:]) + l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -21970,256 +27911,159 @@ func (p *TPartitionSortNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetType { + fieldId = 1 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionSortNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticWindowBoundary[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticWindowBoundary[fieldId])) } -func (p *TPartitionSortNode) FastReadField1(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.PartitionExprs = append(p.PartitionExprs, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TPartitionSortNode) FastReadField2(buf []byte) (int, error) { +func (p *TAnalyticWindowBoundary) FastReadField1(buf []byte) (int, error) { offset := 0 - tmp := NewTSortInfo() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - } - p.SortInfo = tmp - return offset, nil -} - -func (p *TPartitionSortNode) FastReadField3(buf []byte) (int, error) { - offset := 0 - if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.HasGlobalLimit = &v + p.Type = TAnalyticWindowBoundaryType(v) } return offset, nil } -func (p *TPartitionSortNode) FastReadField4(buf []byte) (int, error) { +func (p *TAnalyticWindowBoundary) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := exprs.NewTExpr() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TopNAlgorithm(v) - p.TopNAlgorithm = &tmp - } + p.RangeOffsetPredicate = tmp return offset, nil } -func (p *TPartitionSortNode) FastReadField5(buf []byte) (int, error) { +func (p *TAnalyticWindowBoundary) FastReadField3(buf []byte) (int, error) { offset := 0 if v, 
l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PartitionInnerLimit = &v + p.RowsOffsetValue = &v } return offset, nil } // for compatibility -func (p *TPartitionSortNode) FastWrite(buf []byte) int { +func (p *TAnalyticWindowBoundary) FastWrite(buf []byte) int { return 0 } -func (p *TPartitionSortNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindowBoundary) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPartitionSortNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticWindowBoundary") if p != nil { offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TPartitionSortNode) BLength() int { +func (p *TAnalyticWindowBoundary) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TPartitionSortNode") + l += bthrift.Binary.StructBeginLength("TAnalyticWindowBoundary") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TPartitionSortNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPartitionExprs() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_exprs", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.PartitionExprs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPartitionSortNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetSortInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 2) - offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TPartitionSortNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindowBoundary) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetHasGlobalLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "has_global_limit", thrift.BOOL, 3) - offset += bthrift.Binary.WriteBool(buf[offset:], *p.HasGlobalLimit) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Type)) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TPartitionSortNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindowBoundary) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetTopNAlgorithm() { - 
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "top_n_algorithm", thrift.I32, 4) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.TopNAlgorithm)) - + if p.IsSetRangeOffsetPredicate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "range_offset_predicate", thrift.STRUCT, 2) + offset += p.RangeOffsetPredicate.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPartitionSortNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindowBoundary) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetPartitionInnerLimit() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_inner_limit", thrift.I64, 5) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionInnerLimit) + if p.IsSetRowsOffsetValue() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rows_offset_value", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowsOffsetValue) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TPartitionSortNode) field1Length() int { - l := 0 - if p.IsSetPartitionExprs() { - l += bthrift.Binary.FieldBeginLength("partition_exprs", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionExprs)) - for _, v := range p.PartitionExprs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPartitionSortNode) field2Length() int { - l := 0 - if p.IsSetSortInfo() { - l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 2) - l += p.SortInfo.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TPartitionSortNode) field3Length() int { +func (p *TAnalyticWindowBoundary) field1Length() int { l := 0 - if p.IsSetHasGlobalLimit() { - l += bthrift.Binary.FieldBeginLength("has_global_limit", thrift.BOOL, 3) - l += bthrift.Binary.BoolLength(*p.HasGlobalLimit) + l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) + l += bthrift.Binary.I32Length(int32(p.Type)) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TPartitionSortNode) field4Length() int { +func (p *TAnalyticWindowBoundary) field2Length() int { l := 0 - if p.IsSetTopNAlgorithm() { - l += bthrift.Binary.FieldBeginLength("top_n_algorithm", thrift.I32, 4) - l += bthrift.Binary.I32Length(int32(*p.TopNAlgorithm)) - + if p.IsSetRangeOffsetPredicate() { + l += bthrift.Binary.FieldBeginLength("range_offset_predicate", thrift.STRUCT, 2) + l += p.RangeOffsetPredicate.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TPartitionSortNode) field5Length() int { +func (p *TAnalyticWindowBoundary) field3Length() int { l := 0 - if p.IsSetPartitionInnerLimit() { - l += bthrift.Binary.FieldBeginLength("partition_inner_limit", thrift.I64, 5) - l += bthrift.Binary.I64Length(*p.PartitionInnerLimit) + if p.IsSetRowsOffsetValue() { + l += bthrift.Binary.FieldBeginLength("rows_offset_value", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.RowsOffsetValue) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAnalyticWindowBoundary) FastRead(buf []byte) (int, error) { +func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -22272,7 +28116,7 @@ func (p *TAnalyticWindowBoundary) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if 
fieldTypeId == thrift.STRUCT { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -22315,7 +28159,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticWindowBoundary[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticWindow[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -22323,10 +28167,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticWindowBoundary[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticWindow[fieldId])) } -func (p *TAnalyticWindowBoundary) FastReadField1(buf []byte) (int, error) { +func (p *TAnalyticWindow) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { @@ -22334,59 +28178,59 @@ func (p *TAnalyticWindowBoundary) FastReadField1(buf []byte) (int, error) { } else { offset += l - p.Type = TAnalyticWindowBoundaryType(v) + p.Type = TAnalyticWindowType(v) } return offset, nil } -func (p *TAnalyticWindowBoundary) FastReadField2(buf []byte) (int, error) { +func (p *TAnalyticWindow) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := exprs.NewTExpr() + tmp := NewTAnalyticWindowBoundary() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.RangeOffsetPredicate = tmp + p.WindowStart = tmp return offset, nil } -func (p *TAnalyticWindowBoundary) FastReadField3(buf []byte) (int, error) { +func (p *TAnalyticWindow) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + tmp := NewTAnalyticWindowBoundary() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - p.RowsOffsetValue = &v - } + p.WindowEnd = tmp return offset, nil } // for compatibility -func (p *TAnalyticWindowBoundary) FastWrite(buf []byte) int { +func (p *TAnalyticWindow) FastWrite(buf []byte) int { return 0 } -func (p *TAnalyticWindowBoundary) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindow) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticWindowBoundary") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticWindow") if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAnalyticWindowBoundary) BLength() int { +func (p *TAnalyticWindow) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAnalyticWindowBoundary") + 
l += bthrift.Binary.StructBeginLength("TAnalyticWindow") if p != nil { l += p.field1Length() l += p.field2Length() @@ -22397,7 +28241,7 @@ func (p *TAnalyticWindowBoundary) BLength() int { return l } -func (p *TAnalyticWindowBoundary) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindow) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Type)) @@ -22406,28 +28250,27 @@ func (p *TAnalyticWindowBoundary) fastWriteField1(buf []byte, binaryWriter bthri return offset } -func (p *TAnalyticWindowBoundary) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindow) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRangeOffsetPredicate() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "range_offset_predicate", thrift.STRUCT, 2) - offset += p.RangeOffsetPredicate.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetWindowStart() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window_start", thrift.STRUCT, 2) + offset += p.WindowStart.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAnalyticWindowBoundary) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticWindow) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetRowsOffsetValue() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "rows_offset_value", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowsOffsetValue) - + if p.IsSetWindowEnd() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window_end", thrift.STRUCT, 3) + offset += p.WindowEnd.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAnalyticWindowBoundary) field1Length() int { +func (p *TAnalyticWindow) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) l += bthrift.Binary.I32Length(int32(p.Type)) @@ -22436,34 +28279,37 @@ func (p *TAnalyticWindowBoundary) field1Length() int { return l } -func (p *TAnalyticWindowBoundary) field2Length() int { +func (p *TAnalyticWindow) field2Length() int { l := 0 - if p.IsSetRangeOffsetPredicate() { - l += bthrift.Binary.FieldBeginLength("range_offset_predicate", thrift.STRUCT, 2) - l += p.RangeOffsetPredicate.BLength() + if p.IsSetWindowStart() { + l += bthrift.Binary.FieldBeginLength("window_start", thrift.STRUCT, 2) + l += p.WindowStart.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAnalyticWindowBoundary) field3Length() int { +func (p *TAnalyticWindow) field3Length() int { l := 0 - if p.IsSetRowsOffsetValue() { - l += bthrift.Binary.FieldBeginLength("rows_offset_value", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.RowsOffsetValue) - + if p.IsSetWindowEnd() { + l += bthrift.Binary.FieldBeginLength("window_end", thrift.STRUCT, 3) + l += p.WindowEnd.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { +func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetType bool = false + var issetPartitionExprs bool = false + var issetOrderByExprs bool = false + 
var issetAnalyticFunctions bool = false + var issetIntermediateTupleId bool = false + var issetOutputTupleId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -22481,13 +28327,72 @@ func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetPartitionExprs = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetOrderByExprs = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetAnalyticFunctions = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: if fieldTypeId == thrift.I32 { - l, err = p.FastReadField1(buf[offset:]) + l, err = p.FastReadField5(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetType = true + issetIntermediateTupleId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22495,9 +28400,38 @@ func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 2: + case 6: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetOutputTupleId = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField2(buf[offset:]) + l, err = p.FastReadField8(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -22509,9 +28443,23 @@ func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - case 3: + case 9: if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField3(buf[offset:]) + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) offset += l if err != nil { goto ReadFieldError @@ -22543,17 +28491,37 @@ func (p *TAnalyticWindow) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetType { + if !issetPartitionExprs { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetOrderByExprs { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if 
!issetAnalyticFunctions { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetIntermediateTupleId { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetOutputTupleId { + fieldId = 6 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticWindow[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -22561,149 +28529,461 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticWindow[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticNode[fieldId])) +} + +func (p *TAnalyticNode) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.PartitionExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.PartitionExprs = append(p.PartitionExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TAnalyticNode) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.OrderByExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.OrderByExprs = append(p.OrderByExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TAnalyticNode) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.AnalyticFunctions = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.AnalyticFunctions = append(p.AnalyticFunctions, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TAnalyticNode) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := NewTAnalyticWindow() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Window = tmp + 
return offset, nil +} + +func (p *TAnalyticNode) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IntermediateTupleId = v + + } + return offset, nil +} + +func (p *TAnalyticNode) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.OutputTupleId = v + + } + return offset, nil +} + +func (p *TAnalyticNode) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BufferedTupleId = &v + + } + return offset, nil } -func (p *TAnalyticWindow) FastReadField1(buf []byte) (int, error) { +func (p *TAnalyticNode) FastReadField8(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + tmp := exprs.NewTExpr() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.Type = TAnalyticWindowType(v) - } + p.PartitionByEq = tmp return offset, nil } -func (p *TAnalyticWindow) FastReadField2(buf []byte) (int, error) { +func (p *TAnalyticNode) FastReadField9(buf []byte) (int, error) { offset := 0 - tmp := NewTAnalyticWindowBoundary() + tmp := exprs.NewTExpr() if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.WindowStart = tmp + p.OrderByEq = tmp return offset, nil } -func (p *TAnalyticWindow) FastReadField3(buf []byte) (int, error) { +func (p *TAnalyticNode) FastReadField10(buf []byte) (int, error) { offset := 0 - tmp := NewTAnalyticWindowBoundary() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.IsColocate = &v + } - p.WindowEnd = tmp return offset, nil } // for compatibility -func (p *TAnalyticWindow) FastWrite(buf []byte) int { +func (p *TAnalyticNode) FastWrite(buf []byte) int { return 0 } -func (p *TAnalyticWindow) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticWindow") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticNode") if p != nil { + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAnalyticWindow) BLength() int { +func (p *TAnalyticNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAnalyticWindow") + l += bthrift.Binary.StructBeginLength("TAnalyticNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += 
p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAnalyticWindow) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Type)) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_exprs", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.PartitionExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TAnalyticNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "order_by_exprs", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.OrderByExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} +func (p *TAnalyticNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "analytic_functions", thrift.LIST, 3) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.AnalyticFunctions { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAnalyticWindow) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetWindowStart() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window_start", thrift.STRUCT, 2) - offset += p.WindowStart.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetWindow() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window", thrift.STRUCT, 4) + offset += p.Window.FastWriteNocopy(buf[offset:], binaryWriter) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAnalyticWindow) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAnalyticNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetWindowEnd() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window_end", thrift.STRUCT, 3) - offset += p.WindowEnd.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intermediate_tuple_id", thrift.I32, 5) + offset += bthrift.Binary.WriteI32(buf[offset:], p.IntermediateTupleId) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TAnalyticNode) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_tuple_id", thrift.I32, 6) + offset += bthrift.Binary.WriteI32(buf[offset:], p.OutputTupleId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TAnalyticNode) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBufferedTupleId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "buffered_tuple_id", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BufferedTupleId) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TAnalyticWindow) field1Length() int { +func (p *TAnalyticNode) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionByEq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_by_eq", thrift.STRUCT, 8) + offset += p.PartitionByEq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAnalyticNode) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOrderByEq() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "order_by_eq", thrift.STRUCT, 9) + offset += p.OrderByEq.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAnalyticNode) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsColocate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_colocate", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsColocate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TAnalyticNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1) - l += bthrift.Binary.I32Length(int32(p.Type)) + l += bthrift.Binary.FieldBeginLength("partition_exprs", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionExprs)) + for _, v := range p.PartitionExprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} +func (p *TAnalyticNode) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("order_by_exprs", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.OrderByExprs)) + for _, v := range p.OrderByExprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TAnalyticWindow) field2Length() int { +func (p *TAnalyticNode) field3Length() int { l := 0 - if p.IsSetWindowStart() { - l += bthrift.Binary.FieldBeginLength("window_start", thrift.STRUCT, 2) - l += p.WindowStart.BLength() + l += bthrift.Binary.FieldBeginLength("analytic_functions", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AnalyticFunctions)) + for _, v := range p.AnalyticFunctions { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TAnalyticNode) field4Length() int { + l := 0 + if p.IsSetWindow() { + l += bthrift.Binary.FieldBeginLength("window", thrift.STRUCT, 4) + l += p.Window.BLength() l += bthrift.Binary.FieldEndLength() } return l } -func (p 
*TAnalyticWindow) field3Length() int { +func (p *TAnalyticNode) field5Length() int { l := 0 - if p.IsSetWindowEnd() { - l += bthrift.Binary.FieldBeginLength("window_end", thrift.STRUCT, 3) - l += p.WindowEnd.BLength() + l += bthrift.Binary.FieldBeginLength("intermediate_tuple_id", thrift.I32, 5) + l += bthrift.Binary.I32Length(p.IntermediateTupleId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TAnalyticNode) field6Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("output_tuple_id", thrift.I32, 6) + l += bthrift.Binary.I32Length(p.OutputTupleId) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TAnalyticNode) field7Length() int { + l := 0 + if p.IsSetBufferedTupleId() { + l += bthrift.Binary.FieldBeginLength("buffered_tuple_id", thrift.I32, 7) + l += bthrift.Binary.I32Length(*p.BufferedTupleId) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { +func (p *TAnalyticNode) field8Length() int { + l := 0 + if p.IsSetPartitionByEq() { + l += bthrift.Binary.FieldBeginLength("partition_by_eq", thrift.STRUCT, 8) + l += p.PartitionByEq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAnalyticNode) field9Length() int { + l := 0 + if p.IsSetOrderByEq() { + l += bthrift.Binary.FieldBeginLength("order_by_eq", thrift.STRUCT, 9) + l += p.OrderByEq.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TAnalyticNode) field10Length() int { + l := 0 + if p.IsSetIsColocate() { + l += bthrift.Binary.FieldBeginLength("is_colocate", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.IsColocate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMergeNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetPartitionExprs bool = false - var issetOrderByExprs bool = false - var issetAnalyticFunctions bool = false - var issetIntermediateTupleId bool = false - var issetOutputTupleId bool = false + var issetTupleId bool = false + var issetResultExprLists bool = false + var issetConstExprLists bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -22721,13 +29001,13 @@ func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetPartitionExprs = true + issetTupleId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22742,7 +29022,7 @@ func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetOrderByExprs = true + issetResultExprLists = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22757,93 +29037,7 @@ func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetAnalyticFunctions = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField4(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 5: - if fieldTypeId == thrift.I32 { - l, err = 
p.FastReadField5(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetIntermediateTupleId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 6: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField6(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetOutputTupleId = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 7: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField7(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField8(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 9: - if fieldTypeId == thrift.STRUCT { - l, err = p.FastReadField9(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } + issetConstExprLists = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -22871,37 +29065,27 @@ func (p *TAnalyticNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetPartitionExprs { + if !issetTupleId { fieldId = 1 goto RequiredFieldNotSetError } - if !issetOrderByExprs { + if !issetResultExprLists { fieldId = 2 goto RequiredFieldNotSetError } - if !issetAnalyticFunctions { + if !issetConstExprLists { fieldId = 3 goto RequiredFieldNotSetError } - - if !issetIntermediateTupleId { - fieldId = 5 - goto RequiredFieldNotSetError - } - - if !issetOutputTupleId { - fieldId = 6 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAnalyticNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMergeNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -22909,37 +29093,24 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TAnalyticNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMergeNode[fieldId])) } -func (p *TAnalyticNode) FastReadField1(buf []byte) (int, error) { +func (p *TMergeNode) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.PartitionExprs = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, 
err - } else { - offset += l - } - - p.PartitionExprs = append(p.PartitionExprs, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.TupleId = v + } return offset, nil } -func (p *TAnalyticNode) FastReadField2(buf []byte) (int, error) { +func (p *TMergeNode) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -22947,16 +29118,31 @@ func (p *TAnalyticNode) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.OrderByExprs = make([]*exprs.TExpr, 0, size) + p.ResultExprLists = make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem1 := exprs.NewTExpr() + if l, err := _elem1.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.OrderByExprs = append(p.OrderByExprs, _elem) + p.ResultExprLists = append(p.ResultExprLists, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -22966,7 +29152,7 @@ func (p *TAnalyticNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TAnalyticNode) FastReadField3(buf []byte) (int, error) { +func (p *TMergeNode) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -22974,351 +29160,170 @@ func (p *TAnalyticNode) FastReadField3(buf []byte) (int, error) { if err != nil { return offset, err } - p.AnalyticFunctions = make([]*exprs.TExpr, 0, size) + p.ConstExprLists = make([][]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l } + _elem := make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem1 := exprs.NewTExpr() + if l, err := _elem1.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - p.AnalyticFunctions = append(p.AnalyticFunctions, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - return offset, nil -} - -func (p *TAnalyticNode) FastReadField4(buf []byte) (int, error) { - offset := 0 - - tmp := NewTAnalyticWindow() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.Window = tmp - return offset, nil -} - -func (p *TAnalyticNode) FastReadField5(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.IntermediateTupleId = v - - } - return offset, nil -} - -func (p *TAnalyticNode) FastReadField6(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.OutputTupleId = v - - } - return offset, nil -} - -func (p 
*TAnalyticNode) FastReadField7(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - p.BufferedTupleId = &v - - } - return offset, nil -} - -func (p *TAnalyticNode) FastReadField8(buf []byte) (int, error) { - offset := 0 - - tmp := exprs.NewTExpr() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - p.PartitionByEq = tmp - return offset, nil -} - -func (p *TAnalyticNode) FastReadField9(buf []byte) (int, error) { - offset := 0 + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - tmp := exprs.NewTExpr() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + p.ConstExprLists = append(p.ConstExprLists, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.OrderByEq = tmp return offset, nil } // for compatibility -func (p *TAnalyticNode) FastWrite(buf []byte) int { +func (p *TMergeNode) FastWrite(buf []byte) int { return 0 } -func (p *TAnalyticNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMergeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAnalyticNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMergeNode") if p != nil { - offset += p.fastWriteField5(buf[offset:], binaryWriter) - offset += p.fastWriteField6(buf[offset:], binaryWriter) - offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) - offset += p.fastWriteField8(buf[offset:], binaryWriter) - offset += p.fastWriteField9(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAnalyticNode) BLength() int { +func (p *TMergeNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAnalyticNode") + l += bthrift.Binary.StructBeginLength("TMergeNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() - l += p.field4Length() - l += p.field5Length() - l += p.field6Length() - l += p.field7Length() - l += p.field8Length() - l += p.field9Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAnalyticNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMergeNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_exprs", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.PartitionExprs { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tuple_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], p.TupleId) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p 
*TAnalyticNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMergeNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "order_by_exprs", thrift.LIST, 2) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_expr_lists", thrift.LIST, 2) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) var length int - for _, v := range p.OrderByExprs { + for _, v := range p.ResultExprLists { length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAnalyticNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TMergeNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "analytic_functions", thrift.LIST, 3) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "const_expr_lists", thrift.LIST, 3) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) var length int - for _, v := range p.AnalyticFunctions { + for _, v := range p.ConstExprLists { length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAnalyticNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetWindow() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "window", thrift.STRUCT, 4) - offset += p.Window.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAnalyticNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intermediate_tuple_id", thrift.I32, 5) - offset += bthrift.Binary.WriteI32(buf[offset:], p.IntermediateTupleId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TAnalyticNode) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_tuple_id", thrift.I32, 6) - offset += 
bthrift.Binary.WriteI32(buf[offset:], p.OutputTupleId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TAnalyticNode) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetBufferedTupleId() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "buffered_tuple_id", thrift.I32, 7) - offset += bthrift.Binary.WriteI32(buf[offset:], *p.BufferedTupleId) - - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAnalyticNode) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetPartitionByEq() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_by_eq", thrift.STRUCT, 8) - offset += p.PartitionByEq.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAnalyticNode) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - if p.IsSetOrderByEq() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "order_by_eq", thrift.STRUCT, 9) - offset += p.OrderByEq.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } - return offset -} - -func (p *TAnalyticNode) field1Length() int { +func (p *TMergeNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("partition_exprs", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionExprs)) - for _, v := range p.PartitionExprs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} + l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) + l += bthrift.Binary.I32Length(p.TupleId) -func (p *TAnalyticNode) field2Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("order_by_exprs", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.OrderByExprs)) - for _, v := range p.OrderByExprs { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TAnalyticNode) field3Length() int { +func (p *TMergeNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("analytic_functions", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.AnalyticFunctions)) - for _, v := range p.AnalyticFunctions { - l += v.BLength() + l += bthrift.Binary.FieldBeginLength("result_expr_lists", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultExprLists)) + for _, v := range p.ResultExprLists { + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() } l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TAnalyticNode) field4Length() int { +func (p *TMergeNode) field3Length() int { l := 0 - if p.IsSetWindow() { - l += bthrift.Binary.FieldBeginLength("window", thrift.STRUCT, 4) - l += p.Window.BLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldBeginLength("const_expr_lists", thrift.LIST, 3) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ConstExprLists)) + for _, v := range p.ConstExprLists { + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() } - return l -} - -func (p *TAnalyticNode) field5Length() int { - l := 0 - l += 
bthrift.Binary.FieldBeginLength("intermediate_tuple_id", thrift.I32, 5) - l += bthrift.Binary.I32Length(p.IntermediateTupleId) - - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TAnalyticNode) field6Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("output_tuple_id", thrift.I32, 6) - l += bthrift.Binary.I32Length(p.OutputTupleId) - + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TAnalyticNode) field7Length() int { - l := 0 - if p.IsSetBufferedTupleId() { - l += bthrift.Binary.FieldBeginLength("buffered_tuple_id", thrift.I32, 7) - l += bthrift.Binary.I32Length(*p.BufferedTupleId) - - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAnalyticNode) field8Length() int { - l := 0 - if p.IsSetPartitionByEq() { - l += bthrift.Binary.FieldBeginLength("partition_by_eq", thrift.STRUCT, 8) - l += p.PartitionByEq.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TAnalyticNode) field9Length() int { - l := 0 - if p.IsSetOrderByEq() { - l += bthrift.Binary.FieldBeginLength("order_by_eq", thrift.STRUCT, 9) - l += p.OrderByEq.BLength() - l += bthrift.Binary.FieldEndLength() - } - return l -} - -func (p *TMergeNode) FastRead(buf []byte) (int, error) { +func (p *TUnionNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -23327,6 +29332,7 @@ func (p *TMergeNode) FastRead(buf []byte) (int, error) { var issetTupleId bool = false var issetResultExprLists bool = false var issetConstExprLists bool = false + var issetFirstMaterializedChildIdx bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -23388,6 +29394,21 @@ func (p *TMergeNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFirstMaterializedChildIdx = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -23422,13 +29443,18 @@ func (p *TMergeNode) FastRead(buf []byte) (int, error) { fieldId = 3 goto RequiredFieldNotSetError } + + if !issetFirstMaterializedChildIdx { + fieldId = 4 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMergeNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUnionNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -23436,10 +29462,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMergeNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", 
fieldIDToName_TUnionNode[fieldId])) } -func (p *TMergeNode) FastReadField1(buf []byte) (int, error) { +func (p *TUnionNode) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { @@ -23453,7 +29479,7 @@ func (p *TMergeNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TMergeNode) FastReadField2(buf []byte) (int, error) { +func (p *TUnionNode) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -23495,7 +29521,7 @@ func (p *TMergeNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TMergeNode) FastReadField3(buf []byte) (int, error) { +func (p *TUnionNode) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -23537,16 +29563,31 @@ func (p *TMergeNode) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TUnionNode) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.FirstMaterializedChildIdx = v + + } + return offset, nil +} + // for compatibility -func (p *TMergeNode) FastWrite(buf []byte) int { +func (p *TUnionNode) FastWrite(buf []byte) int { return 0 } -func (p *TMergeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUnionNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMergeNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUnionNode") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) } @@ -23555,20 +29596,21 @@ func (p *TMergeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TMergeNode) BLength() int { +func (p *TUnionNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TMergeNode") + l += bthrift.Binary.StructBeginLength("TUnionNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TMergeNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUnionNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tuple_id", thrift.I32, 1) offset += bthrift.Binary.WriteI32(buf[offset:], p.TupleId) @@ -23577,7 +29619,7 @@ func (p *TMergeNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TMergeNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUnionNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_expr_lists", thrift.LIST, 2) listBeginOffset := offset @@ -23601,7 +29643,7 @@ func (p *TMergeNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TMergeNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TUnionNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "const_expr_lists", thrift.LIST, 3) listBeginOffset := offset @@ -23625,7 +29667,16 @@ func (p *TMergeNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TMergeNode) field1Length() int { +func (p *TUnionNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "first_materialized_child_idx", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], p.FirstMaterializedChildIdx) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TUnionNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) l += bthrift.Binary.I32Length(p.TupleId) @@ -23634,7 +29685,7 @@ func (p *TMergeNode) field1Length() int { return l } -func (p *TMergeNode) field2Length() int { +func (p *TUnionNode) field2Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("result_expr_lists", thrift.LIST, 2) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultExprLists)) @@ -23650,7 +29701,7 @@ func (p *TMergeNode) field2Length() int { return l } -func (p *TMergeNode) field3Length() int { +func (p *TUnionNode) field3Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("const_expr_lists", thrift.LIST, 3) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ConstExprLists)) @@ -23666,7 +29717,16 @@ func (p *TMergeNode) field3Length() int { return l } -func (p *TUnionNode) FastRead(buf []byte) (int, error) { +func (p *TUnionNode) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("first_materialized_child_idx", thrift.I64, 4) + l += bthrift.Binary.I64Length(p.FirstMaterializedChildIdx) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TIntersectNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -23752,6 +29812,20 @@ func (p *TUnionNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -23797,7 +29871,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TUnionNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIntersectNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -23805,10 +29879,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TUnionNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIntersectNode[fieldId])) } -func (p *TUnionNode) FastReadField1(buf []byte) (int, error) { +func (p 
*TIntersectNode) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { @@ -23822,7 +29896,7 @@ func (p *TUnionNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TUnionNode) FastReadField2(buf []byte) (int, error) { +func (p *TIntersectNode) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -23864,7 +29938,7 @@ func (p *TUnionNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TUnionNode) FastReadField3(buf []byte) (int, error) { +func (p *TIntersectNode) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -23906,7 +29980,7 @@ func (p *TUnionNode) FastReadField3(buf []byte) (int, error) { return offset, nil } -func (p *TUnionNode) FastReadField4(buf []byte) (int, error) { +func (p *TIntersectNode) FastReadField4(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { @@ -23920,17 +29994,31 @@ func (p *TUnionNode) FastReadField4(buf []byte) (int, error) { return offset, nil } +func (p *TIntersectNode) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsColocate = &v + + } + return offset, nil +} + // for compatibility -func (p *TUnionNode) FastWrite(buf []byte) int { +func (p *TIntersectNode) FastWrite(buf []byte) int { return 0 } -func (p *TUnionNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TIntersectNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TUnionNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIntersectNode") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) } @@ -23939,21 +30027,22 @@ func (p *TUnionNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TUnionNode) BLength() int { +func (p *TIntersectNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TUnionNode") + l += bthrift.Binary.StructBeginLength("TIntersectNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TUnionNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TIntersectNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tuple_id", thrift.I32, 1) offset += bthrift.Binary.WriteI32(buf[offset:], p.TupleId) @@ -23962,7 +30051,7 @@ func (p *TUnionNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TUnionNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TIntersectNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_expr_lists", thrift.LIST, 2) listBeginOffset := offset @@ -23986,7 
+30075,7 @@ func (p *TUnionNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TUnionNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TIntersectNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "const_expr_lists", thrift.LIST, 3) listBeginOffset := offset @@ -24010,7 +30099,7 @@ func (p *TUnionNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TUnionNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TIntersectNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "first_materialized_child_idx", thrift.I64, 4) offset += bthrift.Binary.WriteI64(buf[offset:], p.FirstMaterializedChildIdx) @@ -24019,7 +30108,18 @@ func (p *TUnionNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWrit return offset } -func (p *TUnionNode) field1Length() int { +func (p *TIntersectNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsColocate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_colocate", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsColocate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TIntersectNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) l += bthrift.Binary.I32Length(p.TupleId) @@ -24028,7 +30128,7 @@ func (p *TUnionNode) field1Length() int { return l } -func (p *TUnionNode) field2Length() int { +func (p *TIntersectNode) field2Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("result_expr_lists", thrift.LIST, 2) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultExprLists)) @@ -24044,7 +30144,7 @@ func (p *TUnionNode) field2Length() int { return l } -func (p *TUnionNode) field3Length() int { +func (p *TIntersectNode) field3Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("const_expr_lists", thrift.LIST, 3) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ConstExprLists)) @@ -24060,7 +30160,7 @@ func (p *TUnionNode) field3Length() int { return l } -func (p *TUnionNode) field4Length() int { +func (p *TIntersectNode) field4Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("first_materialized_child_idx", thrift.I64, 4) l += bthrift.Binary.I64Length(p.FirstMaterializedChildIdx) @@ -24069,7 +30169,18 @@ func (p *TUnionNode) field4Length() int { return l } -func (p *TIntersectNode) FastRead(buf []byte) (int, error) { +func (p *TIntersectNode) field5Length() int { + l := 0 + if p.IsSetIsColocate() { + l += bthrift.Binary.FieldBeginLength("is_colocate", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.IsColocate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExceptNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int @@ -24155,6 +30266,20 @@ func (p *TIntersectNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24200,7 
+30325,7 @@ ReadStructBeginError: ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIntersectNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExceptNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -24208,10 +30333,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TIntersectNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExceptNode[fieldId])) } -func (p *TIntersectNode) FastReadField1(buf []byte) (int, error) { +func (p *TExceptNode) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { @@ -24225,7 +30350,7 @@ func (p *TIntersectNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TIntersectNode) FastReadField2(buf []byte) (int, error) { +func (p *TExceptNode) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -24267,7 +30392,7 @@ func (p *TIntersectNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TIntersectNode) FastReadField3(buf []byte) (int, error) { +func (p *TExceptNode) FastReadField3(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -24309,7 +30434,7 @@ func (p *TIntersectNode) FastReadField3(buf []byte) (int, error) { return offset, nil } -func (p *TIntersectNode) FastReadField4(buf []byte) (int, error) { +func (p *TExceptNode) FastReadField4(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { @@ -24323,17 +30448,31 @@ func (p *TIntersectNode) FastReadField4(buf []byte) (int, error) { return offset, nil } +func (p *TExceptNode) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsColocate = &v + + } + return offset, nil +} + // for compatibility -func (p *TIntersectNode) FastWrite(buf []byte) int { +func (p *TExceptNode) FastWrite(buf []byte) int { return 0 } -func (p *TIntersectNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExceptNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIntersectNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExceptNode") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) } @@ -24342,21 +30481,22 @@ func (p *TIntersectNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TIntersectNode) 
BLength() int { +func (p *TExceptNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TIntersectNode") + l += bthrift.Binary.StructBeginLength("TExceptNode") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() l += p.field4Length() + l += p.field5Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TIntersectNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExceptNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tuple_id", thrift.I32, 1) offset += bthrift.Binary.WriteI32(buf[offset:], p.TupleId) @@ -24365,7 +30505,7 @@ func (p *TIntersectNode) fastWriteField1(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TIntersectNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExceptNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_expr_lists", thrift.LIST, 2) listBeginOffset := offset @@ -24389,7 +30529,7 @@ func (p *TIntersectNode) fastWriteField2(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TIntersectNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExceptNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "const_expr_lists", thrift.LIST, 3) listBeginOffset := offset @@ -24413,7 +30553,7 @@ func (p *TIntersectNode) fastWriteField3(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TIntersectNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExceptNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "first_materialized_child_idx", thrift.I64, 4) offset += bthrift.Binary.WriteI64(buf[offset:], p.FirstMaterializedChildIdx) @@ -24422,7 +30562,18 @@ func (p *TIntersectNode) fastWriteField4(buf []byte, binaryWriter bthrift.Binary return offset } -func (p *TIntersectNode) field1Length() int { +func (p *TExceptNode) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsColocate() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_colocate", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsColocate) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TExceptNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) l += bthrift.Binary.I32Length(p.TupleId) @@ -24431,7 +30582,7 @@ func (p *TIntersectNode) field1Length() int { return l } -func (p *TIntersectNode) field2Length() int { +func (p *TExceptNode) field2Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("result_expr_lists", thrift.LIST, 2) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultExprLists)) @@ -24447,7 +30598,7 @@ func (p *TIntersectNode) field2Length() int { return l } -func (p *TIntersectNode) field3Length() int { +func (p *TExceptNode) field3Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("const_expr_lists", thrift.LIST, 3) l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ConstExprLists)) @@ -24463,7 +30614,7 @@ func (p *TIntersectNode) field3Length() int { return l } -func (p *TIntersectNode) 
field4Length() int { +func (p *TExceptNode) field4Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("first_materialized_child_idx", thrift.I64, 4) l += bthrift.Binary.I64Length(p.FirstMaterializedChildIdx) @@ -24472,16 +30623,24 @@ func (p *TIntersectNode) field4Length() int { return l } -func (p *TExceptNode) FastRead(buf []byte) (int, error) { +func (p *TExceptNode) field5Length() int { + l := 0 + if p.IsSetIsColocate() { + l += bthrift.Binary.FieldBeginLength("is_colocate", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.IsColocate) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TExchangeNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int - var fieldTypeId thrift.TType - var fieldId int16 - var issetTupleId bool = false - var issetResultExprLists bool = false - var issetConstExprLists bool = false - var issetFirstMaterializedChildIdx bool = false + var fieldTypeId thrift.TType + var fieldId int16 + var issetInputRowTuples bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -24499,13 +30658,13 @@ func (p *TExceptNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetTupleId = true + issetInputRowTuples = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24514,13 +30673,12 @@ func (p *TExceptNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetResultExprLists = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24529,13 +30687,12 @@ func (p *TExceptNode) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetConstExprLists = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24544,13 +30701,12 @@ func (p *TExceptNode) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetFirstMaterializedChildIdx = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24578,32 +30734,17 @@ func (p *TExceptNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetTupleId { + if !issetInputRowTuples { fieldId = 1 goto RequiredFieldNotSetError } - - if !issetResultExprLists { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetConstExprLists { - fieldId = 3 - goto RequiredFieldNotSetError - } - - if !issetFirstMaterializedChildIdx { - fieldId = 4 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExceptNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TExchangeNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ -24611,24 +30752,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExceptNode[fieldId])) -} - -func (p *TExceptNode) FastReadField1(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.TupleId = v - - } - return offset, nil + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExchangeNode[fieldId])) } -func (p *TExceptNode) FastReadField2(buf []byte) (int, error) { +func (p *TExchangeNode) FastReadField1(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -24636,31 +30763,19 @@ func (p *TExceptNode) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.ResultExprLists = make([][]*exprs.TExpr, 0, size) + p.InputRowTuples = make([]types.TTupleId, 0, size) for i := 0; i < size; i++ { - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - _elem := make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() - if l, err := _elem1.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - _elem = append(_elem, _elem1) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + var _elem types.TTupleId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l + + _elem = v + } - p.ResultExprLists = append(p.ResultExprLists, _elem) + p.InputRowTuples = append(p.InputRowTuples, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -24670,84 +30785,69 @@ func (p *TExceptNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TExceptNode) FastReadField3(buf []byte) (int, error) { +func (p *TExchangeNode) FastReadField2(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + tmp := NewTSortInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { return offset, err - } - p.ConstExprLists = make([][]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + } else { offset += l - if err != nil { - return offset, err - } - _elem := make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem1 := exprs.NewTExpr() - if l, err := _elem1.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } + p.SortInfo = tmp + return offset, nil +} - _elem = append(_elem, _elem1) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } +func (p *TExchangeNode) FastReadField3(buf []byte) (int, error) { + offset := 0 - p.ConstExprLists = append(p.ConstExprLists, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return 
offset, err } else { offset += l + p.Offset = &v + } return offset, nil } -func (p *TExceptNode) FastReadField4(buf []byte) (int, error) { +func (p *TExchangeNode) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.FirstMaterializedChildIdx = v + tmp := partitions.TPartitionType(v) + p.PartitionType = &tmp } return offset, nil } // for compatibility -func (p *TExceptNode) FastWrite(buf []byte) int { +func (p *TExchangeNode) FastWrite(buf []byte) int { return 0 } -func (p *TExceptNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExchangeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExceptNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExchangeNode") if p != nil { + offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) - offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TExceptNode) BLength() int { +func (p *TExchangeNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TExceptNode") + l += bthrift.Binary.StructBeginLength("TExchangeNode") if p != nil { l += p.field1Length() l += p.field2Length() @@ -24759,129 +30859,107 @@ func (p *TExceptNode) BLength() int { return l } -func (p *TExceptNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExchangeNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tuple_id", thrift.I32, 1) - offset += bthrift.Binary.WriteI32(buf[offset:], p.TupleId) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "input_row_tuples", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.InputRowTuples { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExceptNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExchangeNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "result_expr_lists", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) - var length int - for _, v := range p.ResultExprLists { - length++ - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range v { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetSortInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 2) 
+ offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExceptNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExchangeNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "const_expr_lists", thrift.LIST, 3) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) - var length int - for _, v := range p.ConstExprLists { - length++ - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range v { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) + if p.IsSetOffset() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "offset", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Offset) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExceptNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TExchangeNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "first_materialized_child_idx", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], p.FirstMaterializedChildIdx) + if p.IsSetPartitionType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_type", thrift.I32, 4) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.PartitionType)) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TExceptNode) field1Length() int { +func (p *TExchangeNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) - l += bthrift.Binary.I32Length(p.TupleId) - + l += bthrift.Binary.FieldBeginLength("input_row_tuples", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.InputRowTuples)) + var tmpV types.TTupleId + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.InputRowTuples) + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TExceptNode) field2Length() int { +func (p *TExchangeNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("result_expr_lists", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ResultExprLists)) - for _, v := range p.ResultExprLists { - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) - for _, v := range v { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetSortInfo() { + l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 2) + l += p.SortInfo.BLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TExceptNode) field3Length() int { +func (p *TExchangeNode) field3Length() int { l := 
0 - l += bthrift.Binary.FieldBeginLength("const_expr_lists", thrift.LIST, 3) - l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.ConstExprLists)) - for _, v := range p.ConstExprLists { - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) - for _, v := range v { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + if p.IsSetOffset() { + l += bthrift.Binary.FieldBeginLength("offset", thrift.I64, 3) + l += bthrift.Binary.I64Length(*p.Offset) + + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TExceptNode) field4Length() int { +func (p *TExchangeNode) field4Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("first_materialized_child_idx", thrift.I64, 4) - l += bthrift.Binary.I64Length(p.FirstMaterializedChildIdx) + if p.IsSetPartitionType() { + l += bthrift.Binary.FieldBeginLength("partition_type", thrift.I32, 4) + l += bthrift.Binary.I32Length(int32(*p.PartitionType)) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TExchangeNode) FastRead(buf []byte) (int, error) { +func (p *TOlapRewriteNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetInputRowTuples bool = false + var issetColumns bool = false + var issetColumnTypes bool = false + var issetOutputTupleId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -24905,7 +30983,7 @@ func (p *TExchangeNode) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetInputRowTuples = true + issetColumns = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24914,12 +30992,13 @@ func (p *TExchangeNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRUCT { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetColumnTypes = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24928,12 +31007,13 @@ func (p *TExchangeNode) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetOutputTupleId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -24961,17 +31041,27 @@ func (p *TExchangeNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetInputRowTuples { + if !issetColumns { fieldId = 1 goto RequiredFieldNotSetError } + + if !issetColumnTypes { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetOutputTupleId { + fieldId = 3 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TExchangeNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TOlapRewriteNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: @@ 
-24979,10 +31069,10 @@ ReadFieldEndError: ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TExchangeNode[fieldId])) + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TOlapRewriteNode[fieldId])) } -func (p *TExchangeNode) FastReadField1(buf []byte) (int, error) { +func (p *TOlapRewriteNode) FastReadField1(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -24990,19 +31080,16 @@ func (p *TExchangeNode) FastReadField1(buf []byte) (int, error) { if err != nil { return offset, err } - p.InputRowTuples = make([]types.TTupleId, 0, size) + p.Columns = make([]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { - var _elem types.TTupleId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _elem = v - } - p.InputRowTuples = append(p.InputRowTuples, _elem) + p.Columns = append(p.Columns, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -25012,40 +31099,55 @@ func (p *TExchangeNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TExchangeNode) FastReadField2(buf []byte) (int, error) { +func (p *TOlapRewriteNode) FastReadField2(buf []byte) (int, error) { offset := 0 - tmp := NewTSortInfo() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ColumnTypes = make([]*types.TColumnType, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTColumnType() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.ColumnTypes = append(p.ColumnTypes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.SortInfo = tmp return offset, nil } -func (p *TExchangeNode) FastReadField3(buf []byte) (int, error) { +func (p *TOlapRewriteNode) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.Offset = &v + + p.OutputTupleId = v } return offset, nil } // for compatibility -func (p *TExchangeNode) FastWrite(buf []byte) int { +func (p *TOlapRewriteNode) FastWrite(buf []byte) int { return 0 } -func (p *TExchangeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TOlapRewriteNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TExchangeNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TOlapRewriteNode") if p != nil { offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) @@ -25056,9 +31158,9 @@ func (p *TExchangeNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW return offset } -func (p *TExchangeNode) BLength() int { +func (p *TOlapRewriteNode) BLength() int { l := 0 - l += 
bthrift.Binary.StructBeginLength("TExchangeNode") + l += bthrift.Binary.StructBeginLength("TOlapRewriteNode") if p != nil { l += p.field1Length() l += p.field2Length() @@ -25069,85 +31171,86 @@ func (p *TExchangeNode) BLength() int { return l } -func (p *TExchangeNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TOlapRewriteNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "input_row_tuples", thrift.LIST, 1) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 1) listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) var length int - for _, v := range p.InputRowTuples { + for _, v := range p.Columns { length++ - offset += bthrift.Binary.WriteI32(buf[offset:], v) - + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExchangeNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TOlapRewriteNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSortInfo() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sort_info", thrift.STRUCT, 2) - offset += p.SortInfo.FastWriteNocopy(buf[offset:], binaryWriter) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_types", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.ColumnTypes { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExchangeNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TOlapRewriteNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetOffset() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "offset", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.Offset) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_tuple_id", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], p.OutputTupleId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TExchangeNode) field1Length() int { +func (p *TOlapRewriteNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("input_row_tuples", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.InputRowTuples)) - var tmpV types.TTupleId - l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.InputRowTuples) + l += bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) + for _, v := range p.Columns { + l += v.BLength() + } l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } -func (p *TExchangeNode) field2Length() int { +func (p *TOlapRewriteNode) 
field2Length() int { l := 0 - if p.IsSetSortInfo() { - l += bthrift.Binary.FieldBeginLength("sort_info", thrift.STRUCT, 2) - l += p.SortInfo.BLength() - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldBeginLength("column_types", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ColumnTypes)) + for _, v := range p.ColumnTypes { + l += v.BLength() } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() return l } -func (p *TExchangeNode) field3Length() int { +func (p *TOlapRewriteNode) field3Length() int { l := 0 - if p.IsSetOffset() { - l += bthrift.Binary.FieldBeginLength("offset", thrift.I64, 3) - l += bthrift.Binary.I64Length(*p.Offset) + l += bthrift.Binary.FieldBeginLength("output_tuple_id", thrift.I32, 3) + l += bthrift.Binary.I32Length(p.OutputTupleId) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TOlapRewriteNode) FastRead(buf []byte) (int, error) { +func (p *TTableFunctionNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetColumns bool = false - var issetColumnTypes bool = false - var issetOutputTupleId bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25171,7 +31274,6 @@ func (p *TOlapRewriteNode) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetColumns = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25186,22 +31288,6 @@ func (p *TOlapRewriteNode) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetColumnTypes = true - } else { - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError - } - } - case 3: - if fieldTypeId == thrift.I32 { - l, err = p.FastReadField3(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldError - } - issetOutputTupleId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25229,38 +31315,22 @@ func (p *TOlapRewriteNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetColumns { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetColumnTypes { - fieldId = 2 - goto RequiredFieldNotSetError - } - - if !issetOutputTupleId { - fieldId = 3 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TOlapRewriteNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFunctionNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TOlapRewriteNode[fieldId])) } -func (p *TOlapRewriteNode) FastReadField1(buf []byte) 
(int, error) { +func (p *TTableFunctionNode) FastReadField1(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -25268,7 +31338,7 @@ func (p *TOlapRewriteNode) FastReadField1(buf []byte) (int, error) { if err != nil { return offset, err } - p.Columns = make([]*exprs.TExpr, 0, size) + p.FnCallExprList = make([]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { _elem := exprs.NewTExpr() if l, err := _elem.FastRead(buf[offset:]); err != nil { @@ -25277,7 +31347,7 @@ func (p *TOlapRewriteNode) FastReadField1(buf []byte) (int, error) { offset += l } - p.Columns = append(p.Columns, _elem) + p.FnCallExprList = append(p.FnCallExprList, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -25287,7 +31357,7 @@ func (p *TOlapRewriteNode) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TOlapRewriteNode) FastReadField2(buf []byte) (int, error) { +func (p *TTableFunctionNode) FastReadField2(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -25295,16 +31365,19 @@ func (p *TOlapRewriteNode) FastReadField2(buf []byte) (int, error) { if err != nil { return offset, err } - p.ColumnTypes = make([]*types.TColumnType, 0, size) + p.OutputSlotIds = make([]types.TSlotId, 0, size) for i := 0; i < size; i++ { - _elem := types.NewTColumnType() - if l, err := _elem.FastRead(buf[offset:]); err != nil { + var _elem types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l + + _elem = v + } - p.ColumnTypes = append(p.ColumnTypes, _elem) + p.OutputSlotIds = append(p.OutputSlotIds, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -25314,30 +31387,15 @@ func (p *TOlapRewriteNode) FastReadField2(buf []byte) (int, error) { return offset, nil } -func (p *TOlapRewriteNode) FastReadField3(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - p.OutputTupleId = v - - } - return offset, nil -} - // for compatibility -func (p *TOlapRewriteNode) FastWrite(buf []byte) int { +func (p *TTableFunctionNode) FastWrite(buf []byte) int { return 0 } -func (p *TOlapRewriteNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableFunctionNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TOlapRewriteNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableFunctionNode") if p != nil { - offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) } @@ -25346,99 +31404,90 @@ func (p *TOlapRewriteNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina return offset } -func (p *TOlapRewriteNode) BLength() int { +func (p *TTableFunctionNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TOlapRewriteNode") + l += bthrift.Binary.StructBeginLength("TTableFunctionNode") if p != nil { l += p.field1Length() l += p.field2Length() - l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TOlapRewriteNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { - offset := 0 - offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "columns", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.Columns { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - return offset -} - -func (p *TOlapRewriteNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableFunctionNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "column_types", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.ColumnTypes { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + if p.IsSetFnCallExprList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fnCallExprList", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FnCallExprList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TOlapRewriteNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTableFunctionNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_tuple_id", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], p.OutputTupleId) + if p.IsSetOutputSlotIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "outputSlotIds", thrift.LIST, 2) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.OutputSlotIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TOlapRewriteNode) field1Length() int { +func (p *TTableFunctionNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("columns", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Columns)) - for _, v := range p.Columns { - l += v.BLength() + if p.IsSetFnCallExprList() { + l += bthrift.Binary.FieldBeginLength("fnCallExprList", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FnCallExprList)) + for _, v := range p.FnCallExprList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() return l } -func (p *TOlapRewriteNode) field2Length() int { +func (p *TTableFunctionNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("column_types", 
thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ColumnTypes)) - for _, v := range p.ColumnTypes { - l += v.BLength() + if p.IsSetOutputSlotIds() { + l += bthrift.Binary.FieldBeginLength("outputSlotIds", thrift.LIST, 2) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.OutputSlotIds)) + var tmpV types.TSlotId + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.OutputSlotIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() } - l += bthrift.Binary.ListEndLength() - l += bthrift.Binary.FieldEndLength() - return l -} - -func (p *TOlapRewriteNode) field3Length() int { - l := 0 - l += bthrift.Binary.FieldBeginLength("output_tuple_id", thrift.I32, 3) - l += bthrift.Binary.I32Length(p.OutputTupleId) - - l += bthrift.Binary.FieldEndLength() return l } -func (p *TTableFunctionNode) FastRead(buf []byte) (int, error) { +func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetMinReservation bool = false + var issetMaxReservation bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25456,12 +31505,13 @@ func (p *TTableFunctionNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetMinReservation = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25470,12 +31520,41 @@ func (p *TTableFunctionNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.LIST { + if fieldTypeId == thrift.I64 { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetMaxReservation = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25503,179 +31582,207 @@ func (p *TTableFunctionNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetMinReservation { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetMaxReservation { + fieldId = 2 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTableFunctionNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendResourceProfile[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) 
ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TBackendResourceProfile[fieldId])) } -func (p *TTableFunctionNode) FastReadField1(buf []byte) (int, error) { +func (p *TBackendResourceProfile) FastReadField1(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err - } - p.FnCallExprList = make([]*exprs.TExpr, 0, size) - for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } + } else { + offset += l + + p.MinReservation = v - p.FnCallExprList = append(p.FnCallExprList, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TBackendResourceProfile) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.MaxReservation = v + } return offset, nil } -func (p *TTableFunctionNode) FastReadField2(buf []byte) (int, error) { +func (p *TBackendResourceProfile) FastReadField3(buf []byte) (int, error) { offset := 0 - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err - } - p.OutputSlotIds = make([]types.TSlotId, 0, size) - for i := 0; i < size; i++ { - var _elem types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - - _elem = v + } else { + offset += l - } + p.SpillableBufferSize = v - p.OutputSlotIds = append(p.OutputSlotIds, _elem) } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, nil +} + +func (p *TBackendResourceProfile) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l + + p.MaxRowBufferSize = v + } return offset, nil } // for compatibility -func (p *TTableFunctionNode) FastWrite(buf []byte) int { +func (p *TBackendResourceProfile) FastWrite(buf []byte) int { return 0 } -func (p *TTableFunctionNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBackendResourceProfile) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTableFunctionNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBackendResourceProfile") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TTableFunctionNode) BLength() int { +func (p *TBackendResourceProfile) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TTableFunctionNode") + l += bthrift.Binary.StructBeginLength("TBackendResourceProfile") if p != nil { l += p.field1Length() l += 
p.field2Length() + l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TTableFunctionNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBackendResourceProfile) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetFnCallExprList() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fnCallExprList", thrift.LIST, 1) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) - var length int - for _, v := range p.FnCallExprList { - length++ - offset += v.FastWriteNocopy(buf[offset:], binaryWriter) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_reservation", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MinReservation) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TTableFunctionNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TBackendResourceProfile) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetOutputSlotIds() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "outputSlotIds", thrift.LIST, 2) - listBeginOffset := offset - offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) - var length int - for _, v := range p.OutputSlotIds { - length++ - offset += bthrift.Binary.WriteI32(buf[offset:], v) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_reservation", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxReservation) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TBackendResourceProfile) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSpillableBufferSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "spillable_buffer_size", thrift.I64, 3) + offset += bthrift.Binary.WriteI64(buf[offset:], p.SpillableBufferSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackendResourceProfile) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMaxRowBufferSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_row_buffer_size", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxRowBufferSize) - } - bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) - offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TTableFunctionNode) field1Length() int { +func (p *TBackendResourceProfile) field1Length() int { l := 0 - if p.IsSetFnCallExprList() { - l += bthrift.Binary.FieldBeginLength("fnCallExprList", thrift.LIST, 1) - l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FnCallExprList)) - for _, v := range p.FnCallExprList { - l += v.BLength() - } - l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldBeginLength("min_reservation", thrift.I64, 1) + l += bthrift.Binary.I64Length(p.MinReservation) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TBackendResourceProfile) field2Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("max_reservation", thrift.I64, 2) + l += 
bthrift.Binary.I64Length(p.MaxReservation) + + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TBackendResourceProfile) field3Length() int { + l := 0 + if p.IsSetSpillableBufferSize() { + l += bthrift.Binary.FieldBeginLength("spillable_buffer_size", thrift.I64, 3) + l += bthrift.Binary.I64Length(p.SpillableBufferSize) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TTableFunctionNode) field2Length() int { +func (p *TBackendResourceProfile) field4Length() int { l := 0 - if p.IsSetOutputSlotIds() { - l += bthrift.Binary.FieldBeginLength("outputSlotIds", thrift.LIST, 2) - l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.OutputSlotIds)) - var tmpV types.TSlotId - l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.OutputSlotIds) - l += bthrift.Binary.ListEndLength() + if p.IsSetMaxRowBufferSize() { + l += bthrift.Binary.FieldBeginLength("max_row_buffer_size", thrift.I64, 4) + l += bthrift.Binary.I64Length(p.MaxRowBufferSize) + l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { +func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 - var issetMinReservation bool = false - var issetMaxReservation bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25699,7 +31806,6 @@ func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { if err != nil { goto ReadFieldError } - issetMinReservation = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25708,13 +31814,12 @@ func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.STRING { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } - issetMaxReservation = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25723,7 +31828,7 @@ func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { @@ -25737,7 +31842,7 @@ func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { } } case 4: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField4(buf[offset:]) offset += l if err != nil { @@ -25770,110 +31875,97 @@ func (p *TBackendResourceProfile) FastRead(buf []byte) (int, error) { goto ReadStructEndError } - if !issetMinReservation { - fieldId = 1 - goto RequiredFieldNotSetError - } - - if !issetMaxReservation { - fieldId = 2 - goto RequiredFieldNotSetError - } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBackendResourceProfile[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAssertNumRowsNode[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), 
err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -RequiredFieldNotSetError: - return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TBackendResourceProfile[fieldId])) } -func (p *TBackendResourceProfile) FastReadField1(buf []byte) (int, error) { +func (p *TAssertNumRowsNode) FastReadField1(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MinReservation = v + p.DesiredNumRows = &v } return offset, nil } -func (p *TBackendResourceProfile) FastReadField2(buf []byte) (int, error) { +func (p *TAssertNumRowsNode) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MaxReservation = v + p.SubqueryString = &v } return offset, nil } -func (p *TBackendResourceProfile) FastReadField3(buf []byte) (int, error) { +func (p *TAssertNumRowsNode) FastReadField3(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SpillableBufferSize = v + tmp := TAssertion(v) + p.Assertion = &tmp } return offset, nil } -func (p *TBackendResourceProfile) FastReadField4(buf []byte) (int, error) { +func (p *TAssertNumRowsNode) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - - p.MaxRowBufferSize = v + p.ShouldConvertOutputToNullable = &v } return offset, nil } // for compatibility -func (p *TBackendResourceProfile) FastWrite(buf []byte) int { +func (p *TAssertNumRowsNode) FastWrite(buf []byte) int { return 0 } -func (p *TBackendResourceProfile) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAssertNumRowsNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TBackendResourceProfile") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAssertNumRowsNode") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) - offset += p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TBackendResourceProfile) BLength() int { +func (p *TAssertNumRowsNode) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TBackendResourceProfile") + l += bthrift.Binary.StructBeginLength("TAssertNumRowsNode") if p != nil { l += p.field1Length() l += p.field2Length() @@ -25885,92 +31977,104 @@ func (p *TBackendResourceProfile) BLength() int { return l } -func (p *TBackendResourceProfile) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAssertNumRowsNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_reservation", 
thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MinReservation) + if p.IsSetDesiredNumRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desired_num_rows", thrift.I64, 1) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.DesiredNumRows) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TBackendResourceProfile) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAssertNumRowsNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_reservation", thrift.I64, 2) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxReservation) + if p.IsSetSubqueryString() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "subquery_string", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SubqueryString) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } return offset } -func (p *TBackendResourceProfile) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAssertNumRowsNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSpillableBufferSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "spillable_buffer_size", thrift.I64, 3) - offset += bthrift.Binary.WriteI64(buf[offset:], p.SpillableBufferSize) + if p.IsSetAssertion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "assertion", thrift.I32, 3) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Assertion)) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBackendResourceProfile) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TAssertNumRowsNode) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetMaxRowBufferSize() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_row_buffer_size", thrift.I64, 4) - offset += bthrift.Binary.WriteI64(buf[offset:], p.MaxRowBufferSize) + if p.IsSetShouldConvertOutputToNullable() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "should_convert_output_to_nullable", thrift.BOOL, 4) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ShouldConvertOutputToNullable) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) } return offset } -func (p *TBackendResourceProfile) field1Length() int { +func (p *TAssertNumRowsNode) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("min_reservation", thrift.I64, 1) - l += bthrift.Binary.I64Length(p.MinReservation) + if p.IsSetDesiredNumRows() { + l += bthrift.Binary.FieldBeginLength("desired_num_rows", thrift.I64, 1) + l += bthrift.Binary.I64Length(*p.DesiredNumRows) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TBackendResourceProfile) field2Length() int { +func (p *TAssertNumRowsNode) field2Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("max_reservation", thrift.I64, 2) - l += bthrift.Binary.I64Length(p.MaxReservation) + if p.IsSetSubqueryString() { + l += bthrift.Binary.FieldBeginLength("subquery_string", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.SubqueryString) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + } return l } -func (p *TBackendResourceProfile) 
field3Length() int { +func (p *TAssertNumRowsNode) field3Length() int { l := 0 - if p.IsSetSpillableBufferSize() { - l += bthrift.Binary.FieldBeginLength("spillable_buffer_size", thrift.I64, 3) - l += bthrift.Binary.I64Length(p.SpillableBufferSize) + if p.IsSetAssertion() { + l += bthrift.Binary.FieldBeginLength("assertion", thrift.I32, 3) + l += bthrift.Binary.I32Length(int32(*p.Assertion)) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TBackendResourceProfile) field4Length() int { +func (p *TAssertNumRowsNode) field4Length() int { l := 0 - if p.IsSetMaxRowBufferSize() { - l += bthrift.Binary.FieldBeginLength("max_row_buffer_size", thrift.I64, 4) - l += bthrift.Binary.I64Length(p.MaxRowBufferSize) + if p.IsSetShouldConvertOutputToNullable() { + l += bthrift.Binary.FieldBeginLength("should_convert_output_to_nullable", thrift.BOOL, 4) + l += bthrift.Binary.BoolLength(*p.ShouldConvertOutputToNullable) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { +func (p *TTopnFilterDesc) FastRead(buf []byte) (int, error) { var err error var offset int var l int var fieldTypeId thrift.TType var fieldId int16 + var issetSourceNodeId bool = false + var issetIsAsc bool = false + var issetNullFirst bool = false + var issetTargetNodeIdToTargetExpr bool = false _, l, err = bthrift.Binary.ReadStructBegin(buf) offset += l if err != nil { @@ -25988,12 +32092,13 @@ func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.I64 { + if fieldTypeId == thrift.I32 { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetSourceNodeId = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26002,12 +32107,13 @@ func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { } } case 2: - if fieldTypeId == thrift.STRING { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField2(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetIsAsc = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26016,12 +32122,28 @@ func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { } } case 3: - if fieldTypeId == thrift.I32 { + if fieldTypeId == thrift.BOOL { l, err = p.FastReadField3(buf[offset:]) offset += l if err != nil { goto ReadFieldError } + issetNullFirst = true + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTargetNodeIdToTargetExpr = true } else { l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26049,156 +32171,238 @@ func (p *TAssertNumRowsNode) FastRead(buf []byte) (int, error) { goto ReadStructEndError } + if !issetSourceNodeId { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetIsAsc { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetNullFirst { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetTargetNodeIdToTargetExpr { + fieldId = 4 + goto RequiredFieldNotSetError + } return offset, nil ReadStructBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TAssertNumRowsNode[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TTopnFilterDesc[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) ReadFieldEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) ReadStructEndError: return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TTopnFilterDesc[fieldId])) } -func (p *TAssertNumRowsNode) FastReadField1(buf []byte) (int, error) { +func (p *TTopnFilterDesc) FastReadField1(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DesiredNumRows = &v + + p.SourceNodeId = v } return offset, nil } -func (p *TAssertNumRowsNode) FastReadField2(buf []byte) (int, error) { +func (p *TTopnFilterDesc) FastReadField2(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.SubqueryString = &v + + p.IsAsc = v } return offset, nil } -func (p *TAssertNumRowsNode) FastReadField3(buf []byte) (int, error) { +func (p *TTopnFilterDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.NullFirst = v + + } + return offset, nil +} + +func (p *TTopnFilterDesc) FastReadField4(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TargetNodeIdToTargetExpr = make(map[types.TPlanNodeId]*exprs.TExpr, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := exprs.NewTExpr() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TargetNodeIdToTargetExpr[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TAssertion(v) - p.Assertion = &tmp - } return offset, nil } // for compatibility -func (p *TAssertNumRowsNode) FastWrite(buf []byte) int { +func (p *TTopnFilterDesc) FastWrite(buf []byte) int { return 0 } -func (p *TAssertNumRowsNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTopnFilterDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TAssertNumRowsNode") + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TTopnFilterDesc") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += 
p.fastWriteField4(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) return offset } -func (p *TAssertNumRowsNode) BLength() int { +func (p *TTopnFilterDesc) BLength() int { l := 0 - l += bthrift.Binary.StructBeginLength("TAssertNumRowsNode") + l += bthrift.Binary.StructBeginLength("TTopnFilterDesc") if p != nil { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() return l } -func (p *TAssertNumRowsNode) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTopnFilterDesc) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetDesiredNumRows() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desired_num_rows", thrift.I64, 1) - offset += bthrift.Binary.WriteI64(buf[offset:], *p.DesiredNumRows) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "source_node_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], p.SourceNodeId) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAssertNumRowsNode) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTopnFilterDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetSubqueryString() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "subquery_string", thrift.STRING, 2) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SubqueryString) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_asc", thrift.BOOL, 2) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsAsc) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) - } + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAssertNumRowsNode) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { +func (p *TTopnFilterDesc) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - if p.IsSetAssertion() { - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "assertion", thrift.I32, 3) - offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Assertion)) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "null_first", thrift.BOOL, 3) + offset += bthrift.Binary.WriteBool(buf[offset:], p.NullFirst) - offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + return offset +} + +func (p *TTopnFilterDesc) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "target_node_id_to_target_expr", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.TargetNodeIdToTargetExpr { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } -func (p *TAssertNumRowsNode) field1Length() int { +func (p *TTopnFilterDesc) field1Length() int { l := 0 - if p.IsSetDesiredNumRows() { - l += bthrift.Binary.FieldBeginLength("desired_num_rows", 
thrift.I64, 1) - l += bthrift.Binary.I64Length(*p.DesiredNumRows) + l += bthrift.Binary.FieldBeginLength("source_node_id", thrift.I32, 1) + l += bthrift.Binary.I32Length(p.SourceNodeId) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TAssertNumRowsNode) field2Length() int { +func (p *TTopnFilterDesc) field2Length() int { l := 0 - if p.IsSetSubqueryString() { - l += bthrift.Binary.FieldBeginLength("subquery_string", thrift.STRING, 2) - l += bthrift.Binary.StringLengthNocopy(*p.SubqueryString) + l += bthrift.Binary.FieldBeginLength("is_asc", thrift.BOOL, 2) + l += bthrift.Binary.BoolLength(p.IsAsc) - l += bthrift.Binary.FieldEndLength() - } + l += bthrift.Binary.FieldEndLength() return l } -func (p *TAssertNumRowsNode) field3Length() int { +func (p *TTopnFilterDesc) field3Length() int { l := 0 - if p.IsSetAssertion() { - l += bthrift.Binary.FieldBeginLength("assertion", thrift.I32, 3) - l += bthrift.Binary.I32Length(int32(*p.Assertion)) + l += bthrift.Binary.FieldBeginLength("null_first", thrift.BOOL, 3) + l += bthrift.Binary.BoolLength(p.NullFirst) - l += bthrift.Binary.FieldEndLength() + l += bthrift.Binary.FieldEndLength() + return l +} + +func (p *TTopnFilterDesc) field4Length() int { + l := 0 + l += bthrift.Binary.FieldBeginLength("target_node_id_to_target_expr", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.TargetNodeIdToTargetExpr)) + for k, v := range p.TargetNodeIdToTargetExpr { + + l += bthrift.Binary.I32Length(k) + + l += v.BLength() } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() return l } @@ -26408,6 +32612,62 @@ func (p *TRuntimeFilterDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -26669,6 +32929,60 @@ func (p *TRuntimeFilterDesc) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TRuntimeFilterDesc) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TMinMaxRuntimeFilterType(v) + p.MinMaxType = &tmp + + } + return offset, nil +} + +func (p *TRuntimeFilterDesc) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { 
+ offset += l + p.BloomFilterSizeCalculatedByNdv = &v + + } + return offset, nil +} + +func (p *TRuntimeFilterDesc) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NullAware = &v + + } + return offset, nil +} + +func (p *TRuntimeFilterDesc) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SyncFilterSize = &v + + } + return offset, nil +} + // for compatibility func (p *TRuntimeFilterDesc) FastWrite(buf []byte) int { return 0 @@ -26686,10 +33000,14 @@ func (p *TRuntimeFilterDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bi offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -26712,6 +33030,10 @@ func (p *TRuntimeFilterDesc) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -26842,6 +33164,50 @@ func (p *TRuntimeFilterDesc) fastWriteField12(buf []byte, binaryWriter bthrift.B return offset } +func (p *TRuntimeFilterDesc) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMinMaxType() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_max_type", thrift.I32, 13) + offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MinMaxType)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRuntimeFilterDesc) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBloomFilterSizeCalculatedByNdv() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "bloom_filter_size_calculated_by_ndv", thrift.BOOL, 14) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.BloomFilterSizeCalculatedByNdv) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRuntimeFilterDesc) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNullAware() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "null_aware", thrift.BOOL, 15) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.NullAware) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TRuntimeFilterDesc) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSyncFilterSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sync_filter_size", thrift.BOOL, 16) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.SyncFilterSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + 
return offset +} + func (p *TRuntimeFilterDesc) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("filter_id", thrift.I32, 1) @@ -26940,22 +33306,66 @@ func (p *TRuntimeFilterDesc) field10Length() int { return l } -func (p *TRuntimeFilterDesc) field11Length() int { +func (p *TRuntimeFilterDesc) field11Length() int { + l := 0 + if p.IsSetBitmapFilterNotIn() { + l += bthrift.Binary.FieldBeginLength("bitmap_filter_not_in", thrift.BOOL, 11) + l += bthrift.Binary.BoolLength(*p.BitmapFilterNotIn) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRuntimeFilterDesc) field12Length() int { + l := 0 + if p.IsSetOptRemoteRf() { + l += bthrift.Binary.FieldBeginLength("opt_remote_rf", thrift.BOOL, 12) + l += bthrift.Binary.BoolLength(*p.OptRemoteRf) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRuntimeFilterDesc) field13Length() int { + l := 0 + if p.IsSetMinMaxType() { + l += bthrift.Binary.FieldBeginLength("min_max_type", thrift.I32, 13) + l += bthrift.Binary.I32Length(int32(*p.MinMaxType)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRuntimeFilterDesc) field14Length() int { + l := 0 + if p.IsSetBloomFilterSizeCalculatedByNdv() { + l += bthrift.Binary.FieldBeginLength("bloom_filter_size_calculated_by_ndv", thrift.BOOL, 14) + l += bthrift.Binary.BoolLength(*p.BloomFilterSizeCalculatedByNdv) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TRuntimeFilterDesc) field15Length() int { l := 0 - if p.IsSetBitmapFilterNotIn() { - l += bthrift.Binary.FieldBeginLength("bitmap_filter_not_in", thrift.BOOL, 11) - l += bthrift.Binary.BoolLength(*p.BitmapFilterNotIn) + if p.IsSetNullAware() { + l += bthrift.Binary.FieldBeginLength("null_aware", thrift.BOOL, 15) + l += bthrift.Binary.BoolLength(*p.NullAware) l += bthrift.Binary.FieldEndLength() } return l } -func (p *TRuntimeFilterDesc) field12Length() int { +func (p *TRuntimeFilterDesc) field16Length() int { l := 0 - if p.IsSetOptRemoteRf() { - l += bthrift.Binary.FieldBeginLength("opt_remote_rf", thrift.BOOL, 12) - l += bthrift.Binary.BoolLength(*p.OptRemoteRf) + if p.IsSetSyncFilterSize() { + l += bthrift.Binary.FieldBeginLength("sync_filter_size", thrift.BOOL, 16) + l += bthrift.Binary.BoolLength(*p.SyncFilterSize) l += bthrift.Binary.FieldEndLength() } @@ -27919,6 +34329,34 @@ func (p *TPlanNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 50: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField50(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 51: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField51(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 101: if fieldTypeId == thrift.LIST { l, err = p.FastReadField101(buf[offset:]) @@ -27961,6 +34399,62 @@ func (p *TPlanNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 104: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField104(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 105: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField105(buf[offset:]) + 
offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 106: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField106(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 107: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField107(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -28531,30 +35025,208 @@ func (p *TPlanNode) FastReadField37(buf []byte) (int, error) { func (p *TPlanNode) FastReadField40(buf []byte) (int, error) { offset := 0 - tmp := exprs.NewTExpr() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + tmp := exprs.NewTExpr() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Vconjunct = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField41(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTableFunctionNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TableFunctionNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField42(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.OutputSlotIds = make([]types.TSlotId, 0, size) + for i := 0; i < size; i++ { + var _elem types.TSlotId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.OutputSlotIds = append(p.OutputSlotIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPlanNode) FastReadField43(buf []byte) (int, error) { + offset := 0 + + tmp := NewTDataGenScanNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.DataGenScanNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField44(buf []byte) (int, error) { + offset := 0 + + tmp := NewTFileScanNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.FileScanNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField45(buf []byte) (int, error) { + offset := 0 + + tmp := NewTJdbcScanNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.JdbcScanNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField46(buf []byte) (int, error) { + offset := 0 + + tmp := NewTNestedLoopJoinNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.NestedLoopJoinNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField47(buf []byte) (int, error) { + offset := 0 + + tmp := NewTTestExternalScanNode() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TestExternalScanNode = tmp + return offset, nil +} + +func (p *TPlanNode) FastReadField48(buf 
[]byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TPushAggOp(v) + p.PushDownAggTypeOpt = &tmp + + } + return offset, nil +} + +func (p *TPlanNode) FastReadField49(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.PushDownCount = &v + + } + return offset, nil +} + +func (p *TPlanNode) FastReadField50(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DistributeExprLists = make([][]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem1 := exprs.NewTExpr() + if l, err := _elem1.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.DistributeExprLists = append(p.DistributeExprLists, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.Vconjunct = tmp return offset, nil } -func (p *TPlanNode) FastReadField41(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField51(buf []byte) (int, error) { offset := 0 - tmp := NewTTableFunctionNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l + p.IsSerialOperator = &v + } - p.TableFunctionNode = tmp return offset, nil } -func (p *TPlanNode) FastReadField42(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField101(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -28562,19 +35234,16 @@ func (p *TPlanNode) FastReadField42(buf []byte) (int, error) { if err != nil { return offset, err } - p.OutputSlotIds = make([]types.TSlotId, 0, size) + p.Projections = make([]*exprs.TExpr, 0, size) for i := 0; i < size; i++ { - var _elem types.TSlotId - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l - - _elem = v - } - p.OutputSlotIds = append(p.OutputSlotIds, _elem) + p.Projections = append(p.Projections, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -28584,100 +35253,105 @@ func (p *TPlanNode) FastReadField42(buf []byte) (int, error) { return offset, nil } -func (p *TPlanNode) FastReadField43(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField102(buf []byte) (int, error) { offset := 0 - tmp := NewTDataGenScanNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l + p.OutputTupleId = &v + } - p.DataGenScanNode = tmp return offset, nil } -func (p *TPlanNode) FastReadField44(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField103(buf []byte) (int, error) { offset := 0 - tmp := NewTFileScanNode() + tmp := NewTPartitionSortNode() if l, err := 
tmp.FastRead(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.FileScanNode = tmp + p.PartitionSortNode = tmp return offset, nil } -func (p *TPlanNode) FastReadField45(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField104(buf []byte) (int, error) { offset := 0 - tmp := NewTJdbcScanNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l } - p.JdbcScanNode = tmp - return offset, nil -} + p.IntermediateProjectionsList = make([][]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _elem := make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem1 := exprs.NewTExpr() + if l, err := _elem1.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } -func (p *TPlanNode) FastReadField46(buf []byte) (int, error) { - offset := 0 + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } - tmp := NewTNestedLoopJoinNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + p.IntermediateProjectionsList = append(p.IntermediateProjectionsList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l } - p.NestedLoopJoinNode = tmp return offset, nil } -func (p *TPlanNode) FastReadField47(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField105(buf []byte) (int, error) { offset := 0 - tmp := NewTTestExternalScanNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l } - p.TestExternalScanNode = tmp - return offset, nil -} - -func (p *TPlanNode) FastReadField48(buf []byte) (int, error) { - offset := 0 + p.IntermediateOutputTupleIdList = make([]types.TTupleId, 0, size) + for i := 0; i < size; i++ { + var _elem types.TTupleId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { - return offset, err - } else { - offset += l + _elem = v - tmp := TPushAggOp(v) - p.PushDownAggTypeOpt = &tmp + } + p.IntermediateOutputTupleIdList = append(p.IntermediateOutputTupleIdList, _elem) } - return offset, nil -} - -func (p *TPlanNode) FastReadField49(buf []byte) (int, error) { - offset := 0 - - if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l - p.PushDownCount = &v - } return offset, nil } -func (p *TPlanNode) FastReadField101(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField106(buf []byte) (int, error) { offset := 0 _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) @@ -28685,16 +35359,19 @@ func (p *TPlanNode) FastReadField101(buf []byte) (int, error) { if err != nil { return offset, err } - p.Projections = make([]*exprs.TExpr, 0, size) + p.TopnFilterSourceNodeIds = make([]int32, 0, size) for i := 0; i < size; i++ { - _elem := exprs.NewTExpr() - if l, err := _elem.FastRead(buf[offset:]); err != nil { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { 
return offset, err } else { offset += l + + _elem = v + } - p.Projections = append(p.Projections, _elem) + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) } if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err @@ -28704,29 +35381,16 @@ func (p *TPlanNode) FastReadField101(buf []byte) (int, error) { return offset, nil } -func (p *TPlanNode) FastReadField102(buf []byte) (int, error) { +func (p *TPlanNode) FastReadField107(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.OutputTupleId = &v - - } - return offset, nil -} - -func (p *TPlanNode) FastReadField103(buf []byte) (int, error) { - offset := 0 + p.NereidsId = &v - tmp := NewTPartitionSortNode() - if l, err := tmp.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l } - p.PartitionSortNode = tmp return offset, nil } @@ -28744,7 +35408,9 @@ func (p *TPlanNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField49(buf[offset:], binaryWriter) + offset += p.fastWriteField51(buf[offset:], binaryWriter) offset += p.fastWriteField102(buf[offset:], binaryWriter) + offset += p.fastWriteField107(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) @@ -28783,8 +35449,12 @@ func (p *TPlanNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField46(buf[offset:], binaryWriter) offset += p.fastWriteField47(buf[offset:], binaryWriter) offset += p.fastWriteField48(buf[offset:], binaryWriter) + offset += p.fastWriteField50(buf[offset:], binaryWriter) offset += p.fastWriteField101(buf[offset:], binaryWriter) offset += p.fastWriteField103(buf[offset:], binaryWriter) + offset += p.fastWriteField104(buf[offset:], binaryWriter) + offset += p.fastWriteField105(buf[offset:], binaryWriter) + offset += p.fastWriteField106(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -28838,9 +35508,15 @@ func (p *TPlanNode) BLength() int { l += p.field47Length() l += p.field48Length() l += p.field49Length() + l += p.field50Length() + l += p.field51Length() l += p.field101Length() l += p.field102Length() l += p.field103Length() + l += p.field104Length() + l += p.field105Length() + l += p.field106Length() + l += p.field107Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -29313,6 +35989,43 @@ func (p *TPlanNode) fastWriteField49(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p *TPlanNode) fastWriteField50(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDistributeExprLists() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "distribute_expr_lists", thrift.LIST, 50) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.DistributeExprLists { + length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, 
length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlanNode) fastWriteField51(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsSerialOperator() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_serial_operator", thrift.BOOL, 51) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsSerialOperator) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPlanNode) fastWriteField101(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetProjections() { @@ -29352,6 +36065,81 @@ func (p *TPlanNode) fastWriteField103(buf []byte, binaryWriter bthrift.BinaryWri return offset } +func (p *TPlanNode) fastWriteField104(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIntermediateProjectionsList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intermediate_projections_list", thrift.LIST, 104) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) + var length int + for _, v := range p.IntermediateProjectionsList { + length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range v { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlanNode) fastWriteField105(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIntermediateOutputTupleIdList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "intermediate_output_tuple_id_list", thrift.LIST, 105) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.IntermediateOutputTupleIdList { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlanNode) fastWriteField106(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 106) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPlanNode) fastWriteField107(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNereidsId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "nereids_id", thrift.I32, 107) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NereidsId) 
+ + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPlanNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("node_id", thrift.I32, 1) @@ -29792,6 +36580,35 @@ func (p *TPlanNode) field49Length() int { return l } +func (p *TPlanNode) field50Length() int { + l := 0 + if p.IsSetDistributeExprLists() { + l += bthrift.Binary.FieldBeginLength("distribute_expr_lists", thrift.LIST, 50) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.DistributeExprLists)) + for _, v := range p.DistributeExprLists { + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlanNode) field51Length() int { + l := 0 + if p.IsSetIsSerialOperator() { + l += bthrift.Binary.FieldBeginLength("is_serial_operator", thrift.BOOL, 51) + l += bthrift.Binary.BoolLength(*p.IsSerialOperator) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TPlanNode) field101Length() int { l := 0 if p.IsSetProjections() { @@ -29827,6 +36644,61 @@ func (p *TPlanNode) field103Length() int { return l } +func (p *TPlanNode) field104Length() int { + l := 0 + if p.IsSetIntermediateProjectionsList() { + l += bthrift.Binary.FieldBeginLength("intermediate_projections_list", thrift.LIST, 104) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.IntermediateProjectionsList)) + for _, v := range p.IntermediateProjectionsList { + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v)) + for _, v := range v { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlanNode) field105Length() int { + l := 0 + if p.IsSetIntermediateOutputTupleIdList() { + l += bthrift.Binary.FieldBeginLength("intermediate_output_tuple_id_list", thrift.LIST, 105) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.IntermediateOutputTupleIdList)) + var tmpV types.TTupleId + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.IntermediateOutputTupleIdList) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlanNode) field106Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 106) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPlanNode) field107Length() int { + l := 0 + if p.IsSetNereidsId() { + l += bthrift.Binary.FieldBeginLength("nereids_id", thrift.I32, 107) + l += bthrift.Binary.I32Length(*p.NereidsId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TPlan) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/querycache/QueryCache.go b/pkg/rpc/kitex_gen/querycache/QueryCache.go new file mode 100644 index 00000000..f24266c6 --- /dev/null +++ b/pkg/rpc/kitex_gen/querycache/QueryCache.go @@ -0,0 +1,694 @@ +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
+ +package querycache + +import ( + "bytes" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "strings" +) + +type TQueryCacheParam struct { + NodeId *int32 `thrift:"node_id,1,optional" frugal:"1,optional,i32" json:"node_id,omitempty"` + Digest []byte `thrift:"digest,2,optional" frugal:"2,optional,binary" json:"digest,omitempty"` + OutputSlotMapping map[int32]int32 `thrift:"output_slot_mapping,3,optional" frugal:"3,optional,map" json:"output_slot_mapping,omitempty"` + TabletToRange map[int64]string `thrift:"tablet_to_range,4,optional" frugal:"4,optional,map" json:"tablet_to_range,omitempty"` + ForceRefreshQueryCache *bool `thrift:"force_refresh_query_cache,5,optional" frugal:"5,optional,bool" json:"force_refresh_query_cache,omitempty"` + EntryMaxBytes *int64 `thrift:"entry_max_bytes,6,optional" frugal:"6,optional,i64" json:"entry_max_bytes,omitempty"` + EntryMaxRows *int64 `thrift:"entry_max_rows,7,optional" frugal:"7,optional,i64" json:"entry_max_rows,omitempty"` +} + +func NewTQueryCacheParam() *TQueryCacheParam { + return &TQueryCacheParam{} +} + +func (p *TQueryCacheParam) InitDefault() { +} + +var TQueryCacheParam_NodeId_DEFAULT int32 + +func (p *TQueryCacheParam) GetNodeId() (v int32) { + if !p.IsSetNodeId() { + return TQueryCacheParam_NodeId_DEFAULT + } + return *p.NodeId +} + +var TQueryCacheParam_Digest_DEFAULT []byte + +func (p *TQueryCacheParam) GetDigest() (v []byte) { + if !p.IsSetDigest() { + return TQueryCacheParam_Digest_DEFAULT + } + return p.Digest +} + +var TQueryCacheParam_OutputSlotMapping_DEFAULT map[int32]int32 + +func (p *TQueryCacheParam) GetOutputSlotMapping() (v map[int32]int32) { + if !p.IsSetOutputSlotMapping() { + return TQueryCacheParam_OutputSlotMapping_DEFAULT + } + return p.OutputSlotMapping +} + +var TQueryCacheParam_TabletToRange_DEFAULT map[int64]string + +func (p *TQueryCacheParam) GetTabletToRange() (v map[int64]string) { + if !p.IsSetTabletToRange() { + return TQueryCacheParam_TabletToRange_DEFAULT + } + return p.TabletToRange +} + +var TQueryCacheParam_ForceRefreshQueryCache_DEFAULT bool + +func (p *TQueryCacheParam) GetForceRefreshQueryCache() (v bool) { + if !p.IsSetForceRefreshQueryCache() { + return TQueryCacheParam_ForceRefreshQueryCache_DEFAULT + } + return *p.ForceRefreshQueryCache +} + +var TQueryCacheParam_EntryMaxBytes_DEFAULT int64 + +func (p *TQueryCacheParam) GetEntryMaxBytes() (v int64) { + if !p.IsSetEntryMaxBytes() { + return TQueryCacheParam_EntryMaxBytes_DEFAULT + } + return *p.EntryMaxBytes +} + +var TQueryCacheParam_EntryMaxRows_DEFAULT int64 + +func (p *TQueryCacheParam) GetEntryMaxRows() (v int64) { + if !p.IsSetEntryMaxRows() { + return TQueryCacheParam_EntryMaxRows_DEFAULT + } + return *p.EntryMaxRows +} +func (p *TQueryCacheParam) SetNodeId(val *int32) { + p.NodeId = val +} +func (p *TQueryCacheParam) SetDigest(val []byte) { + p.Digest = val +} +func (p *TQueryCacheParam) SetOutputSlotMapping(val map[int32]int32) { + p.OutputSlotMapping = val +} +func (p *TQueryCacheParam) SetTabletToRange(val map[int64]string) { + p.TabletToRange = val +} +func (p *TQueryCacheParam) SetForceRefreshQueryCache(val *bool) { + p.ForceRefreshQueryCache = val +} +func (p *TQueryCacheParam) SetEntryMaxBytes(val *int64) { + p.EntryMaxBytes = val +} +func (p *TQueryCacheParam) SetEntryMaxRows(val *int64) { + p.EntryMaxRows = val +} + +var fieldIDToName_TQueryCacheParam = map[int16]string{ + 1: "node_id", + 2: "digest", + 3: "output_slot_mapping", + 4: "tablet_to_range", + 5: "force_refresh_query_cache", + 6: "entry_max_bytes", + 7: 
"entry_max_rows", +} + +func (p *TQueryCacheParam) IsSetNodeId() bool { + return p.NodeId != nil +} + +func (p *TQueryCacheParam) IsSetDigest() bool { + return p.Digest != nil +} + +func (p *TQueryCacheParam) IsSetOutputSlotMapping() bool { + return p.OutputSlotMapping != nil +} + +func (p *TQueryCacheParam) IsSetTabletToRange() bool { + return p.TabletToRange != nil +} + +func (p *TQueryCacheParam) IsSetForceRefreshQueryCache() bool { + return p.ForceRefreshQueryCache != nil +} + +func (p *TQueryCacheParam) IsSetEntryMaxBytes() bool { + return p.EntryMaxBytes != nil +} + +func (p *TQueryCacheParam) IsSetEntryMaxRows() bool { + return p.EntryMaxRows != nil +} + +func (p *TQueryCacheParam) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.MAP { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryCacheParam[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryCacheParam) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } 
else { + _field = &v + } + p.NodeId = _field + return nil +} +func (p *TQueryCacheParam) ReadField2(iprot thrift.TProtocol) error { + + var _field []byte + if v, err := iprot.ReadBinary(); err != nil { + return err + } else { + _field = []byte(v) + } + p.Digest = _field + return nil +} +func (p *TQueryCacheParam) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + var _val int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.OutputSlotMapping = _field + return nil +} +func (p *TQueryCacheParam) ReadField4(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[int64]string, size) + for i := 0; i < size; i++ { + var _key int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _key = v + } + + var _val string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _val = v + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.TabletToRange = _field + return nil +} +func (p *TQueryCacheParam) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ForceRefreshQueryCache = _field + return nil +} +func (p *TQueryCacheParam) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.EntryMaxBytes = _field + return nil +} +func (p *TQueryCacheParam) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.EntryMaxRows = _field + return nil +} + +func (p *TQueryCacheParam) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TQueryCacheParam"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField1(oprot 
thrift.TProtocol) (err error) { + if p.IsSetNodeId() { + if err = oprot.WriteFieldBegin("node_id", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.NodeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDigest() { + if err = oprot.WriteFieldBegin("digest", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBinary([]byte(p.Digest)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetOutputSlotMapping() { + if err = oprot.WriteFieldBegin("output_slot_mapping", thrift.MAP, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.OutputSlotMapping)); err != nil { + return err + } + for k, v := range p.OutputSlotMapping { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletToRange() { + if err = oprot.WriteFieldBegin("tablet_to_range", thrift.MAP, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I64, thrift.STRING, len(p.TabletToRange)); err != nil { + return err + } + for k, v := range p.TabletToRange { + if err := oprot.WriteI64(k); err != nil { + return err + } + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetForceRefreshQueryCache() { + if err = oprot.WriteFieldBegin("force_refresh_query_cache", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.ForceRefreshQueryCache); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TQueryCacheParam) 
writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetEntryMaxBytes() { + if err = oprot.WriteFieldBegin("entry_max_bytes", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.EntryMaxBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TQueryCacheParam) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetEntryMaxRows() { + if err = oprot.WriteFieldBegin("entry_max_rows", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.EntryMaxRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TQueryCacheParam) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TQueryCacheParam(%+v)", *p) + +} + +func (p *TQueryCacheParam) DeepEqual(ano *TQueryCacheParam) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.NodeId) { + return false + } + if !p.Field2DeepEqual(ano.Digest) { + return false + } + if !p.Field3DeepEqual(ano.OutputSlotMapping) { + return false + } + if !p.Field4DeepEqual(ano.TabletToRange) { + return false + } + if !p.Field5DeepEqual(ano.ForceRefreshQueryCache) { + return false + } + if !p.Field6DeepEqual(ano.EntryMaxBytes) { + return false + } + if !p.Field7DeepEqual(ano.EntryMaxRows) { + return false + } + return true +} + +func (p *TQueryCacheParam) Field1DeepEqual(src *int32) bool { + + if p.NodeId == src { + return true + } else if p.NodeId == nil || src == nil { + return false + } + if *p.NodeId != *src { + return false + } + return true +} +func (p *TQueryCacheParam) Field2DeepEqual(src []byte) bool { + + if bytes.Compare(p.Digest, src) != 0 { + return false + } + return true +} +func (p *TQueryCacheParam) Field3DeepEqual(src map[int32]int32) bool { + + if len(p.OutputSlotMapping) != len(src) { + return false + } + for k, v := range p.OutputSlotMapping { + _src := src[k] + if v != _src { + return false + } + } + return true +} +func (p *TQueryCacheParam) Field4DeepEqual(src map[int64]string) bool { + + if len(p.TabletToRange) != len(src) { + return false + } + for k, v := range p.TabletToRange { + _src := src[k] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TQueryCacheParam) Field5DeepEqual(src *bool) bool { + + if p.ForceRefreshQueryCache == src { + return true + } else if p.ForceRefreshQueryCache == nil || src == nil { + return false + } + if *p.ForceRefreshQueryCache != *src { + return false + } + return true +} +func (p *TQueryCacheParam) Field6DeepEqual(src *int64) bool { + + if p.EntryMaxBytes == src { + return true + } else if p.EntryMaxBytes == nil || src == nil { + return false + } + if *p.EntryMaxBytes != *src { + return false + } + return true +} +func (p *TQueryCacheParam) Field7DeepEqual(src *int64) bool { + + if p.EntryMaxRows == src { + return true + } else if p.EntryMaxRows == nil || src == nil { + return false + } + if *p.EntryMaxRows != *src { + 
return false + } + return true +} diff --git a/pkg/rpc/kitex_gen/querycache/k-QueryCache.go b/pkg/rpc/kitex_gen/querycache/k-QueryCache.go new file mode 100644 index 00000000..0dc6405a --- /dev/null +++ b/pkg/rpc/kitex_gen/querycache/k-QueryCache.go @@ -0,0 +1,550 @@ +// Code generated by Kitex v0.8.0. DO NOT EDIT. + +package querycache + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + "github.com/apache/thrift/lib/go/thrift" + + "github.com/cloudwego/kitex/pkg/protocol/bthrift" +) + +// unused protection +var ( + _ = fmt.Formatter(nil) + _ = (*bytes.Buffer)(nil) + _ = (*strings.Builder)(nil) + _ = reflect.Type(nil) + _ = thrift.TProtocol(nil) + _ = bthrift.BinaryWriter(nil) +) + +func (p *TQueryCacheParam) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryCacheParam[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TQueryCacheParam) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.NodeId = &v + + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBinary(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.Digest = []byte(v) + + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.OutputSlotMapping = make(map[int32]int32, size) + for i := 0; i < size; i++ { + var _key int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.OutputSlotMapping[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletToRange = make(map[int64]string, size) + for i := 0; i < size; i++ { + var _key int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + + var _val string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _val = v + + } + + p.TabletToRange[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ForceRefreshQueryCache = &v + + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EntryMaxBytes = &v + + } + return offset, nil +} + +func (p *TQueryCacheParam) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.EntryMaxRows = &v + + } + return offset, nil +} + 
+// for compatibility +func (p *TQueryCacheParam) FastWrite(buf []byte) int { + return 0 +} + +func (p *TQueryCacheParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryCacheParam") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TQueryCacheParam) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TQueryCacheParam") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TQueryCacheParam) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetNodeId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "node_id", thrift.I32, 1) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.NodeId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDigest() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "digest", thrift.STRING, 2) + offset += bthrift.Binary.WriteBinaryNocopy(buf[offset:], binaryWriter, []byte(p.Digest)) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetOutputSlotMapping() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "output_slot_mapping", thrift.MAP, 3) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, 0) + var length int + for k, v := range p.OutputSlotMapping { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.I32, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletToRange() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_to_range", thrift.MAP, 4) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.STRING, 0) + var length int + for k, v := range p.TabletToRange { + length++ + + offset += bthrift.Binary.WriteI64(buf[offset:], k) + + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) + + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.STRING, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField5(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetForceRefreshQueryCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "force_refresh_query_cache", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ForceRefreshQueryCache) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEntryMaxBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "entry_max_bytes", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.EntryMaxBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEntryMaxRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "entry_max_rows", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.EntryMaxRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryCacheParam) field1Length() int { + l := 0 + if p.IsSetNodeId() { + l += bthrift.Binary.FieldBeginLength("node_id", thrift.I32, 1) + l += bthrift.Binary.I32Length(*p.NodeId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field2Length() int { + l := 0 + if p.IsSetDigest() { + l += bthrift.Binary.FieldBeginLength("digest", thrift.STRING, 2) + l += bthrift.Binary.BinaryLengthNocopy([]byte(p.Digest)) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field3Length() int { + l := 0 + if p.IsSetOutputSlotMapping() { + l += bthrift.Binary.FieldBeginLength("output_slot_mapping", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.I32, len(p.OutputSlotMapping)) + var tmpK int32 + var tmpV int32 + l += (bthrift.Binary.I32Length(int32(tmpK)) + bthrift.Binary.I32Length(int32(tmpV))) * len(p.OutputSlotMapping) + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field4Length() int { + l := 0 + if p.IsSetTabletToRange() { + l += bthrift.Binary.FieldBeginLength("tablet_to_range", thrift.MAP, 4) + l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.STRING, len(p.TabletToRange)) + for k, v := range p.TabletToRange { + + l += bthrift.Binary.I64Length(k) + + l += bthrift.Binary.StringLengthNocopy(v) + + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field5Length() int { + l := 0 + if p.IsSetForceRefreshQueryCache() { + l += bthrift.Binary.FieldBeginLength("force_refresh_query_cache", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.ForceRefreshQueryCache) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field6Length() int { + l := 0 + if p.IsSetEntryMaxBytes() { + l += bthrift.Binary.FieldBeginLength("entry_max_bytes", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.EntryMaxBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryCacheParam) field7Length() int { + l := 0 + if p.IsSetEntryMaxRows() { + l += bthrift.Binary.FieldBeginLength("entry_max_rows", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.EntryMaxRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} diff --git a/pkg/rpc/kitex_gen/querycache/k-consts.go b/pkg/rpc/kitex_gen/querycache/k-consts.go new file 
mode 100644 index 00000000..6ac990f6 --- /dev/null +++ b/pkg/rpc/kitex_gen/querycache/k-consts.go @@ -0,0 +1,4 @@ +package querycache + +// KitexUnusedProtection is used to prevent 'imported and not used' error. +var KitexUnusedProtection = struct{}{} diff --git a/pkg/rpc/kitex_gen/runtimeprofile/RuntimeProfile.go b/pkg/rpc/kitex_gen/runtimeprofile/RuntimeProfile.go index d21d4f8a..1717891f 100644 --- a/pkg/rpc/kitex_gen/runtimeprofile/RuntimeProfile.go +++ b/pkg/rpc/kitex_gen/runtimeprofile/RuntimeProfile.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package runtimeprofile @@ -13,6 +13,7 @@ type TCounter struct { Name string `thrift:"name,1,required" frugal:"1,required,string" json:"name"` Type metrics.TUnit `thrift:"type,2,required" frugal:"2,required,TUnit" json:"type"` Value int64 `thrift:"value,3,required" frugal:"3,required,i64" json:"value"` + Level *int64 `thrift:"level,4,optional" frugal:"4,optional,i64" json:"level,omitempty"` } func NewTCounter() *TCounter { @@ -20,7 +21,6 @@ func NewTCounter() *TCounter { } func (p *TCounter) InitDefault() { - *p = TCounter{} } func (p *TCounter) GetName() (v string) { @@ -34,6 +34,15 @@ func (p *TCounter) GetType() (v metrics.TUnit) { func (p *TCounter) GetValue() (v int64) { return p.Value } + +var TCounter_Level_DEFAULT int64 + +func (p *TCounter) GetLevel() (v int64) { + if !p.IsSetLevel() { + return TCounter_Level_DEFAULT + } + return *p.Level +} func (p *TCounter) SetName(val string) { p.Name = val } @@ -43,11 +52,19 @@ func (p *TCounter) SetType(val metrics.TUnit) { func (p *TCounter) SetValue(val int64) { p.Value = val } +func (p *TCounter) SetLevel(val *int64) { + p.Level = val +} var fieldIDToName_TCounter = map[int16]string{ 1: "name", 2: "type", 3: "value", + 4: "level", +} + +func (p *TCounter) IsSetLevel() bool { + return p.Level != nil } func (p *TCounter) Read(iprot thrift.TProtocol) (err error) { @@ -78,10 +95,8 @@ func (p *TCounter) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -89,10 +104,8 @@ func (p *TCounter) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -100,17 +113,22 @@ func (p *TCounter) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetValue = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -152,29 +170,47 @@ RequiredFieldNotSetError: } func (p *TCounter) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TCounter) ReadField2(iprot 
thrift.TProtocol) error { + + var _field metrics.TUnit if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = metrics.TUnit(v) + _field = metrics.TUnit(v) } + p.Type = _field return nil } - func (p *TCounter) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.Value = _field + return nil +} +func (p *TCounter) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Value = v + _field = &v } + p.Level = _field return nil } @@ -196,7 +232,10 @@ func (p *TCounter) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -266,11 +305,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TCounter) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetLevel() { + if err = oprot.WriteFieldBegin("level", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Level); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + func (p *TCounter) String() string { if p == nil { return "" } return fmt.Sprintf("TCounter(%+v)", *p) + } func (p *TCounter) DeepEqual(ano *TCounter) bool { @@ -288,6 +347,9 @@ func (p *TCounter) DeepEqual(ano *TCounter) bool { if !p.Field3DeepEqual(ano.Value) { return false } + if !p.Field4DeepEqual(ano.Level) { + return false + } return true } @@ -312,6 +374,18 @@ func (p *TCounter) Field3DeepEqual(src int64) bool { } return true } +func (p *TCounter) Field4DeepEqual(src *int64) bool { + + if p.Level == src { + return true + } else if p.Level == nil || src == nil { + return false + } + if *p.Level != *src { + return false + } + return true +} type TRuntimeProfileNode struct { Name string `thrift:"name,1,required" frugal:"1,required,string" json:"name"` @@ -323,6 +397,7 @@ type TRuntimeProfileNode struct { InfoStringsDisplayOrder []string `thrift:"info_strings_display_order,7,required" frugal:"7,required,list" json:"info_strings_display_order"` ChildCountersMap map[string][]string `thrift:"child_counters_map,8,required" frugal:"8,required,map>" json:"child_counters_map"` Timestamp int64 `thrift:"timestamp,9,required" frugal:"9,required,i64" json:"timestamp"` + IsSink *bool `thrift:"is_sink,10,optional" frugal:"10,optional,bool" json:"is_sink,omitempty"` } func NewTRuntimeProfileNode() *TRuntimeProfileNode { @@ -330,7 +405,6 @@ func NewTRuntimeProfileNode() *TRuntimeProfileNode { } func (p *TRuntimeProfileNode) InitDefault() { - *p = TRuntimeProfileNode{} } func (p *TRuntimeProfileNode) GetName() (v string) { @@ -368,6 +442,15 @@ func (p *TRuntimeProfileNode) GetChildCountersMap() (v map[string][]string) { func (p *TRuntimeProfileNode) GetTimestamp() (v int64) { return p.Timestamp } + +var TRuntimeProfileNode_IsSink_DEFAULT bool + +func (p *TRuntimeProfileNode) GetIsSink() (v bool) { + if !p.IsSetIsSink() { + return TRuntimeProfileNode_IsSink_DEFAULT + } + return *p.IsSink +} func (p *TRuntimeProfileNode) SetName(val 
string) { p.Name = val } @@ -395,17 +478,25 @@ func (p *TRuntimeProfileNode) SetChildCountersMap(val map[string][]string) { func (p *TRuntimeProfileNode) SetTimestamp(val int64) { p.Timestamp = val } +func (p *TRuntimeProfileNode) SetIsSink(val *bool) { + p.IsSink = val +} var fieldIDToName_TRuntimeProfileNode = map[int16]string{ - 1: "name", - 2: "num_children", - 3: "counters", - 4: "metadata", - 5: "indent", - 6: "info_strings", - 7: "info_strings_display_order", - 8: "child_counters_map", - 9: "timestamp", + 1: "name", + 2: "num_children", + 3: "counters", + 4: "metadata", + 5: "indent", + 6: "info_strings", + 7: "info_strings_display_order", + 8: "child_counters_map", + 9: "timestamp", + 10: "is_sink", +} + +func (p *TRuntimeProfileNode) IsSetIsSink() bool { + return p.IsSink != nil } func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { @@ -442,10 +533,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -453,10 +542,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNumChildren = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -464,10 +551,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCounters = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -475,10 +560,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetMetadata = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { @@ -486,10 +569,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIndent = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.MAP { @@ -497,10 +578,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetInfoStrings = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.LIST { @@ -508,10 +587,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetInfoStringsDisplayOrder = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.MAP { @@ -519,10 +596,8 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetChildCountersMap = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { @@ -530,17 +605,22 @@ func (p *TRuntimeProfileNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTimestamp = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -612,67 +692,78 @@ RequiredFieldNotSetError: } func (p *TRuntimeProfileNode) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TRuntimeProfileNode) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.NumChildren = v + _field = v } + p.NumChildren = _field return nil } - func (p *TRuntimeProfileNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Counters = make([]*TCounter, 0, size) + _field := make([]*TCounter, 0, size) + values := make([]TCounter, size) for i := 0; i < size; i++ { - _elem := NewTCounter() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Counters = append(p.Counters, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Counters = _field return nil } - func (p *TRuntimeProfileNode) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Metadata = v + _field = v } + p.Metadata = _field return nil } - func (p *TRuntimeProfileNode) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.Indent = v + _field = v } + p.Indent = _field return nil } - func (p *TRuntimeProfileNode) ReadField6(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.InfoStrings = make(map[string]string, size) + _field := make(map[string]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -688,21 +779,22 @@ func (p *TRuntimeProfileNode) ReadField6(iprot thrift.TProtocol) error { _val = v } - p.InfoStrings[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.InfoStrings = _field return nil } - func (p *TRuntimeProfileNode) ReadField7(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.InfoStringsDisplayOrder = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -710,20 +802,20 @@ func (p *TRuntimeProfileNode) ReadField7(iprot thrift.TProtocol) error { _elem = v } - p.InfoStringsDisplayOrder = append(p.InfoStringsDisplayOrder, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.InfoStringsDisplayOrder = _field return nil } - func (p *TRuntimeProfileNode) 
ReadField8(iprot thrift.TProtocol) error { _, _, size, err := iprot.ReadMapBegin() if err != nil { return err } - p.ChildCountersMap = make(map[string][]string, size) + _field := make(map[string][]string, size) for i := 0; i < size; i++ { var _key string if v, err := iprot.ReadString(); err != nil { @@ -731,13 +823,13 @@ func (p *TRuntimeProfileNode) ReadField8(iprot thrift.TProtocol) error { } else { _key = v } - _, size, err := iprot.ReadSetBegin() if err != nil { return err } _val := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -751,20 +843,34 @@ func (p *TRuntimeProfileNode) ReadField8(iprot thrift.TProtocol) error { return err } - p.ChildCountersMap[_key] = _val + _field[_key] = _val } if err := iprot.ReadMapEnd(); err != nil { return err } + p.ChildCountersMap = _field return nil } - func (p *TRuntimeProfileNode) ReadField9(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Timestamp = v + _field = v + } + p.Timestamp = _field + return nil +} +func (p *TRuntimeProfileNode) ReadField10(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v } + p.IsSink = _field return nil } @@ -810,7 +916,10 @@ func (p *TRuntimeProfileNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -930,11 +1039,9 @@ func (p *TRuntimeProfileNode) writeField6(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.InfoStrings { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteString(v); err != nil { return err } @@ -985,11 +1092,9 @@ func (p *TRuntimeProfileNode) writeField8(oprot thrift.TProtocol) (err error) { return err } for k, v := range p.ChildCountersMap { - if err := oprot.WriteString(k); err != nil { return err } - if err := oprot.WriteSetBegin(thrift.STRING, len(v)); err != nil { return err } @@ -1044,11 +1149,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } +func (p *TRuntimeProfileNode) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsSink() { + if err = oprot.WriteFieldBegin("is_sink", thrift.BOOL, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsSink); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p *TRuntimeProfileNode) String() string { if p == nil { return "" } return fmt.Sprintf("TRuntimeProfileNode(%+v)", *p) + } func (p *TRuntimeProfileNode) DeepEqual(ano *TRuntimeProfileNode) bool { @@ -1084,6 +1209,9 @@ func (p *TRuntimeProfileNode) DeepEqual(ano *TRuntimeProfileNode) bool { if !p.Field9DeepEqual(ano.Timestamp) { return false } + if !p.Field10DeepEqual(ano.IsSink) { + return false + } return true } @@ -1180,6 +1308,18 @@ func (p *TRuntimeProfileNode) Field9DeepEqual(src int64) bool { } return true } +func (p *TRuntimeProfileNode) Field10DeepEqual(src *bool) bool { + + if p.IsSink == src { + return true + } else if 
p.IsSink == nil || src == nil { + return false + } + if *p.IsSink != *src { + return false + } + return true +} type TRuntimeProfileTree struct { Nodes []*TRuntimeProfileNode `thrift:"nodes,1,required" frugal:"1,required,list" json:"nodes"` @@ -1190,7 +1330,6 @@ func NewTRuntimeProfileTree() *TRuntimeProfileTree { } func (p *TRuntimeProfileTree) InitDefault() { - *p = TRuntimeProfileTree{} } func (p *TRuntimeProfileTree) GetNodes() (v []*TRuntimeProfileNode) { @@ -1230,17 +1369,14 @@ func (p *TRuntimeProfileTree) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetNodes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -1276,18 +1412,22 @@ func (p *TRuntimeProfileTree) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Nodes = make([]*TRuntimeProfileNode, 0, size) + _field := make([]*TRuntimeProfileNode, 0, size) + values := make([]TRuntimeProfileNode, size) for i := 0; i < size; i++ { - _elem := NewTRuntimeProfileNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Nodes = append(p.Nodes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Nodes = _field return nil } @@ -1301,7 +1441,6 @@ func (p *TRuntimeProfileTree) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -1350,6 +1489,7 @@ func (p *TRuntimeProfileTree) String() string { return "" } return fmt.Sprintf("TRuntimeProfileTree(%+v)", *p) + } func (p *TRuntimeProfileTree) DeepEqual(ano *TRuntimeProfileTree) bool { diff --git a/pkg/rpc/kitex_gen/runtimeprofile/k-RuntimeProfile.go b/pkg/rpc/kitex_gen/runtimeprofile/k-RuntimeProfile.go index 049295d2..5e998173 100644 --- a/pkg/rpc/kitex_gen/runtimeprofile/k-RuntimeProfile.go +++ b/pkg/rpc/kitex_gen/runtimeprofile/k-RuntimeProfile.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package runtimeprofile @@ -11,6 +11,7 @@ import ( "github.com/apache/thrift/lib/go/thrift" "github.com/cloudwego/kitex/pkg/protocol/bthrift" + "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/metrics" ) @@ -95,6 +96,20 @@ func (p *TCounter) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -188,6 +203,19 @@ func (p *TCounter) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TCounter) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Level = &v + + } + return offset, nil +} + // for compatibility func (p *TCounter) FastWrite(buf []byte) int { return 0 @@ -198,6 +226,7 @@ func (p *TCounter) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCounter") if p != nil { offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) } @@ -213,6 +242,7 @@ func (p *TCounter) BLength() int { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -246,6 +276,17 @@ func (p *TCounter) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter return offset } +func (p *TCounter) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetLevel() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "level", thrift.I64, 4) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Level) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TCounter) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) @@ -273,6 +314,17 @@ func (p *TCounter) field3Length() int { return l } +func (p *TCounter) field4Length() int { + l := 0 + if p.IsSetLevel() { + l += bthrift.Binary.FieldBeginLength("level", thrift.I64, 4) + l += bthrift.Binary.I64Length(*p.Level) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TRuntimeProfileNode) FastRead(buf []byte) (int, error) { var err error var offset int @@ -439,6 +491,20 @@ func (p *TRuntimeProfileNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 10: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -742,6 +808,19 @@ func (p *TRuntimeProfileNode) FastReadField9(buf []byte) (int, error) { return offset, nil } +func (p *TRuntimeProfileNode) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsSink = &v + + } + return offset, nil +} + // for compatibility func (p 
*TRuntimeProfileNode) FastWrite(buf []byte) int { return 0 @@ -755,6 +834,7 @@ func (p *TRuntimeProfileNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.B offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) @@ -779,6 +859,7 @@ func (p *TRuntimeProfileNode) BLength() int { l += p.field7Length() l += p.field8Length() l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -924,6 +1005,17 @@ func (p *TRuntimeProfileNode) fastWriteField9(buf []byte, binaryWriter bthrift.B return offset } +func (p *TRuntimeProfileNode) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsSink() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_sink", thrift.BOOL, 10) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsSink) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TRuntimeProfileNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 1) @@ -1043,6 +1135,17 @@ func (p *TRuntimeProfileNode) field9Length() int { return l } +func (p *TRuntimeProfileNode) field10Length() int { + l := 0 + if p.IsSetIsSink() { + l += bthrift.Binary.FieldBeginLength("is_sink", thrift.BOOL, 10) + l += bthrift.Binary.BoolLength(*p.IsSink) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TRuntimeProfileTree) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/status/Status.go b/pkg/rpc/kitex_gen/status/Status.go index 67128a27..0477d1e6 100644 --- a/pkg/rpc/kitex_gen/status/Status.go +++ b/pkg/rpc/kitex_gen/status/Status.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. 
package status @@ -22,20 +22,13 @@ const ( TStatusCode_INTERNAL_ERROR TStatusCode = 6 TStatusCode_THRIFT_RPC_ERROR TStatusCode = 7 TStatusCode_TIMEOUT TStatusCode = 8 - TStatusCode_KUDU_NOT_ENABLED TStatusCode = 9 - TStatusCode_KUDU_NOT_SUPPORTED_ON_OS TStatusCode = 10 + TStatusCode_LIMIT_REACH TStatusCode = 9 TStatusCode_MEM_ALLOC_FAILED TStatusCode = 11 TStatusCode_BUFFER_ALLOCATION_FAILED TStatusCode = 12 TStatusCode_MINIMUM_RESERVATION_UNAVAILABLE TStatusCode = 13 TStatusCode_PUBLISH_TIMEOUT TStatusCode = 14 TStatusCode_LABEL_ALREADY_EXISTS TStatusCode = 15 TStatusCode_TOO_MANY_TASKS TStatusCode = 16 - TStatusCode_ES_INTERNAL_ERROR TStatusCode = 17 - TStatusCode_ES_INDEX_NOT_FOUND TStatusCode = 18 - TStatusCode_ES_SHARD_NOT_FOUND TStatusCode = 19 - TStatusCode_ES_INVALID_CONTEXTID TStatusCode = 20 - TStatusCode_ES_INVALID_OFFSET TStatusCode = 21 - TStatusCode_ES_REQUEST_ERROR TStatusCode = 22 TStatusCode_END_OF_FILE TStatusCode = 30 TStatusCode_NOT_FOUND TStatusCode = 31 TStatusCode_CORRUPTION TStatusCode = 32 @@ -46,20 +39,11 @@ const ( TStatusCode_ILLEGAL_STATE TStatusCode = 37 TStatusCode_NOT_AUTHORIZED TStatusCode = 38 TStatusCode_ABORTED TStatusCode = 39 - TStatusCode_REMOTE_ERROR TStatusCode = 40 TStatusCode_UNINITIALIZED TStatusCode = 42 - TStatusCode_CONFIGURATION_ERROR TStatusCode = 43 TStatusCode_INCOMPLETE TStatusCode = 44 TStatusCode_OLAP_ERR_VERSION_ALREADY_MERGED TStatusCode = 45 TStatusCode_DATA_QUALITY_ERROR TStatusCode = 46 - TStatusCode_VEC_EXCEPTION TStatusCode = 50 - TStatusCode_VEC_LOGIC_ERROR TStatusCode = 51 - TStatusCode_VEC_ILLEGAL_DIVISION TStatusCode = 52 - TStatusCode_VEC_BAD_CAST TStatusCode = 53 - TStatusCode_VEC_CANNOT_ALLOCATE_MEMORY TStatusCode = 54 - TStatusCode_VEC_CANNOT_MUNMAP TStatusCode = 55 - TStatusCode_VEC_CANNOT_MREMAP TStatusCode = 56 - TStatusCode_VEC_BAD_ARGUMENTS TStatusCode = 57 + TStatusCode_INVALID_JSON_PATH TStatusCode = 47 TStatusCode_BINLOG_DISABLE TStatusCode = 60 TStatusCode_BINLOG_TOO_OLD_COMMIT_SEQ TStatusCode = 61 TStatusCode_BINLOG_TOO_NEW_COMMIT_SEQ TStatusCode = 62 @@ -69,6 +53,9 @@ const ( TStatusCode_HTTP_ERROR TStatusCode = 71 TStatusCode_TABLET_MISSING TStatusCode = 72 TStatusCode_NOT_MASTER TStatusCode = 73 + TStatusCode_OBTAIN_LOCK_FAILED TStatusCode = 74 + TStatusCode_SNAPSHOT_EXPIRED TStatusCode = 75 + TStatusCode_DELETE_BITMAP_LOCK_ERROR TStatusCode = 100 ) func (p TStatusCode) String() string { @@ -91,10 +78,8 @@ func (p TStatusCode) String() string { return "THRIFT_RPC_ERROR" case TStatusCode_TIMEOUT: return "TIMEOUT" - case TStatusCode_KUDU_NOT_ENABLED: - return "KUDU_NOT_ENABLED" - case TStatusCode_KUDU_NOT_SUPPORTED_ON_OS: - return "KUDU_NOT_SUPPORTED_ON_OS" + case TStatusCode_LIMIT_REACH: + return "LIMIT_REACH" case TStatusCode_MEM_ALLOC_FAILED: return "MEM_ALLOC_FAILED" case TStatusCode_BUFFER_ALLOCATION_FAILED: @@ -107,18 +92,6 @@ func (p TStatusCode) String() string { return "LABEL_ALREADY_EXISTS" case TStatusCode_TOO_MANY_TASKS: return "TOO_MANY_TASKS" - case TStatusCode_ES_INTERNAL_ERROR: - return "ES_INTERNAL_ERROR" - case TStatusCode_ES_INDEX_NOT_FOUND: - return "ES_INDEX_NOT_FOUND" - case TStatusCode_ES_SHARD_NOT_FOUND: - return "ES_SHARD_NOT_FOUND" - case TStatusCode_ES_INVALID_CONTEXTID: - return "ES_INVALID_CONTEXTID" - case TStatusCode_ES_INVALID_OFFSET: - return "ES_INVALID_OFFSET" - case TStatusCode_ES_REQUEST_ERROR: - return "ES_REQUEST_ERROR" case TStatusCode_END_OF_FILE: return "END_OF_FILE" case TStatusCode_NOT_FOUND: @@ -139,34 +112,16 @@ func (p TStatusCode) String() string { return 
"NOT_AUTHORIZED" case TStatusCode_ABORTED: return "ABORTED" - case TStatusCode_REMOTE_ERROR: - return "REMOTE_ERROR" case TStatusCode_UNINITIALIZED: return "UNINITIALIZED" - case TStatusCode_CONFIGURATION_ERROR: - return "CONFIGURATION_ERROR" case TStatusCode_INCOMPLETE: return "INCOMPLETE" case TStatusCode_OLAP_ERR_VERSION_ALREADY_MERGED: return "OLAP_ERR_VERSION_ALREADY_MERGED" case TStatusCode_DATA_QUALITY_ERROR: return "DATA_QUALITY_ERROR" - case TStatusCode_VEC_EXCEPTION: - return "VEC_EXCEPTION" - case TStatusCode_VEC_LOGIC_ERROR: - return "VEC_LOGIC_ERROR" - case TStatusCode_VEC_ILLEGAL_DIVISION: - return "VEC_ILLEGAL_DIVISION" - case TStatusCode_VEC_BAD_CAST: - return "VEC_BAD_CAST" - case TStatusCode_VEC_CANNOT_ALLOCATE_MEMORY: - return "VEC_CANNOT_ALLOCATE_MEMORY" - case TStatusCode_VEC_CANNOT_MUNMAP: - return "VEC_CANNOT_MUNMAP" - case TStatusCode_VEC_CANNOT_MREMAP: - return "VEC_CANNOT_MREMAP" - case TStatusCode_VEC_BAD_ARGUMENTS: - return "VEC_BAD_ARGUMENTS" + case TStatusCode_INVALID_JSON_PATH: + return "INVALID_JSON_PATH" case TStatusCode_BINLOG_DISABLE: return "BINLOG_DISABLE" case TStatusCode_BINLOG_TOO_OLD_COMMIT_SEQ: @@ -185,6 +140,12 @@ func (p TStatusCode) String() string { return "TABLET_MISSING" case TStatusCode_NOT_MASTER: return "NOT_MASTER" + case TStatusCode_OBTAIN_LOCK_FAILED: + return "OBTAIN_LOCK_FAILED" + case TStatusCode_SNAPSHOT_EXPIRED: + return "SNAPSHOT_EXPIRED" + case TStatusCode_DELETE_BITMAP_LOCK_ERROR: + return "DELETE_BITMAP_LOCK_ERROR" } return "" } @@ -209,10 +170,8 @@ func TStatusCodeFromString(s string) (TStatusCode, error) { return TStatusCode_THRIFT_RPC_ERROR, nil case "TIMEOUT": return TStatusCode_TIMEOUT, nil - case "KUDU_NOT_ENABLED": - return TStatusCode_KUDU_NOT_ENABLED, nil - case "KUDU_NOT_SUPPORTED_ON_OS": - return TStatusCode_KUDU_NOT_SUPPORTED_ON_OS, nil + case "LIMIT_REACH": + return TStatusCode_LIMIT_REACH, nil case "MEM_ALLOC_FAILED": return TStatusCode_MEM_ALLOC_FAILED, nil case "BUFFER_ALLOCATION_FAILED": @@ -225,18 +184,6 @@ func TStatusCodeFromString(s string) (TStatusCode, error) { return TStatusCode_LABEL_ALREADY_EXISTS, nil case "TOO_MANY_TASKS": return TStatusCode_TOO_MANY_TASKS, nil - case "ES_INTERNAL_ERROR": - return TStatusCode_ES_INTERNAL_ERROR, nil - case "ES_INDEX_NOT_FOUND": - return TStatusCode_ES_INDEX_NOT_FOUND, nil - case "ES_SHARD_NOT_FOUND": - return TStatusCode_ES_SHARD_NOT_FOUND, nil - case "ES_INVALID_CONTEXTID": - return TStatusCode_ES_INVALID_CONTEXTID, nil - case "ES_INVALID_OFFSET": - return TStatusCode_ES_INVALID_OFFSET, nil - case "ES_REQUEST_ERROR": - return TStatusCode_ES_REQUEST_ERROR, nil case "END_OF_FILE": return TStatusCode_END_OF_FILE, nil case "NOT_FOUND": @@ -257,34 +204,16 @@ func TStatusCodeFromString(s string) (TStatusCode, error) { return TStatusCode_NOT_AUTHORIZED, nil case "ABORTED": return TStatusCode_ABORTED, nil - case "REMOTE_ERROR": - return TStatusCode_REMOTE_ERROR, nil case "UNINITIALIZED": return TStatusCode_UNINITIALIZED, nil - case "CONFIGURATION_ERROR": - return TStatusCode_CONFIGURATION_ERROR, nil case "INCOMPLETE": return TStatusCode_INCOMPLETE, nil case "OLAP_ERR_VERSION_ALREADY_MERGED": return TStatusCode_OLAP_ERR_VERSION_ALREADY_MERGED, nil case "DATA_QUALITY_ERROR": return TStatusCode_DATA_QUALITY_ERROR, nil - case "VEC_EXCEPTION": - return TStatusCode_VEC_EXCEPTION, nil - case "VEC_LOGIC_ERROR": - return TStatusCode_VEC_LOGIC_ERROR, nil - case "VEC_ILLEGAL_DIVISION": - return TStatusCode_VEC_ILLEGAL_DIVISION, nil - case "VEC_BAD_CAST": - return 
TStatusCode_VEC_BAD_CAST, nil - case "VEC_CANNOT_ALLOCATE_MEMORY": - return TStatusCode_VEC_CANNOT_ALLOCATE_MEMORY, nil - case "VEC_CANNOT_MUNMAP": - return TStatusCode_VEC_CANNOT_MUNMAP, nil - case "VEC_CANNOT_MREMAP": - return TStatusCode_VEC_CANNOT_MREMAP, nil - case "VEC_BAD_ARGUMENTS": - return TStatusCode_VEC_BAD_ARGUMENTS, nil + case "INVALID_JSON_PATH": + return TStatusCode_INVALID_JSON_PATH, nil case "BINLOG_DISABLE": return TStatusCode_BINLOG_DISABLE, nil case "BINLOG_TOO_OLD_COMMIT_SEQ": @@ -303,6 +232,12 @@ func TStatusCodeFromString(s string) (TStatusCode, error) { return TStatusCode_TABLET_MISSING, nil case "NOT_MASTER": return TStatusCode_NOT_MASTER, nil + case "OBTAIN_LOCK_FAILED": + return TStatusCode_OBTAIN_LOCK_FAILED, nil + case "SNAPSHOT_EXPIRED": + return TStatusCode_SNAPSHOT_EXPIRED, nil + case "DELETE_BITMAP_LOCK_ERROR": + return TStatusCode_DELETE_BITMAP_LOCK_ERROR, nil } return TStatusCode(0), fmt.Errorf("not a valid TStatusCode string") } @@ -332,7 +267,6 @@ func NewTStatus() *TStatus { } func (p *TStatus) InitDefault() { - *p = TStatus{} } func (p *TStatus) GetStatusCode() (v TStatusCode) { @@ -389,27 +323,22 @@ func (p *TStatus) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetStatusCode = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.LIST { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -441,21 +370,24 @@ RequiredFieldNotSetError: } func (p *TStatus) ReadField1(iprot thrift.TProtocol) error { + + var _field TStatusCode if v, err := iprot.ReadI32(); err != nil { return err } else { - p.StatusCode = TStatusCode(v) + _field = TStatusCode(v) } + p.StatusCode = _field return nil } - func (p *TStatus) ReadField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ErrorMsgs = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -463,11 +395,12 @@ func (p *TStatus) ReadField2(iprot thrift.TProtocol) error { _elem = v } - p.ErrorMsgs = append(p.ErrorMsgs, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ErrorMsgs = _field return nil } @@ -485,7 +418,6 @@ func (p *TStatus) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -553,6 +485,7 @@ func (p *TStatus) String() string { return "" } return fmt.Sprintf("TStatus(%+v)", *p) + } func (p *TStatus) DeepEqual(ano *TStatus) bool { diff --git a/pkg/rpc/kitex_gen/status/k-Status.go b/pkg/rpc/kitex_gen/status/k-Status.go index ab2ff06b..6cb20ea8 100644 --- a/pkg/rpc/kitex_gen/status/k-Status.go +++ b/pkg/rpc/kitex_gen/status/k-Status.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. 
package status diff --git a/pkg/rpc/kitex_gen/types/Types.go b/pkg/rpc/kitex_gen/types/Types.go index d2e4fade..3f1baf47 100644 --- a/pkg/rpc/kitex_gen/types/Types.go +++ b/pkg/rpc/kitex_gen/types/Types.go @@ -1,4 +1,4 @@ -// Code generated by thriftgo (0.2.7). DO NOT EDIT. +// Code generated by thriftgo (0.3.13). DO NOT EDIT. package types @@ -194,6 +194,9 @@ const ( TPrimitiveType_VARIANT TPrimitiveType = 34 TPrimitiveType_LAMBDA_FUNCTION TPrimitiveType = 35 TPrimitiveType_AGG_STATE TPrimitiveType = 36 + TPrimitiveType_DECIMAL256 TPrimitiveType = 37 + TPrimitiveType_IPV4 TPrimitiveType = 38 + TPrimitiveType_IPV6 TPrimitiveType = 39 ) func (p TPrimitiveType) String() string { @@ -272,6 +275,12 @@ func (p TPrimitiveType) String() string { return "LAMBDA_FUNCTION" case TPrimitiveType_AGG_STATE: return "AGG_STATE" + case TPrimitiveType_DECIMAL256: + return "DECIMAL256" + case TPrimitiveType_IPV4: + return "IPV4" + case TPrimitiveType_IPV6: + return "IPV6" } return "" } @@ -352,6 +361,12 @@ func TPrimitiveTypeFromString(s string) (TPrimitiveType, error) { return TPrimitiveType_LAMBDA_FUNCTION, nil case "AGG_STATE": return TPrimitiveType_AGG_STATE, nil + case "DECIMAL256": + return TPrimitiveType_DECIMAL256, nil + case "IPV4": + return TPrimitiveType_IPV4, nil + case "IPV6": + return TPrimitiveType_IPV6, nil } return TPrimitiveType(0), fmt.Errorf("not a valid TPrimitiveType string") } @@ -437,6 +452,7 @@ const ( TStorageBackendType_JFS TStorageBackendType = 3 TStorageBackendType_LOCAL TStorageBackendType = 4 TStorageBackendType_OFS TStorageBackendType = 5 + TStorageBackendType_AZURE TStorageBackendType = 6 ) func (p TStorageBackendType) String() string { @@ -453,6 +469,8 @@ func (p TStorageBackendType) String() string { return "LOCAL" case TStorageBackendType_OFS: return "OFS" + case TStorageBackendType_AZURE: + return "AZURE" } return "" } @@ -471,6 +489,8 @@ func TStorageBackendTypeFromString(s string) (TStorageBackendType, error) { return TStorageBackendType_LOCAL, nil case "OFS": return TStorageBackendType_OFS, nil + case "AZURE": + return TStorageBackendType_AZURE, nil } return TStorageBackendType(0), fmt.Errorf("not a valid TStorageBackendType string") } @@ -490,6 +510,55 @@ func (p *TStorageBackendType) Value() (driver.Value, error) { return int64(*p), nil } +type TInvertedIndexFileStorageFormat int64 + +const ( + TInvertedIndexFileStorageFormat_DEFAULT TInvertedIndexFileStorageFormat = 0 + TInvertedIndexFileStorageFormat_V1 TInvertedIndexFileStorageFormat = 1 + TInvertedIndexFileStorageFormat_V2 TInvertedIndexFileStorageFormat = 2 +) + +func (p TInvertedIndexFileStorageFormat) String() string { + switch p { + case TInvertedIndexFileStorageFormat_DEFAULT: + return "DEFAULT" + case TInvertedIndexFileStorageFormat_V1: + return "V1" + case TInvertedIndexFileStorageFormat_V2: + return "V2" + } + return "" +} + +func TInvertedIndexFileStorageFormatFromString(s string) (TInvertedIndexFileStorageFormat, error) { + switch s { + case "DEFAULT": + return TInvertedIndexFileStorageFormat_DEFAULT, nil + case "V1": + return TInvertedIndexFileStorageFormat_V1, nil + case "V2": + return TInvertedIndexFileStorageFormat_V2, nil + } + return TInvertedIndexFileStorageFormat(0), fmt.Errorf("not a valid TInvertedIndexFileStorageFormat string") +} + +func TInvertedIndexFileStorageFormatPtr(v TInvertedIndexFileStorageFormat) *TInvertedIndexFileStorageFormat { + return &v +} +func (p *TInvertedIndexFileStorageFormat) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + 
*p = TInvertedIndexFileStorageFormat(result.Int64) + return +} + +func (p *TInvertedIndexFileStorageFormat) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TAggregationType int64 const ( @@ -654,6 +723,10 @@ const ( TTaskType_PUSH_STORAGE_POLICY TTaskType = 29 TTaskType_ALTER_INVERTED_INDEX TTaskType = 30 TTaskType_GC_BINLOG TTaskType = 31 + TTaskType_CLEAN_TRASH TTaskType = 32 + TTaskType_UPDATE_VISIBLE_VERSION TTaskType = 33 + TTaskType_CLEAN_UDF_CACHE TTaskType = 34 + TTaskType_CALCULATE_DELETE_BITMAP TTaskType = 1000 ) func (p TTaskType) String() string { @@ -722,6 +795,14 @@ func (p TTaskType) String() string { return "ALTER_INVERTED_INDEX" case TTaskType_GC_BINLOG: return "GC_BINLOG" + case TTaskType_CLEAN_TRASH: + return "CLEAN_TRASH" + case TTaskType_UPDATE_VISIBLE_VERSION: + return "UPDATE_VISIBLE_VERSION" + case TTaskType_CLEAN_UDF_CACHE: + return "CLEAN_UDF_CACHE" + case TTaskType_CALCULATE_DELETE_BITMAP: + return "CALCULATE_DELETE_BITMAP" } return "" } @@ -792,6 +873,14 @@ func TTaskTypeFromString(s string) (TTaskType, error) { return TTaskType_ALTER_INVERTED_INDEX, nil case "GC_BINLOG": return TTaskType_GC_BINLOG, nil + case "CLEAN_TRASH": + return TTaskType_CLEAN_TRASH, nil + case "UPDATE_VISIBLE_VERSION": + return TTaskType_UPDATE_VISIBLE_VERSION, nil + case "CLEAN_UDF_CACHE": + return TTaskType_CLEAN_UDF_CACHE, nil + case "CALCULATE_DELETE_BITMAP": + return TTaskType_CALCULATE_DELETE_BITMAP, nil } return TTaskType(0), fmt.Errorf("not a valid TTaskType string") } @@ -1191,6 +1280,8 @@ const ( TOdbcTableType_OCEANBASE TOdbcTableType = 10 TOdbcTableType_OCEANBASE_ORACLE TOdbcTableType = 11 TOdbcTableType_NEBULA TOdbcTableType = 12 + TOdbcTableType_DB2 TOdbcTableType = 13 + TOdbcTableType_GBASE TOdbcTableType = 14 ) func (p TOdbcTableType) String() string { @@ -1221,6 +1312,10 @@ func (p TOdbcTableType) String() string { return "OCEANBASE_ORACLE" case TOdbcTableType_NEBULA: return "NEBULA" + case TOdbcTableType_DB2: + return "DB2" + case TOdbcTableType_GBASE: + return "GBASE" } return "" } @@ -1253,6 +1348,10 @@ func TOdbcTableTypeFromString(s string) (TOdbcTableType, error) { return TOdbcTableType_OCEANBASE_ORACLE, nil case "NEBULA": return TOdbcTableType_NEBULA, nil + case "DB2": + return TOdbcTableType_DB2, nil + case "GBASE": + return TOdbcTableType_GBASE, nil } return TOdbcTableType(0), fmt.Errorf("not a valid TOdbcTableType string") } @@ -1384,19 +1483,21 @@ func (p *TEtlState) Value() (driver.Value, error) { type TTableType int64 const ( - TTableType_MYSQL_TABLE TTableType = 0 - TTableType_OLAP_TABLE TTableType = 1 - TTableType_SCHEMA_TABLE TTableType = 2 - TTableType_KUDU_TABLE TTableType = 3 - TTableType_BROKER_TABLE TTableType = 4 - TTableType_ES_TABLE TTableType = 5 - TTableType_ODBC_TABLE TTableType = 6 - TTableType_HIVE_TABLE TTableType = 7 - TTableType_ICEBERG_TABLE TTableType = 8 - TTableType_HUDI_TABLE TTableType = 9 - TTableType_JDBC_TABLE TTableType = 10 - TTableType_TEST_EXTERNAL_TABLE TTableType = 11 - TTableType_MAX_COMPUTE_TABLE TTableType = 12 + TTableType_MYSQL_TABLE TTableType = 0 + TTableType_OLAP_TABLE TTableType = 1 + TTableType_SCHEMA_TABLE TTableType = 2 + TTableType_KUDU_TABLE TTableType = 3 + TTableType_BROKER_TABLE TTableType = 4 + TTableType_ES_TABLE TTableType = 5 + TTableType_ODBC_TABLE TTableType = 6 + TTableType_HIVE_TABLE TTableType = 7 + TTableType_ICEBERG_TABLE TTableType = 8 + TTableType_HUDI_TABLE TTableType = 9 + TTableType_JDBC_TABLE TTableType = 10 + 
TTableType_TEST_EXTERNAL_TABLE TTableType = 11 + TTableType_MAX_COMPUTE_TABLE TTableType = 12 + TTableType_LAKESOUL_TABLE TTableType = 13 + TTableType_TRINO_CONNECTOR_TABLE TTableType = 14 ) func (p TTableType) String() string { @@ -1427,6 +1528,10 @@ func (p TTableType) String() string { return "TEST_EXTERNAL_TABLE" case TTableType_MAX_COMPUTE_TABLE: return "MAX_COMPUTE_TABLE" + case TTableType_LAKESOUL_TABLE: + return "LAKESOUL_TABLE" + case TTableType_TRINO_CONNECTOR_TABLE: + return "TRINO_CONNECTOR_TABLE" } return "" } @@ -1459,6 +1564,10 @@ func TTableTypeFromString(s string) (TTableType, error) { return TTableType_TEST_EXTERNAL_TABLE, nil case "MAX_COMPUTE_TABLE": return TTableType_MAX_COMPUTE_TABLE, nil + case "LAKESOUL_TABLE": + return TTableType_LAKESOUL_TABLE, nil + case "TRINO_CONNECTOR_TABLE": + return TTableType_TRINO_CONNECTOR_TABLE, nil } return TTableType(0), fmt.Errorf("not a valid TTableType string") } @@ -1827,6 +1936,53 @@ func (p *TMergeType) Value() (driver.Value, error) { return int64(*p), nil } +type TUniqueKeyUpdateMode int64 + +const ( + TUniqueKeyUpdateMode_UPSERT TUniqueKeyUpdateMode = 0 + TUniqueKeyUpdateMode_UPDATE_FIXED_COLUMNS TUniqueKeyUpdateMode = 1 + TUniqueKeyUpdateMode_UPDATE_FLEXIBLE_COLUMNS TUniqueKeyUpdateMode = 2 +) + +func (p TUniqueKeyUpdateMode) String() string { + switch p { + case TUniqueKeyUpdateMode_UPSERT: + return "UPSERT" + case TUniqueKeyUpdateMode_UPDATE_FIXED_COLUMNS: + return "UPDATE_FIXED_COLUMNS" + case TUniqueKeyUpdateMode_UPDATE_FLEXIBLE_COLUMNS: + return "UPDATE_FLEXIBLE_COLUMNS" + } + return "" +} + +func TUniqueKeyUpdateModeFromString(s string) (TUniqueKeyUpdateMode, error) { + switch s { + case "UPSERT": + return TUniqueKeyUpdateMode_UPSERT, nil + case "UPDATE_FIXED_COLUMNS": + return TUniqueKeyUpdateMode_UPDATE_FIXED_COLUMNS, nil + case "UPDATE_FLEXIBLE_COLUMNS": + return TUniqueKeyUpdateMode_UPDATE_FLEXIBLE_COLUMNS, nil + } + return TUniqueKeyUpdateMode(0), fmt.Errorf("not a valid TUniqueKeyUpdateMode string") +} + +func TUniqueKeyUpdateModePtr(v TUniqueKeyUpdateMode) *TUniqueKeyUpdateMode { return &v } +func (p *TUniqueKeyUpdateMode) Scan(value interface{}) (err error) { + var result sql.NullInt64 + err = result.Scan(value) + *p = TUniqueKeyUpdateMode(result.Int64) + return +} + +func (p *TUniqueKeyUpdateMode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + type TSortType int64 const ( @@ -1872,12 +2028,17 @@ func (p *TSortType) Value() (driver.Value, error) { type TMetadataType int64 const ( - TMetadataType_ICEBERG TMetadataType = 0 - TMetadataType_BACKENDS TMetadataType = 1 - TMetadataType_WORKLOAD_GROUPS TMetadataType = 2 - TMetadataType_FRONTENDS TMetadataType = 3 - TMetadataType_CATALOGS TMetadataType = 4 - TMetadataType_FRONTENDS_DISKS TMetadataType = 5 + TMetadataType_ICEBERG TMetadataType = 0 + TMetadataType_BACKENDS TMetadataType = 1 + TMetadataType_FRONTENDS TMetadataType = 2 + TMetadataType_CATALOGS TMetadataType = 3 + TMetadataType_FRONTENDS_DISKS TMetadataType = 4 + TMetadataType_MATERIALIZED_VIEWS TMetadataType = 5 + TMetadataType_JOBS TMetadataType = 6 + TMetadataType_TASKS TMetadataType = 7 + TMetadataType_WORKLOAD_SCHED_POLICY TMetadataType = 8 + TMetadataType_PARTITIONS TMetadataType = 9 + TMetadataType_PARTITION_VALUES TMetadataType = 10 ) func (p TMetadataType) String() string { @@ -1886,14 +2047,24 @@ func (p TMetadataType) String() string { return "ICEBERG" case TMetadataType_BACKENDS: return "BACKENDS" - case TMetadataType_WORKLOAD_GROUPS: - return 
"WORKLOAD_GROUPS" case TMetadataType_FRONTENDS: return "FRONTENDS" case TMetadataType_CATALOGS: return "CATALOGS" case TMetadataType_FRONTENDS_DISKS: return "FRONTENDS_DISKS" + case TMetadataType_MATERIALIZED_VIEWS: + return "MATERIALIZED_VIEWS" + case TMetadataType_JOBS: + return "JOBS" + case TMetadataType_TASKS: + return "TASKS" + case TMetadataType_WORKLOAD_SCHED_POLICY: + return "WORKLOAD_SCHED_POLICY" + case TMetadataType_PARTITIONS: + return "PARTITIONS" + case TMetadataType_PARTITION_VALUES: + return "PARTITION_VALUES" } return "" } @@ -1904,14 +2075,24 @@ func TMetadataTypeFromString(s string) (TMetadataType, error) { return TMetadataType_ICEBERG, nil case "BACKENDS": return TMetadataType_BACKENDS, nil - case "WORKLOAD_GROUPS": - return TMetadataType_WORKLOAD_GROUPS, nil case "FRONTENDS": return TMetadataType_FRONTENDS, nil case "CATALOGS": return TMetadataType_CATALOGS, nil case "FRONTENDS_DISKS": return TMetadataType_FRONTENDS_DISKS, nil + case "MATERIALIZED_VIEWS": + return TMetadataType_MATERIALIZED_VIEWS, nil + case "JOBS": + return TMetadataType_JOBS, nil + case "TASKS": + return TMetadataType_TASKS, nil + case "WORKLOAD_SCHED_POLICY": + return TMetadataType_WORKLOAD_SCHED_POLICY, nil + case "PARTITIONS": + return TMetadataType_PARTITIONS, nil + case "PARTITION_VALUES": + return TMetadataType_PARTITION_VALUES, nil } return TMetadataType(0), fmt.Errorf("not a valid TMetadataType string") } @@ -2014,7 +2195,6 @@ func NewTScalarType() *TScalarType { } func (p *TScalarType) InitDefault() { - *p = TScalarType{} } func (p *TScalarType) GetType() (v TPrimitiveType) { @@ -2105,47 +2285,38 @@ func (p *TScalarType) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2177,38 +2348,47 @@ RequiredFieldNotSetError: } func (p *TScalarType) ReadField1(iprot thrift.TProtocol) error { + + var _field TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TPrimitiveType(v) + _field = TPrimitiveType(v) } + p.Type = _field return nil } - func (p *TScalarType) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Len = &v + _field = &v } + p.Len = _field return nil } - func (p *TScalarType) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Precision = &v + _field = &v 
} + p.Precision = _field return nil } - func (p *TScalarType) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Scale = &v + _field = &v } + p.Scale = _field return nil } @@ -2234,7 +2414,6 @@ func (p *TScalarType) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2332,6 +2511,7 @@ func (p *TScalarType) String() string { return "" } return fmt.Sprintf("TScalarType(%+v)", *p) + } func (p *TScalarType) DeepEqual(ano *TScalarType) bool { @@ -2410,7 +2590,6 @@ func NewTStructField() *TStructField { } func (p *TStructField) InitDefault() { - *p = TStructField{} } func (p *TStructField) GetName() (v string) { @@ -2484,37 +2663,30 @@ func (p *TStructField) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2546,29 +2718,36 @@ RequiredFieldNotSetError: } func (p *TStructField) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } - func (p *TStructField) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = &v + _field = &v } + p.Comment = _field return nil } - func (p *TStructField) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ContainsNull = &v + _field = &v } + p.ContainsNull = _field return nil } @@ -2590,7 +2769,6 @@ func (p *TStructField) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -2669,6 +2847,7 @@ func (p *TStructField) String() string { return "" } return fmt.Sprintf("TStructField(%+v)", *p) + } func (p *TStructField) DeepEqual(ano *TStructField) bool { @@ -2734,7 +2913,6 @@ func NewTTypeNode() *TTypeNode { } func (p *TTypeNode) InitDefault() { - *p = TTypeNode{} } func (p *TTypeNode) GetType() (v TTypeNodeType) { @@ -2842,57 +3020,46 @@ func (p *TTypeNode) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRUCT { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if 
err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.BOOL { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.LIST { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -2924,58 +3091,66 @@ RequiredFieldNotSetError: } func (p *TTypeNode) ReadField1(iprot thrift.TProtocol) error { + + var _field TTypeNodeType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TTypeNodeType(v) + _field = TTypeNodeType(v) } + p.Type = _field return nil } - func (p *TTypeNode) ReadField2(iprot thrift.TProtocol) error { - p.ScalarType = NewTScalarType() - if err := p.ScalarType.Read(iprot); err != nil { + _field := NewTScalarType() + if err := _field.Read(iprot); err != nil { return err } + p.ScalarType = _field return nil } - func (p *TTypeNode) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.StructFields = make([]*TStructField, 0, size) + _field := make([]*TStructField, 0, size) + values := make([]TStructField, size) for i := 0; i < size; i++ { - _elem := NewTStructField() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.StructFields = append(p.StructFields, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.StructFields = _field return nil } - func (p *TTypeNode) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ContainsNull = &v + _field = &v } + p.ContainsNull = _field return nil } - func (p *TTypeNode) ReadField5(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ContainsNulls = make([]bool, 0, size) + _field := make([]bool, 0, size) for i := 0; i < size; i++ { + var _elem bool if v, err := iprot.ReadBool(); err != nil { return err @@ -2983,11 +3158,12 @@ func (p *TTypeNode) ReadField5(iprot thrift.TProtocol) error { _elem = v } - p.ContainsNulls = append(p.ContainsNulls, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ContainsNulls = _field return nil } @@ -3017,7 +3193,6 @@ func (p *TTypeNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3150,6 +3325,7 @@ func (p *TTypeNode) String() string { return "" } return fmt.Sprintf("TTypeNode(%+v)", *p) + } func (p *TTypeNode) DeepEqual(ano *TTypeNode) bool { @@ -3236,6 +3412,7 @@ type TTypeDesc struct { SubTypes []*TTypeDesc `thrift:"sub_types,4,optional" frugal:"4,optional,list" json:"sub_types,omitempty"` 
ResultIsNullable *bool `thrift:"result_is_nullable,5,optional" frugal:"5,optional,bool" json:"result_is_nullable,omitempty"` FunctionName *string `thrift:"function_name,6,optional" frugal:"6,optional,string" json:"function_name,omitempty"` + BeExecVersion *int32 `thrift:"be_exec_version,7,optional" frugal:"7,optional,i32" json:"be_exec_version,omitempty"` } func NewTTypeDesc() *TTypeDesc { @@ -3243,7 +3420,6 @@ func NewTTypeDesc() *TTypeDesc { } func (p *TTypeDesc) InitDefault() { - *p = TTypeDesc{} } func (p *TTypeDesc) GetTypes() (v []*TTypeNode) { @@ -3294,6 +3470,15 @@ func (p *TTypeDesc) GetFunctionName() (v string) { } return *p.FunctionName } + +var TTypeDesc_BeExecVersion_DEFAULT int32 + +func (p *TTypeDesc) GetBeExecVersion() (v int32) { + if !p.IsSetBeExecVersion() { + return TTypeDesc_BeExecVersion_DEFAULT + } + return *p.BeExecVersion +} func (p *TTypeDesc) SetTypes(val []*TTypeNode) { p.Types = val } @@ -3312,6 +3497,9 @@ func (p *TTypeDesc) SetResultIsNullable(val *bool) { func (p *TTypeDesc) SetFunctionName(val *string) { p.FunctionName = val } +func (p *TTypeDesc) SetBeExecVersion(val *int32) { + p.BeExecVersion = val +} var fieldIDToName_TTypeDesc = map[int16]string{ 1: "types", @@ -3320,6 +3508,7 @@ var fieldIDToName_TTypeDesc = map[int16]string{ 4: "sub_types", 5: "result_is_nullable", 6: "function_name", + 7: "be_exec_version", } func (p *TTypeDesc) IsSetIsNullable() bool { @@ -3342,6 +3531,10 @@ func (p *TTypeDesc) IsSetFunctionName() bool { return p.FunctionName != nil } +func (p *TTypeDesc) IsSetBeExecVersion() bool { + return p.BeExecVersion != nil +} + func (p *TTypeDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3366,67 +3559,62 @@ func (p *TTypeDesc) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.BOOL { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.BOOL { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto 
SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -3456,74 +3644,100 @@ func (p *TTypeDesc) ReadField1(iprot thrift.TProtocol) error { if err != nil { return err } - p.Types = make([]*TTypeNode, 0, size) + _field := make([]*TTypeNode, 0, size) + values := make([]TTypeNode, size) for i := 0; i < size; i++ { - _elem := NewTTypeNode() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Types = append(p.Types, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Types = _field return nil } - func (p *TTypeDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsNullable = &v + _field = &v } + p.IsNullable = _field return nil } - func (p *TTypeDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ByteSize = &v + _field = &v } + p.ByteSize = _field return nil } - func (p *TTypeDesc) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.SubTypes = make([]*TTypeDesc, 0, size) + _field := make([]*TTypeDesc, 0, size) + values := make([]TTypeDesc, size) for i := 0; i < size; i++ { - _elem := NewTTypeDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.SubTypes = append(p.SubTypes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.SubTypes = _field return nil } - func (p *TTypeDesc) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.ResultIsNullable = &v + _field = &v } + p.ResultIsNullable = _field return nil } - func (p *TTypeDesc) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FunctionName = &v + _field = &v + } + p.FunctionName = _field + return nil +} +func (p *TTypeDesc) ReadField7(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } + p.BeExecVersion = _field return nil } @@ -3557,7 +3771,10 @@ func (p *TTypeDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -3704,11 +3921,31 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) } +func (p *TTypeDesc) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetBeExecVersion() { + if err = oprot.WriteFieldBegin("be_exec_version", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BeExecVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + func (p *TTypeDesc) String() string { if p == nil { return "" } return fmt.Sprintf("TTypeDesc(%+v)", *p) + } func (p *TTypeDesc) 
DeepEqual(ano *TTypeDesc) bool { @@ -3735,6 +3972,9 @@ func (p *TTypeDesc) DeepEqual(ano *TTypeDesc) bool { if !p.Field6DeepEqual(ano.FunctionName) { return false } + if !p.Field7DeepEqual(ano.BeExecVersion) { + return false + } return true } @@ -3812,6 +4052,18 @@ func (p *TTypeDesc) Field6DeepEqual(src *string) bool { } return true } +func (p *TTypeDesc) Field7DeepEqual(src *int32) bool { + + if p.BeExecVersion == src { + return true + } else if p.BeExecVersion == nil || src == nil { + return false + } + if *p.BeExecVersion != *src { + return false + } + return true +} type TColumnType struct { Type TPrimitiveType `thrift:"type,1,required" frugal:"1,required,TPrimitiveType" json:"type"` @@ -3826,7 +4078,6 @@ func NewTColumnType() *TColumnType { } func (p *TColumnType) InitDefault() { - *p = TColumnType{} } func (p *TColumnType) GetType() (v TPrimitiveType) { @@ -3934,57 +4185,46 @@ func (p *TColumnType) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I32 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4016,47 +4256,58 @@ RequiredFieldNotSetError: } func (p *TColumnType) ReadField1(iprot thrift.TProtocol) error { + + var _field TPrimitiveType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Type = TPrimitiveType(v) + _field = TPrimitiveType(v) } + p.Type = _field return nil } - func (p *TColumnType) ReadField2(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Len = &v + _field = &v } + p.Len = _field return nil } - func (p *TColumnType) ReadField3(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.IndexLen = &v + _field = &v } + p.IndexLen = _field return nil } - func (p *TColumnType) ReadField4(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Precision = &v + _field = &v } + p.Precision = _field return nil } - func (p *TColumnType) ReadField5(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Scale = &v + _field = &v } + p.Scale = 
_field return nil } @@ -4086,7 +4337,6 @@ func (p *TColumnType) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4203,6 +4453,7 @@ func (p *TColumnType) String() string { return "" } return fmt.Sprintf("TColumnType(%+v)", *p) + } func (p *TColumnType) DeepEqual(ano *TColumnType) bool { @@ -4295,7 +4546,6 @@ func NewTNetworkAddress() *TNetworkAddress { } func (p *TNetworkAddress) InitDefault() { - *p = TNetworkAddress{} } func (p *TNetworkAddress) GetHostname() (v string) { @@ -4344,10 +4594,8 @@ func (p *TNetworkAddress) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHostname = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -4355,17 +4603,14 @@ func (p *TNetworkAddress) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4402,20 +4647,25 @@ RequiredFieldNotSetError: } func (p *TNetworkAddress) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Hostname = v + _field = v } + p.Hostname = _field return nil } - func (p *TNetworkAddress) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.Port = v + _field = v } + p.Port = _field return nil } @@ -4433,7 +4683,6 @@ func (p *TNetworkAddress) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4491,6 +4740,7 @@ func (p *TNetworkAddress) String() string { return "" } return fmt.Sprintf("TNetworkAddress(%+v)", *p) + } func (p *TNetworkAddress) DeepEqual(ano *TNetworkAddress) bool { @@ -4533,7 +4783,6 @@ func NewTUniqueId() *TUniqueId { } func (p *TUniqueId) InitDefault() { - *p = TUniqueId{} } func (p *TUniqueId) GetHi() (v int64) { @@ -4582,10 +4831,8 @@ func (p *TUniqueId) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHi = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -4593,17 +4840,14 @@ func (p *TUniqueId) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetLo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4640,20 +4884,25 @@ RequiredFieldNotSetError: } func (p *TUniqueId) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Hi = v + _field = v } + p.Hi = _field return nil } - func (p *TUniqueId) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); 
err != nil { return err } else { - p.Lo = v + _field = v } + p.Lo = _field return nil } @@ -4671,7 +4920,6 @@ func (p *TUniqueId) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4729,6 +4977,7 @@ func (p *TUniqueId) String() string { return "" } return fmt.Sprintf("TUniqueId(%+v)", *p) + } func (p *TUniqueId) DeepEqual(ano *TUniqueId) bool { @@ -4771,7 +5020,6 @@ func NewTFunctionName() *TFunctionName { } func (p *TFunctionName) InitDefault() { - *p = TFunctionName{} } var TFunctionName_DbName_DEFAULT string @@ -4827,10 +5075,8 @@ func (p *TFunctionName) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -4838,17 +5084,14 @@ func (p *TFunctionName) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetFunctionName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -4880,20 +5123,25 @@ RequiredFieldNotSetError: } func (p *TFunctionName) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DbName = &v + _field = &v } + p.DbName = _field return nil } - func (p *TFunctionName) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FunctionName = v + _field = v } + p.FunctionName = _field return nil } @@ -4911,7 +5159,6 @@ func (p *TFunctionName) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4971,6 +5218,7 @@ func (p *TFunctionName) String() string { return "" } return fmt.Sprintf("TFunctionName(%+v)", *p) + } func (p *TFunctionName) DeepEqual(ano *TFunctionName) bool { @@ -5019,7 +5267,6 @@ func NewTScalarFunction() *TScalarFunction { } func (p *TScalarFunction) InitDefault() { - *p = TScalarFunction{} } func (p *TScalarFunction) GetSymbol() (v string) { @@ -5093,37 +5340,30 @@ func (p *TScalarFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSymbol = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5155,29 +5395,36 @@ 
RequiredFieldNotSetError: } func (p *TScalarFunction) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Symbol = v + _field = v } + p.Symbol = _field return nil } - func (p *TScalarFunction) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.PrepareFnSymbol = &v + _field = &v } + p.PrepareFnSymbol = _field return nil } - func (p *TScalarFunction) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.CloseFnSymbol = &v + _field = &v } + p.CloseFnSymbol = _field return nil } @@ -5199,7 +5446,6 @@ func (p *TScalarFunction) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -5278,6 +5524,7 @@ func (p *TScalarFunction) String() string { return "" } return fmt.Sprintf("TScalarFunction(%+v)", *p) + } func (p *TScalarFunction) DeepEqual(ano *TScalarFunction) bool { @@ -5351,10 +5598,7 @@ func NewTAggregateFunction() *TAggregateFunction { } func (p *TAggregateFunction) InitDefault() { - *p = TAggregateFunction{ - - IsAnalyticOnlyFn: false, - } + p.IsAnalyticOnlyFn = false } var TAggregateFunction_IntermediateType_DEFAULT *TTypeDesc @@ -5556,107 +5800,86 @@ func (p *TAggregateFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIntermediateType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if 
err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.BOOL { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.STRING { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -5688,91 +5911,110 @@ RequiredFieldNotSetError: } func (p *TAggregateFunction) ReadField1(iprot thrift.TProtocol) error { - p.IntermediateType = NewTTypeDesc() - if err := p.IntermediateType.Read(iprot); err != nil { + _field := NewTTypeDesc() + if err := _field.Read(iprot); err != nil { return err } + p.IntermediateType = _field return nil } - func (p *TAggregateFunction) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.UpdateFnSymbol = &v + _field = &v } + p.UpdateFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.InitFnSymbol = &v + _field = &v } + p.InitFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.SerializeFnSymbol = &v + _field = &v } + p.SerializeFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.MergeFnSymbol = &v + _field = &v } + p.MergeFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.FinalizeFnSymbol = &v + _field = &v } + p.FinalizeFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.GetValueFnSymbol = &v + _field = &v } + p.GetValueFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField9(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.RemoveFnSymbol = &v + _field = &v } + p.RemoveFnSymbol = _field return nil } - func (p *TAggregateFunction) ReadField10(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsAnalyticOnlyFn = v + _field = v } + p.IsAnalyticOnlyFn = _field return nil } - func (p *TAggregateFunction) ReadField11(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Symbol = &v + _field = &v } + p.Symbol = _field return nil } @@ -5822,7 +6064,6 @@ func (p *TAggregateFunction) Write(oprot thrift.TProtocol) (err error) { fieldId = 11 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -6034,6 +6275,7 @@ func (p 
*TAggregateFunction) String() string { return "" } return fmt.Sprintf("TAggregateFunction(%+v)", *p) + } func (p *TAggregateFunction) DeepEqual(ano *TAggregateFunction) bool { @@ -6187,33 +6429,37 @@ func (p *TAggregateFunction) Field11DeepEqual(src *string) bool { } type TFunction struct { - Name *TFunctionName `thrift:"name,1,required" frugal:"1,required,TFunctionName" json:"name"` - BinaryType TFunctionBinaryType `thrift:"binary_type,2,required" frugal:"2,required,TFunctionBinaryType" json:"binary_type"` - ArgTypes []*TTypeDesc `thrift:"arg_types,3,required" frugal:"3,required,list" json:"arg_types"` - RetType *TTypeDesc `thrift:"ret_type,4,required" frugal:"4,required,TTypeDesc" json:"ret_type"` - HasVarArgs_ bool `thrift:"has_var_args,5,required" frugal:"5,required,bool" json:"has_var_args"` - Comment *string `thrift:"comment,6,optional" frugal:"6,optional,string" json:"comment,omitempty"` - Signature *string `thrift:"signature,7,optional" frugal:"7,optional,string" json:"signature,omitempty"` - HdfsLocation *string `thrift:"hdfs_location,8,optional" frugal:"8,optional,string" json:"hdfs_location,omitempty"` - ScalarFn *TScalarFunction `thrift:"scalar_fn,9,optional" frugal:"9,optional,TScalarFunction" json:"scalar_fn,omitempty"` - AggregateFn *TAggregateFunction `thrift:"aggregate_fn,10,optional" frugal:"10,optional,TAggregateFunction" json:"aggregate_fn,omitempty"` - Id *int64 `thrift:"id,11,optional" frugal:"11,optional,i64" json:"id,omitempty"` - Checksum *string `thrift:"checksum,12,optional" frugal:"12,optional,string" json:"checksum,omitempty"` - Vectorized bool `thrift:"vectorized,13,optional" frugal:"13,optional,bool" json:"vectorized,omitempty"` + Name *TFunctionName `thrift:"name,1,required" frugal:"1,required,TFunctionName" json:"name"` + BinaryType TFunctionBinaryType `thrift:"binary_type,2,required" frugal:"2,required,TFunctionBinaryType" json:"binary_type"` + ArgTypes []*TTypeDesc `thrift:"arg_types,3,required" frugal:"3,required,list" json:"arg_types"` + RetType *TTypeDesc `thrift:"ret_type,4,required" frugal:"4,required,TTypeDesc" json:"ret_type"` + HasVarArgs_ bool `thrift:"has_var_args,5,required" frugal:"5,required,bool" json:"has_var_args"` + Comment *string `thrift:"comment,6,optional" frugal:"6,optional,string" json:"comment,omitempty"` + Signature *string `thrift:"signature,7,optional" frugal:"7,optional,string" json:"signature,omitempty"` + HdfsLocation *string `thrift:"hdfs_location,8,optional" frugal:"8,optional,string" json:"hdfs_location,omitempty"` + ScalarFn *TScalarFunction `thrift:"scalar_fn,9,optional" frugal:"9,optional,TScalarFunction" json:"scalar_fn,omitempty"` + AggregateFn *TAggregateFunction `thrift:"aggregate_fn,10,optional" frugal:"10,optional,TAggregateFunction" json:"aggregate_fn,omitempty"` + Id *int64 `thrift:"id,11,optional" frugal:"11,optional,i64" json:"id,omitempty"` + Checksum *string `thrift:"checksum,12,optional" frugal:"12,optional,string" json:"checksum,omitempty"` + Vectorized bool `thrift:"vectorized,13,optional" frugal:"13,optional,bool" json:"vectorized,omitempty"` + IsUdtfFunction bool `thrift:"is_udtf_function,14,optional" frugal:"14,optional,bool" json:"is_udtf_function,omitempty"` + IsStaticLoad bool `thrift:"is_static_load,15,optional" frugal:"15,optional,bool" json:"is_static_load,omitempty"` + ExpirationTime *int64 `thrift:"expiration_time,16,optional" frugal:"16,optional,i64" json:"expiration_time,omitempty"` } func NewTFunction() *TFunction { return &TFunction{ - Vectorized: false, + Vectorized: false, + 
IsUdtfFunction: false, + IsStaticLoad: false, } } func (p *TFunction) InitDefault() { - *p = TFunction{ - - Vectorized: false, - } + p.Vectorized = false + p.IsUdtfFunction = false + p.IsStaticLoad = false } var TFunction_Name_DEFAULT *TFunctionName @@ -6317,6 +6563,33 @@ func (p *TFunction) GetVectorized() (v bool) { } return p.Vectorized } + +var TFunction_IsUdtfFunction_DEFAULT bool = false + +func (p *TFunction) GetIsUdtfFunction() (v bool) { + if !p.IsSetIsUdtfFunction() { + return TFunction_IsUdtfFunction_DEFAULT + } + return p.IsUdtfFunction +} + +var TFunction_IsStaticLoad_DEFAULT bool = false + +func (p *TFunction) GetIsStaticLoad() (v bool) { + if !p.IsSetIsStaticLoad() { + return TFunction_IsStaticLoad_DEFAULT + } + return p.IsStaticLoad +} + +var TFunction_ExpirationTime_DEFAULT int64 + +func (p *TFunction) GetExpirationTime() (v int64) { + if !p.IsSetExpirationTime() { + return TFunction_ExpirationTime_DEFAULT + } + return *p.ExpirationTime +} func (p *TFunction) SetName(val *TFunctionName) { p.Name = val } @@ -6356,6 +6629,15 @@ func (p *TFunction) SetChecksum(val *string) { func (p *TFunction) SetVectorized(val bool) { p.Vectorized = val } +func (p *TFunction) SetIsUdtfFunction(val bool) { + p.IsUdtfFunction = val +} +func (p *TFunction) SetIsStaticLoad(val bool) { + p.IsStaticLoad = val +} +func (p *TFunction) SetExpirationTime(val *int64) { + p.ExpirationTime = val +} var fieldIDToName_TFunction = map[int16]string{ 1: "name", @@ -6371,6 +6653,9 @@ var fieldIDToName_TFunction = map[int16]string{ 11: "id", 12: "checksum", 13: "vectorized", + 14: "is_udtf_function", + 15: "is_static_load", + 16: "expiration_time", } func (p *TFunction) IsSetName() bool { @@ -6413,6 +6698,18 @@ func (p *TFunction) IsSetVectorized() bool { return p.Vectorized != TFunction_Vectorized_DEFAULT } +func (p *TFunction) IsSetIsUdtfFunction() bool { + return p.IsUdtfFunction != TFunction_IsUdtfFunction_DEFAULT +} + +func (p *TFunction) IsSetIsStaticLoad() bool { + return p.IsStaticLoad != TFunction_IsStaticLoad_DEFAULT +} + +func (p *TFunction) IsSetExpirationTime() bool { + return p.ExpirationTime != nil +} + func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -6443,10 +6740,8 @@ func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -6454,10 +6749,8 @@ func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBinaryType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { @@ -6465,10 +6758,8 @@ func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetArgTypes = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRUCT { @@ -6476,10 +6767,8 @@ func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetRetType = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == 
thrift.BOOL { @@ -6487,97 +6776,102 @@ func (p *TFunction) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHasVarArgs_ = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.STRING { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.STRING { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRUCT { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.STRUCT { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.STRING { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.BOOL { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.I64 { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -6629,126 +6923,179 @@ RequiredFieldNotSetError: } func (p *TFunction) ReadField1(iprot thrift.TProtocol) error { - p.Name = NewTFunctionName() - if err := p.Name.Read(iprot); err != nil { + _field := NewTFunctionName() + if err := _field.Read(iprot); err != nil { return err } + p.Name = _field return nil } 
- func (p *TFunction) ReadField2(iprot thrift.TProtocol) error { + + var _field TFunctionBinaryType if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BinaryType = TFunctionBinaryType(v) + _field = TFunctionBinaryType(v) } + p.BinaryType = _field return nil } - func (p *TFunction) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.ArgTypes = make([]*TTypeDesc, 0, size) + _field := make([]*TTypeDesc, 0, size) + values := make([]TTypeDesc, size) for i := 0; i < size; i++ { - _elem := NewTTypeDesc() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.ArgTypes = append(p.ArgTypes, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.ArgTypes = _field return nil } - func (p *TFunction) ReadField4(iprot thrift.TProtocol) error { - p.RetType = NewTTypeDesc() - if err := p.RetType.Read(iprot); err != nil { + _field := NewTTypeDesc() + if err := _field.Read(iprot); err != nil { return err } + p.RetType = _field return nil } - func (p *TFunction) ReadField5(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.HasVarArgs_ = v + _field = v } + p.HasVarArgs_ = _field return nil } - func (p *TFunction) ReadField6(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Comment = &v + _field = &v } + p.Comment = _field return nil } - func (p *TFunction) ReadField7(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Signature = &v + _field = &v } + p.Signature = _field return nil } - func (p *TFunction) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.HdfsLocation = &v + _field = &v } + p.HdfsLocation = _field return nil } - func (p *TFunction) ReadField9(iprot thrift.TProtocol) error { - p.ScalarFn = NewTScalarFunction() - if err := p.ScalarFn.Read(iprot); err != nil { + _field := NewTScalarFunction() + if err := _field.Read(iprot); err != nil { return err } + p.ScalarFn = _field return nil } - func (p *TFunction) ReadField10(iprot thrift.TProtocol) error { - p.AggregateFn = NewTAggregateFunction() - if err := p.AggregateFn.Read(iprot); err != nil { + _field := NewTAggregateFunction() + if err := _field.Read(iprot); err != nil { return err } + p.AggregateFn = _field return nil } - func (p *TFunction) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Id = &v + _field = &v } + p.Id = _field return nil } - func (p *TFunction) ReadField12(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Checksum = &v + _field = &v } + p.Checksum = _field return nil } - func (p *TFunction) ReadField13(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.Vectorized = _field + return nil +} +func (p *TFunction) ReadField14(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsUdtfFunction = _field + return nil +} +func (p *TFunction) ReadField15(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); 
err != nil { return err } else { - p.Vectorized = v + _field = v + } + p.IsStaticLoad = _field + return nil +} +func (p *TFunction) ReadField16(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.ExpirationTime = _field return nil } @@ -6810,7 +7157,18 @@ func (p *TFunction) Write(oprot thrift.TProtocol) (err error) { fieldId = 13 goto WriteFieldError } - + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7074,11 +7432,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } +func (p *TFunction) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetIsUdtfFunction() { + if err = oprot.WriteFieldBegin("is_udtf_function", thrift.BOOL, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsUdtfFunction); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TFunction) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetIsStaticLoad() { + if err = oprot.WriteFieldBegin("is_static_load", thrift.BOOL, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsStaticLoad); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TFunction) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetExpirationTime() { + if err = oprot.WriteFieldBegin("expiration_time", thrift.I64, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ExpirationTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + func (p *TFunction) String() string { if p == nil { return "" } return fmt.Sprintf("TFunction(%+v)", *p) + } func (p *TFunction) DeepEqual(ano *TFunction) bool { @@ -7126,6 +7542,15 @@ func (p *TFunction) DeepEqual(ano *TFunction) bool { if !p.Field13DeepEqual(ano.Vectorized) { return false } + if !p.Field14DeepEqual(ano.IsUdtfFunction) { + return false + } + if !p.Field15DeepEqual(ano.IsStaticLoad) { + return false + } + if !p.Field16DeepEqual(ano.ExpirationTime) { + return false + } return true } @@ -7251,17 +7676,50 @@ func (p *TFunction) Field13DeepEqual(src bool) bool { } return true } +func (p *TFunction) Field14DeepEqual(src bool) bool { + + if p.IsUdtfFunction != src { + return false + } + return true +} +func (p *TFunction) Field15DeepEqual(src bool) bool { + + if p.IsStaticLoad != src { + return false + 
} + return true +} +func (p *TFunction) Field16DeepEqual(src *int64) bool { + + if p.ExpirationTime == src { + return true + } else if p.ExpirationTime == nil || src == nil { + return false + } + if *p.ExpirationTime != *src { + return false + } + return true +} type TJdbcExecutorCtorParams struct { - Statement *string `thrift:"statement,1,optional" frugal:"1,optional,string" json:"statement,omitempty"` - JdbcUrl *string `thrift:"jdbc_url,2,optional" frugal:"2,optional,string" json:"jdbc_url,omitempty"` - JdbcUser *string `thrift:"jdbc_user,3,optional" frugal:"3,optional,string" json:"jdbc_user,omitempty"` - JdbcPassword *string `thrift:"jdbc_password,4,optional" frugal:"4,optional,string" json:"jdbc_password,omitempty"` - JdbcDriverClass *string `thrift:"jdbc_driver_class,5,optional" frugal:"5,optional,string" json:"jdbc_driver_class,omitempty"` - BatchSize *int32 `thrift:"batch_size,6,optional" frugal:"6,optional,i32" json:"batch_size,omitempty"` - Op *TJdbcOperation `thrift:"op,7,optional" frugal:"7,optional,TJdbcOperation" json:"op,omitempty"` - DriverPath *string `thrift:"driver_path,8,optional" frugal:"8,optional,string" json:"driver_path,omitempty"` - TableType *TOdbcTableType `thrift:"table_type,9,optional" frugal:"9,optional,TOdbcTableType" json:"table_type,omitempty"` + Statement *string `thrift:"statement,1,optional" frugal:"1,optional,string" json:"statement,omitempty"` + JdbcUrl *string `thrift:"jdbc_url,2,optional" frugal:"2,optional,string" json:"jdbc_url,omitempty"` + JdbcUser *string `thrift:"jdbc_user,3,optional" frugal:"3,optional,string" json:"jdbc_user,omitempty"` + JdbcPassword *string `thrift:"jdbc_password,4,optional" frugal:"4,optional,string" json:"jdbc_password,omitempty"` + JdbcDriverClass *string `thrift:"jdbc_driver_class,5,optional" frugal:"5,optional,string" json:"jdbc_driver_class,omitempty"` + BatchSize *int32 `thrift:"batch_size,6,optional" frugal:"6,optional,i32" json:"batch_size,omitempty"` + Op *TJdbcOperation `thrift:"op,7,optional" frugal:"7,optional,TJdbcOperation" json:"op,omitempty"` + DriverPath *string `thrift:"driver_path,8,optional" frugal:"8,optional,string" json:"driver_path,omitempty"` + TableType *TOdbcTableType `thrift:"table_type,9,optional" frugal:"9,optional,TOdbcTableType" json:"table_type,omitempty"` + ConnectionPoolMinSize *int32 `thrift:"connection_pool_min_size,10,optional" frugal:"10,optional,i32" json:"connection_pool_min_size,omitempty"` + ConnectionPoolMaxSize *int32 `thrift:"connection_pool_max_size,11,optional" frugal:"11,optional,i32" json:"connection_pool_max_size,omitempty"` + ConnectionPoolMaxWaitTime *int32 `thrift:"connection_pool_max_wait_time,12,optional" frugal:"12,optional,i32" json:"connection_pool_max_wait_time,omitempty"` + ConnectionPoolMaxLifeTime *int32 `thrift:"connection_pool_max_life_time,13,optional" frugal:"13,optional,i32" json:"connection_pool_max_life_time,omitempty"` + ConnectionPoolCacheClearTime *int32 `thrift:"connection_pool_cache_clear_time,14,optional" frugal:"14,optional,i32" json:"connection_pool_cache_clear_time,omitempty"` + ConnectionPoolKeepAlive *bool `thrift:"connection_pool_keep_alive,15,optional" frugal:"15,optional,bool" json:"connection_pool_keep_alive,omitempty"` + CatalogId *int64 `thrift:"catalog_id,16,optional" frugal:"16,optional,i64" json:"catalog_id,omitempty"` } func NewTJdbcExecutorCtorParams() *TJdbcExecutorCtorParams { @@ -7269,7 +7727,6 @@ func NewTJdbcExecutorCtorParams() *TJdbcExecutorCtorParams { } func (p *TJdbcExecutorCtorParams) InitDefault() { - *p = 
TJdbcExecutorCtorParams{} } var TJdbcExecutorCtorParams_Statement_DEFAULT string @@ -7352,6 +7809,69 @@ func (p *TJdbcExecutorCtorParams) GetTableType() (v TOdbcTableType) { } return *p.TableType } + +var TJdbcExecutorCtorParams_ConnectionPoolMinSize_DEFAULT int32 + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolMinSize() (v int32) { + if !p.IsSetConnectionPoolMinSize() { + return TJdbcExecutorCtorParams_ConnectionPoolMinSize_DEFAULT + } + return *p.ConnectionPoolMinSize +} + +var TJdbcExecutorCtorParams_ConnectionPoolMaxSize_DEFAULT int32 + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolMaxSize() (v int32) { + if !p.IsSetConnectionPoolMaxSize() { + return TJdbcExecutorCtorParams_ConnectionPoolMaxSize_DEFAULT + } + return *p.ConnectionPoolMaxSize +} + +var TJdbcExecutorCtorParams_ConnectionPoolMaxWaitTime_DEFAULT int32 + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolMaxWaitTime() (v int32) { + if !p.IsSetConnectionPoolMaxWaitTime() { + return TJdbcExecutorCtorParams_ConnectionPoolMaxWaitTime_DEFAULT + } + return *p.ConnectionPoolMaxWaitTime +} + +var TJdbcExecutorCtorParams_ConnectionPoolMaxLifeTime_DEFAULT int32 + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolMaxLifeTime() (v int32) { + if !p.IsSetConnectionPoolMaxLifeTime() { + return TJdbcExecutorCtorParams_ConnectionPoolMaxLifeTime_DEFAULT + } + return *p.ConnectionPoolMaxLifeTime +} + +var TJdbcExecutorCtorParams_ConnectionPoolCacheClearTime_DEFAULT int32 + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolCacheClearTime() (v int32) { + if !p.IsSetConnectionPoolCacheClearTime() { + return TJdbcExecutorCtorParams_ConnectionPoolCacheClearTime_DEFAULT + } + return *p.ConnectionPoolCacheClearTime +} + +var TJdbcExecutorCtorParams_ConnectionPoolKeepAlive_DEFAULT bool + +func (p *TJdbcExecutorCtorParams) GetConnectionPoolKeepAlive() (v bool) { + if !p.IsSetConnectionPoolKeepAlive() { + return TJdbcExecutorCtorParams_ConnectionPoolKeepAlive_DEFAULT + } + return *p.ConnectionPoolKeepAlive +} + +var TJdbcExecutorCtorParams_CatalogId_DEFAULT int64 + +func (p *TJdbcExecutorCtorParams) GetCatalogId() (v int64) { + if !p.IsSetCatalogId() { + return TJdbcExecutorCtorParams_CatalogId_DEFAULT + } + return *p.CatalogId +} func (p *TJdbcExecutorCtorParams) SetStatement(val *string) { p.Statement = val } @@ -7379,17 +7899,45 @@ func (p *TJdbcExecutorCtorParams) SetDriverPath(val *string) { func (p *TJdbcExecutorCtorParams) SetTableType(val *TOdbcTableType) { p.TableType = val } +func (p *TJdbcExecutorCtorParams) SetConnectionPoolMinSize(val *int32) { + p.ConnectionPoolMinSize = val +} +func (p *TJdbcExecutorCtorParams) SetConnectionPoolMaxSize(val *int32) { + p.ConnectionPoolMaxSize = val +} +func (p *TJdbcExecutorCtorParams) SetConnectionPoolMaxWaitTime(val *int32) { + p.ConnectionPoolMaxWaitTime = val +} +func (p *TJdbcExecutorCtorParams) SetConnectionPoolMaxLifeTime(val *int32) { + p.ConnectionPoolMaxLifeTime = val +} +func (p *TJdbcExecutorCtorParams) SetConnectionPoolCacheClearTime(val *int32) { + p.ConnectionPoolCacheClearTime = val +} +func (p *TJdbcExecutorCtorParams) SetConnectionPoolKeepAlive(val *bool) { + p.ConnectionPoolKeepAlive = val +} +func (p *TJdbcExecutorCtorParams) SetCatalogId(val *int64) { + p.CatalogId = val +} var fieldIDToName_TJdbcExecutorCtorParams = map[int16]string{ - 1: "statement", - 2: "jdbc_url", - 3: "jdbc_user", - 4: "jdbc_password", - 5: "jdbc_driver_class", - 6: "batch_size", - 7: "op", - 8: "driver_path", - 9: "table_type", + 1: "statement", + 2: "jdbc_url", + 3: "jdbc_user", + 4: 
"jdbc_password", + 5: "jdbc_driver_class", + 6: "batch_size", + 7: "op", + 8: "driver_path", + 9: "table_type", + 10: "connection_pool_min_size", + 11: "connection_pool_max_size", + 12: "connection_pool_max_wait_time", + 13: "connection_pool_max_life_time", + 14: "connection_pool_cache_clear_time", + 15: "connection_pool_keep_alive", + 16: "catalog_id", } func (p *TJdbcExecutorCtorParams) IsSetStatement() bool { @@ -7428,9 +7976,37 @@ func (p *TJdbcExecutorCtorParams) IsSetTableType() bool { return p.TableType != nil } -func (p *TJdbcExecutorCtorParams) Read(iprot thrift.TProtocol) (err error) { +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolMinSize() bool { + return p.ConnectionPoolMinSize != nil +} - var fieldTypeId thrift.TType +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolMaxSize() bool { + return p.ConnectionPoolMaxSize != nil +} + +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolMaxWaitTime() bool { + return p.ConnectionPoolMaxWaitTime != nil +} + +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolMaxLifeTime() bool { + return p.ConnectionPoolMaxLifeTime != nil +} + +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolCacheClearTime() bool { + return p.ConnectionPoolCacheClearTime != nil +} + +func (p *TJdbcExecutorCtorParams) IsSetConnectionPoolKeepAlive() bool { + return p.ConnectionPoolKeepAlive != nil +} + +func (p *TJdbcExecutorCtorParams) IsSetCatalogId() bool { + return p.CatalogId != nil +} + +func (p *TJdbcExecutorCtorParams) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType var fieldId int16 if _, err = iprot.ReadStructBegin(); err != nil { @@ -7452,97 +8028,134 @@ func (p *TJdbcExecutorCtorParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.STRING { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.STRING { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.STRING { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I32 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I32 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = 
iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.STRING { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I32 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.I32 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.I32 { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.I32 { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.I32 { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 14: + if fieldTypeId == thrift.I32 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.I64 { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -7568,85 +8181,181 @@ ReadStructEndError: } func (p *TJdbcExecutorCtorParams) ReadField1(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Statement = &v + _field = &v } + p.Statement = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcUrl = &v + _field = &v } + p.JdbcUrl = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcUser = &v + _field = &v } + p.JdbcUser = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcPassword = &v + _field = &v } + p.JdbcPassword = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JdbcDriverClass = &v + _field = &v } + p.JdbcDriverClass = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - 
p.BatchSize = &v + _field = &v } + p.BatchSize = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *TJdbcOperation if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TJdbcOperation(v) - p.Op = &tmp + _field = &tmp } + p.Op = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField8(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.DriverPath = &v + _field = &v } + p.DriverPath = _field return nil } - func (p *TJdbcExecutorCtorParams) ReadField9(iprot thrift.TProtocol) error { + + var _field *TOdbcTableType if v, err := iprot.ReadI32(); err != nil { return err } else { tmp := TOdbcTableType(v) - p.TableType = &tmp + _field = &tmp + } + p.TableType = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField10(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v } + p.ConnectionPoolMinSize = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField11(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxSize = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField12(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxWaitTime = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField13(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolMaxLifeTime = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField14(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolCacheClearTime = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField15(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.ConnectionPoolKeepAlive = _field + return nil +} +func (p *TJdbcExecutorCtorParams) ReadField16(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CatalogId = _field return nil } @@ -7692,7 +8401,34 @@ func (p *TJdbcExecutorCtorParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -7841,15 +8577,148 @@ func (p *TJdbcExecutorCtorParams) writeField7(oprot thrift.TProtocol) (err error WriteFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", 
p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetDriverPath() { + if err = oprot.WriteFieldBegin("driver_path", thrift.STRING, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DriverPath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTableType() { + if err = oprot.WriteFieldBegin("table_type", thrift.I32, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(int32(*p.TableType)); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMinSize() { + if err = oprot.WriteFieldBegin("connection_pool_min_size", thrift.I32, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMinSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxSize() { + if err = oprot.WriteFieldBegin("connection_pool_max_size", thrift.I32, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxWaitTime() { + if err = oprot.WriteFieldBegin("connection_pool_max_wait_time", thrift.I32, 12); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxWaitTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolMaxLifeTime() { + if err = oprot.WriteFieldBegin("connection_pool_max_life_time", 
thrift.I32, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolMaxLifeTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + +func (p *TJdbcExecutorCtorParams) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolCacheClearTime() { + if err = oprot.WriteFieldBegin("connection_pool_cache_clear_time", thrift.I32, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ConnectionPoolCacheClearTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } -func (p *TJdbcExecutorCtorParams) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetDriverPath() { - if err = oprot.WriteFieldBegin("driver_path", thrift.STRING, 8); err != nil { +func (p *TJdbcExecutorCtorParams) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetConnectionPoolKeepAlive() { + if err = oprot.WriteFieldBegin("connection_pool_keep_alive", thrift.BOOL, 15); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteString(*p.DriverPath); err != nil { + if err := oprot.WriteBool(*p.ConnectionPoolKeepAlive); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -7858,17 +8727,17 @@ func (p *TJdbcExecutorCtorParams) writeField8(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) } -func (p *TJdbcExecutorCtorParams) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetTableType() { - if err = oprot.WriteFieldBegin("table_type", thrift.I32, 9); err != nil { +func (p *TJdbcExecutorCtorParams) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogId() { + if err = oprot.WriteFieldBegin("catalog_id", thrift.I64, 16); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteI32(int32(*p.TableType)); err != nil { + if err := oprot.WriteI64(*p.CatalogId); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -7877,9 +8746,9 @@ func (p *TJdbcExecutorCtorParams) writeField9(oprot thrift.TProtocol) (err error } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) } func (p *TJdbcExecutorCtorParams) String() string { @@ -7887,6 +8756,7 @@ func (p *TJdbcExecutorCtorParams) String() string { return "" } return fmt.Sprintf("TJdbcExecutorCtorParams(%+v)", *p) + } func (p *TJdbcExecutorCtorParams) 
DeepEqual(ano *TJdbcExecutorCtorParams) bool { @@ -7922,6 +8792,27 @@ func (p *TJdbcExecutorCtorParams) DeepEqual(ano *TJdbcExecutorCtorParams) bool { if !p.Field9DeepEqual(ano.TableType) { return false } + if !p.Field10DeepEqual(ano.ConnectionPoolMinSize) { + return false + } + if !p.Field11DeepEqual(ano.ConnectionPoolMaxSize) { + return false + } + if !p.Field12DeepEqual(ano.ConnectionPoolMaxWaitTime) { + return false + } + if !p.Field13DeepEqual(ano.ConnectionPoolMaxLifeTime) { + return false + } + if !p.Field14DeepEqual(ano.ConnectionPoolCacheClearTime) { + return false + } + if !p.Field15DeepEqual(ano.ConnectionPoolKeepAlive) { + return false + } + if !p.Field16DeepEqual(ano.CatalogId) { + return false + } return true } @@ -8033,6 +8924,90 @@ func (p *TJdbcExecutorCtorParams) Field9DeepEqual(src *TOdbcTableType) bool { } return true } +func (p *TJdbcExecutorCtorParams) Field10DeepEqual(src *int32) bool { + + if p.ConnectionPoolMinSize == src { + return true + } else if p.ConnectionPoolMinSize == nil || src == nil { + return false + } + if *p.ConnectionPoolMinSize != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field11DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxSize == src { + return true + } else if p.ConnectionPoolMaxSize == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxSize != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field12DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxWaitTime == src { + return true + } else if p.ConnectionPoolMaxWaitTime == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxWaitTime != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field13DeepEqual(src *int32) bool { + + if p.ConnectionPoolMaxLifeTime == src { + return true + } else if p.ConnectionPoolMaxLifeTime == nil || src == nil { + return false + } + if *p.ConnectionPoolMaxLifeTime != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field14DeepEqual(src *int32) bool { + + if p.ConnectionPoolCacheClearTime == src { + return true + } else if p.ConnectionPoolCacheClearTime == nil || src == nil { + return false + } + if *p.ConnectionPoolCacheClearTime != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field15DeepEqual(src *bool) bool { + + if p.ConnectionPoolKeepAlive == src { + return true + } else if p.ConnectionPoolKeepAlive == nil || src == nil { + return false + } + if *p.ConnectionPoolKeepAlive != *src { + return false + } + return true +} +func (p *TJdbcExecutorCtorParams) Field16DeepEqual(src *int64) bool { + + if p.CatalogId == src { + return true + } else if p.CatalogId == nil || src == nil { + return false + } + if *p.CatalogId != *src { + return false + } + return true +} type TJavaUdfExecutorCtorParams struct { Fn *TFunction `thrift:"fn,1,optional" frugal:"1,optional,TFunction" json:"fn,omitempty"` @@ -8057,7 +9032,6 @@ func NewTJavaUdfExecutorCtorParams() *TJavaUdfExecutorCtorParams { } func (p *TJavaUdfExecutorCtorParams) InitDefault() { - *p = TJavaUdfExecutorCtorParams{} } var TJavaUdfExecutorCtorParams_Fn_DEFAULT *TFunction @@ -8342,157 +9316,126 @@ func (p *TJavaUdfExecutorCtorParams) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == 
thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { if err = p.ReadField5(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { if err = p.ReadField6(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { if err = p.ReadField7(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { if err = p.ReadField8(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.I64 { if err = p.ReadField9(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 10: if fieldTypeId == thrift.I64 { if err = p.ReadField10(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 11: if fieldTypeId == thrift.I64 { if err = p.ReadField11(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 12: if fieldTypeId == thrift.I64 { if err = p.ReadField12(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 13: if fieldTypeId == thrift.I64 { if err = p.ReadField13(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 14: if fieldTypeId == thrift.I64 { if err = p.ReadField14(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 15: if fieldTypeId == thrift.I64 { if err = 
p.ReadField15(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -8518,136 +9461,165 @@ ReadStructEndError: } func (p *TJavaUdfExecutorCtorParams) ReadField1(iprot thrift.TProtocol) error { - p.Fn = NewTFunction() - if err := p.Fn.Read(iprot); err != nil { + _field := NewTFunction() + if err := _field.Read(iprot); err != nil { return err } + p.Fn = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Location = &v + _field = &v } + p.Location = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputOffsetsPtrs = &v + _field = &v } + p.InputOffsetsPtrs = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputNullsPtrs = &v + _field = &v } + p.InputNullsPtrs = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputBufferPtrs = &v + _field = &v } + p.InputBufferPtrs = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputNullPtr = &v + _field = &v } + p.OutputNullPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputBufferPtr = &v + _field = &v } + p.OutputBufferPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField8(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputOffsetsPtr = &v + _field = &v } + p.OutputOffsetsPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField9(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputIntermediateStatePtr = &v + _field = &v } + p.OutputIntermediateStatePtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BatchSizePtr = &v + _field = &v } + p.BatchSizePtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField11(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputPlacesPtr = &v + _field = &v } + p.InputPlacesPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField12(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputArrayNullsBufferPtr = &v + _field = &v } + p.InputArrayNullsBufferPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField13(iprot thrift.TProtocol) error { + + var _field 
*int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.InputArrayStringOffsetsPtrs = &v + _field = &v } + p.InputArrayStringOffsetsPtrs = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputArrayNullPtr = &v + _field = &v } + p.OutputArrayNullPtr = _field return nil } - func (p *TJavaUdfExecutorCtorParams) ReadField15(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.OutputArrayStringOffsetsPtr = &v + _field = &v } + p.OutputArrayStringOffsetsPtr = _field return nil } @@ -8717,7 +9689,6 @@ func (p *TJavaUdfExecutorCtorParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 15 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9026,6 +9997,7 @@ func (p *TJavaUdfExecutorCtorParams) String() string { return "" } return fmt.Sprintf("TJavaUdfExecutorCtorParams(%+v)", *p) + } func (p *TJavaUdfExecutorCtorParams) DeepEqual(ano *TJavaUdfExecutorCtorParams) bool { @@ -9275,7 +10247,6 @@ func NewTJvmMemoryPool() *TJvmMemoryPool { } func (p *TJvmMemoryPool) InitDefault() { - *p = TJvmMemoryPool{} } func (p *TJvmMemoryPool) GetCommitted() (v int64) { @@ -9387,10 +10358,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCommitted = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -9398,10 +10367,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetInit = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -9409,10 +10376,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetMax = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -9420,10 +10385,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUsed = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -9431,10 +10394,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPeakCommitted = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { @@ -9442,10 +10403,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPeakInit = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 7: if fieldTypeId == thrift.I64 { @@ -9453,10 +10412,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPeakMax = true - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 8: if fieldTypeId == thrift.I64 { @@ -9464,10 +10421,8 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPeakUsed = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 9: if fieldTypeId == thrift.STRING { @@ -9475,17 +10430,14 @@ func (p *TJvmMemoryPool) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetName = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -9557,83 +10509,102 @@ RequiredFieldNotSetError: } func (p *TJvmMemoryPool) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Committed = v + _field = v } + p.Committed = _field return nil } - func (p *TJvmMemoryPool) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Init = v + _field = v } + p.Init = _field return nil } - func (p *TJvmMemoryPool) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Max = v + _field = v } + p.Max = _field return nil } - func (p *TJvmMemoryPool) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.Used = v + _field = v } + p.Used = _field return nil } - func (p *TJvmMemoryPool) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PeakCommitted = v + _field = v } + p.PeakCommitted = _field return nil } - func (p *TJvmMemoryPool) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PeakInit = v + _field = v } + p.PeakInit = _field return nil } - func (p *TJvmMemoryPool) ReadField7(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PeakMax = v + _field = v } + p.PeakMax = _field return nil } - func (p *TJvmMemoryPool) ReadField8(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.PeakUsed = v + _field = v } + p.PeakUsed = _field return nil } - func (p *TJvmMemoryPool) ReadField9(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Name = v + _field = v } + p.Name = _field return nil } @@ -9679,7 +10650,6 @@ func (p *TJvmMemoryPool) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -9856,6 +10826,7 @@ func (p *TJvmMemoryPool) String() string { return "" } return fmt.Sprintf("TJvmMemoryPool(%+v)", *p) + } func (p *TJvmMemoryPool) DeepEqual(ano *TJvmMemoryPool) bool { @@ -9972,7 +10943,6 @@ func NewTGetJvmMemoryMetricsResponse() *TGetJvmMemoryMetricsResponse { } func (p *TGetJvmMemoryMetricsResponse) InitDefault() { - *p = 
TGetJvmMemoryMetricsResponse{} } func (p *TGetJvmMemoryMetricsResponse) GetMemoryPools() (v []*TJvmMemoryPool) { @@ -10057,10 +11027,8 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetMemoryPools = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -10068,10 +11036,8 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetGcNumWarnThresholdExceeded = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -10079,10 +11045,8 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetGcNumInfoThresholdExceeded = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -10090,10 +11054,8 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetGcTotalExtraSleepTimeMillis = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -10101,10 +11063,8 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetGcCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.I64 { @@ -10112,17 +11072,14 @@ func (p *TGetJvmMemoryMetricsResponse) Read(iprot thrift.TProtocol) (err error) goto ReadFieldError } issetGcTimeMillis = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -10183,63 +11140,77 @@ func (p *TGetJvmMemoryMetricsResponse) ReadField1(iprot thrift.TProtocol) error if err != nil { return err } - p.MemoryPools = make([]*TJvmMemoryPool, 0, size) + _field := make([]*TJvmMemoryPool, 0, size) + values := make([]TJvmMemoryPool, size) for i := 0; i < size; i++ { - _elem := NewTJvmMemoryPool() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.MemoryPools = append(p.MemoryPools, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.MemoryPools = _field return nil } - func (p *TGetJvmMemoryMetricsResponse) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.GcNumWarnThresholdExceeded = v + _field = v } + p.GcNumWarnThresholdExceeded = _field return nil } - func (p *TGetJvmMemoryMetricsResponse) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.GcNumInfoThresholdExceeded = v + _field = v } + p.GcNumInfoThresholdExceeded = _field return nil 
} - func (p *TGetJvmMemoryMetricsResponse) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.GcTotalExtraSleepTimeMillis = v + _field = v } + p.GcTotalExtraSleepTimeMillis = _field return nil } - func (p *TGetJvmMemoryMetricsResponse) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.GcCount = v + _field = v } + p.GcCount = _field return nil } - func (p *TGetJvmMemoryMetricsResponse) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.GcTimeMillis = v + _field = v } + p.GcTimeMillis = _field return nil } @@ -10273,7 +11244,6 @@ func (p *TGetJvmMemoryMetricsResponse) Write(oprot thrift.TProtocol) (err error) fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10407,6 +11377,7 @@ func (p *TGetJvmMemoryMetricsResponse) String() string { return "" } return fmt.Sprintf("TGetJvmMemoryMetricsResponse(%+v)", *p) + } func (p *TGetJvmMemoryMetricsResponse) DeepEqual(ano *TGetJvmMemoryMetricsResponse) bool { @@ -10499,7 +11470,6 @@ func NewTJvmThreadInfo() *TJvmThreadInfo { } func (p *TJvmThreadInfo) InitDefault() { - *p = TJvmThreadInfo{} } func (p *TJvmThreadInfo) GetSummary() (v string) { @@ -10584,10 +11554,8 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetSummary = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -10595,10 +11563,8 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetCpuTimeInNs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I64 { @@ -10606,10 +11572,8 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUserTimeInNs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I64 { @@ -10617,10 +11581,8 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBlockedCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -10628,10 +11590,8 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBlockedTimeInMs = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 6: if fieldTypeId == thrift.BOOL { @@ -10639,17 +11599,14 @@ func (p *TJvmThreadInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetIsInNative = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { 
goto ReadFieldEndError } @@ -10706,56 +11663,69 @@ RequiredFieldNotSetError: } func (p *TJvmThreadInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Summary = v + _field = v } + p.Summary = _field return nil } - func (p *TJvmThreadInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.CpuTimeInNs = v + _field = v } + p.CpuTimeInNs = _field return nil } - func (p *TJvmThreadInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.UserTimeInNs = v + _field = v } + p.UserTimeInNs = _field return nil } - func (p *TJvmThreadInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BlockedCount = v + _field = v } + p.BlockedCount = _field return nil } - func (p *TJvmThreadInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BlockedTimeInMs = v + _field = v } + p.BlockedTimeInMs = _field return nil } - func (p *TJvmThreadInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsInNative = v + _field = v } + p.IsInNative = _field return nil } @@ -10789,7 +11759,6 @@ func (p *TJvmThreadInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 6 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -10915,6 +11884,7 @@ func (p *TJvmThreadInfo) String() string { return "" } return fmt.Sprintf("TJvmThreadInfo(%+v)", *p) + } func (p *TJvmThreadInfo) DeepEqual(ano *TJvmThreadInfo) bool { @@ -10996,7 +11966,6 @@ func NewTGetJvmThreadsInfoRequest() *TGetJvmThreadsInfoRequest { } func (p *TGetJvmThreadsInfoRequest) InitDefault() { - *p = TGetJvmThreadsInfoRequest{} } func (p *TGetJvmThreadsInfoRequest) GetGetCompleteInfo() (v bool) { @@ -11036,17 +12005,14 @@ func (p *TGetJvmThreadsInfoRequest) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetGetCompleteInfo = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11078,11 +12044,14 @@ RequiredFieldNotSetError: } func (p *TGetJvmThreadsInfoRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.GetCompleteInfo = v + _field = v } + p.GetCompleteInfo = _field return nil } @@ -11096,7 +12065,6 @@ func (p *TGetJvmThreadsInfoRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11137,6 +12105,7 @@ func (p *TGetJvmThreadsInfoRequest) String() string { return "" } return fmt.Sprintf("TGetJvmThreadsInfoRequest(%+v)", *p) + } func (p *TGetJvmThreadsInfoRequest) DeepEqual(ano *TGetJvmThreadsInfoRequest) bool { @@ -11171,7 +12140,6 @@ func NewTGetJvmThreadsInfoResponse() *TGetJvmThreadsInfoResponse { } func (p *TGetJvmThreadsInfoResponse) InitDefault() { - *p = TGetJvmThreadsInfoResponse{} } func (p *TGetJvmThreadsInfoResponse) GetTotalThreadCount() (v 
int32) { @@ -11246,10 +12214,8 @@ func (p *TGetJvmThreadsInfoResponse) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTotalThreadCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -11257,10 +12223,8 @@ func (p *TGetJvmThreadsInfoResponse) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetDaemonThreadCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -11268,27 +12232,22 @@ func (p *TGetJvmThreadsInfoResponse) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetPeakThreadCount = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.LIST { if err = p.ReadField4(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11330,49 +12289,59 @@ RequiredFieldNotSetError: } func (p *TGetJvmThreadsInfoResponse) ReadField1(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.TotalThreadCount = v + _field = v } + p.TotalThreadCount = _field return nil } - func (p *TGetJvmThreadsInfoResponse) ReadField2(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.DaemonThreadCount = v + _field = v } + p.DaemonThreadCount = _field return nil } - func (p *TGetJvmThreadsInfoResponse) ReadField3(iprot thrift.TProtocol) error { + + var _field int32 if v, err := iprot.ReadI32(); err != nil { return err } else { - p.PeakThreadCount = v + _field = v } + p.PeakThreadCount = _field return nil } - func (p *TGetJvmThreadsInfoResponse) ReadField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.Threads = make([]*TJvmThreadInfo, 0, size) + _field := make([]*TJvmThreadInfo, 0, size) + values := make([]TJvmThreadInfo, size) for i := 0; i < size; i++ { - _elem := NewTJvmThreadInfo() + _elem := &values[i] + _elem.InitDefault() + if err := _elem.Read(iprot); err != nil { return err } - p.Threads = append(p.Threads, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.Threads = _field return nil } @@ -11398,7 +12367,6 @@ func (p *TGetJvmThreadsInfoResponse) Write(oprot thrift.TProtocol) (err error) { fieldId = 4 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11500,6 +12468,7 @@ func (p *TGetJvmThreadsInfoResponse) String() string { return "" } return fmt.Sprintf("TGetJvmThreadsInfoResponse(%+v)", *p) + } func (p *TGetJvmThreadsInfoResponse) DeepEqual(ano *TGetJvmThreadsInfoResponse) bool { @@ -11567,7 +12536,6 @@ func NewTGetJMXJsonResponse() *TGetJMXJsonResponse { } func (p *TGetJMXJsonResponse) InitDefault() { - *p = TGetJMXJsonResponse{} } func (p *TGetJMXJsonResponse) GetJmxJson() (v string) { @@ 
-11607,17 +12575,14 @@ func (p *TGetJMXJsonResponse) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetJmxJson = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11649,11 +12614,14 @@ RequiredFieldNotSetError: } func (p *TGetJMXJsonResponse) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.JmxJson = v + _field = v } + p.JmxJson = _field return nil } @@ -11667,7 +12635,6 @@ func (p *TGetJMXJsonResponse) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11708,6 +12675,7 @@ func (p *TGetJMXJsonResponse) String() string { return "" } return fmt.Sprintf("TGetJMXJsonResponse(%+v)", *p) + } func (p *TGetJMXJsonResponse) DeepEqual(ano *TGetJMXJsonResponse) bool { @@ -11734,6 +12702,9 @@ type TBackend struct { Host string `thrift:"host,1,required" frugal:"1,required,string" json:"host"` BePort TPort `thrift:"be_port,2,required" frugal:"2,required,i32" json:"be_port"` HttpPort TPort `thrift:"http_port,3,required" frugal:"3,required,i32" json:"http_port"` + BrpcPort *TPort `thrift:"brpc_port,4,optional" frugal:"4,optional,i32" json:"brpc_port,omitempty"` + IsAlive *bool `thrift:"is_alive,5,optional" frugal:"5,optional,bool" json:"is_alive,omitempty"` + Id *int64 `thrift:"id,6,optional" frugal:"6,optional,i64" json:"id,omitempty"` } func NewTBackend() *TBackend { @@ -11741,7 +12712,6 @@ func NewTBackend() *TBackend { } func (p *TBackend) InitDefault() { - *p = TBackend{} } func (p *TBackend) GetHost() (v string) { @@ -11755,6 +12725,33 @@ func (p *TBackend) GetBePort() (v TPort) { func (p *TBackend) GetHttpPort() (v TPort) { return p.HttpPort } + +var TBackend_BrpcPort_DEFAULT TPort + +func (p *TBackend) GetBrpcPort() (v TPort) { + if !p.IsSetBrpcPort() { + return TBackend_BrpcPort_DEFAULT + } + return *p.BrpcPort +} + +var TBackend_IsAlive_DEFAULT bool + +func (p *TBackend) GetIsAlive() (v bool) { + if !p.IsSetIsAlive() { + return TBackend_IsAlive_DEFAULT + } + return *p.IsAlive +} + +var TBackend_Id_DEFAULT int64 + +func (p *TBackend) GetId() (v int64) { + if !p.IsSetId() { + return TBackend_Id_DEFAULT + } + return *p.Id +} func (p *TBackend) SetHost(val string) { p.Host = val } @@ -11764,11 +12761,35 @@ func (p *TBackend) SetBePort(val TPort) { func (p *TBackend) SetHttpPort(val TPort) { p.HttpPort = val } +func (p *TBackend) SetBrpcPort(val *TPort) { + p.BrpcPort = val +} +func (p *TBackend) SetIsAlive(val *bool) { + p.IsAlive = val +} +func (p *TBackend) SetId(val *int64) { + p.Id = val +} var fieldIDToName_TBackend = map[int16]string{ 1: "host", 2: "be_port", 3: "http_port", + 4: "brpc_port", + 5: "is_alive", + 6: "id", +} + +func (p *TBackend) IsSetBrpcPort() bool { + return p.BrpcPort != nil +} + +func (p *TBackend) IsSetIsAlive() bool { + return p.IsAlive != nil +} + +func (p *TBackend) IsSetId() bool { + return p.Id != nil } func (p *TBackend) Read(iprot thrift.TProtocol) (err error) { @@ -11799,10 +12820,8 @@ func (p *TBackend) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err 
= iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -11810,10 +12829,8 @@ func (p *TBackend) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBePort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -11821,17 +12838,38 @@ func (p *TBackend) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHttpPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I32 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -11873,29 +12911,69 @@ RequiredFieldNotSetError: } func (p *TBackend) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TBackend) ReadField2(iprot thrift.TProtocol) error { + + var _field TPort if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BePort = v + _field = v } + p.BePort = _field return nil } - func (p *TBackend) ReadField3(iprot thrift.TProtocol) error { + + var _field TPort + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.HttpPort = _field + return nil +} +func (p *TBackend) ReadField4(iprot thrift.TProtocol) error { + + var _field *TPort if v, err := iprot.ReadI32(); err != nil { return err } else { - p.HttpPort = v + _field = &v + } + p.BrpcPort = _field + return nil +} +func (p *TBackend) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsAlive = _field + return nil +} +func (p *TBackend) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v } + p.Id = _field return nil } @@ -11917,7 +12995,18 @@ func (p *TBackend) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -11987,11 +13076,69 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) } +func (p *TBackend) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBrpcPort() { + if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 4); err != nil { + goto 
WriteFieldBeginError + } + if err := oprot.WriteI32(*p.BrpcPort); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TBackend) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsAlive() { + if err = oprot.WriteFieldBegin("is_alive", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsAlive); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TBackend) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetId() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Id); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + func (p *TBackend) String() string { if p == nil { return "" } return fmt.Sprintf("TBackend(%+v)", *p) + } func (p *TBackend) DeepEqual(ano *TBackend) bool { @@ -12009,6 +13156,15 @@ func (p *TBackend) DeepEqual(ano *TBackend) bool { if !p.Field3DeepEqual(ano.HttpPort) { return false } + if !p.Field4DeepEqual(ano.BrpcPort) { + return false + } + if !p.Field5DeepEqual(ano.IsAlive) { + return false + } + if !p.Field6DeepEqual(ano.Id) { + return false + } return true } @@ -12033,6 +13189,42 @@ func (p *TBackend) Field3DeepEqual(src TPort) bool { } return true } +func (p *TBackend) Field4DeepEqual(src *TPort) bool { + + if p.BrpcPort == src { + return true + } else if p.BrpcPort == nil || src == nil { + return false + } + if *p.BrpcPort != *src { + return false + } + return true +} +func (p *TBackend) Field5DeepEqual(src *bool) bool { + + if p.IsAlive == src { + return true + } else if p.IsAlive == nil || src == nil { + return false + } + if *p.IsAlive != *src { + return false + } + return true +} +func (p *TBackend) Field6DeepEqual(src *int64) bool { + + if p.Id == src { + return true + } else if p.Id == nil || src == nil { + return false + } + if *p.Id != *src { + return false + } + return true +} type TReplicaInfo struct { Host string `thrift:"host,1,required" frugal:"1,required,string" json:"host"` @@ -12047,7 +13239,6 @@ func NewTReplicaInfo() *TReplicaInfo { } func (p *TReplicaInfo) InitDefault() { - *p = TReplicaInfo{} } func (p *TReplicaInfo) GetHost() (v string) { @@ -12123,10 +13314,8 @@ func (p *TReplicaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHost = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I32 { @@ -12134,10 +13323,8 @@ func (p *TReplicaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBePort = true - } else { - if err = 
iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.I32 { @@ -12145,10 +13332,8 @@ func (p *TReplicaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetHttpPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 4: if fieldTypeId == thrift.I32 { @@ -12156,10 +13341,8 @@ func (p *TReplicaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBrpcPort = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 5: if fieldTypeId == thrift.I64 { @@ -12167,17 +13350,14 @@ func (p *TReplicaInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetReplicaId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12229,47 +13409,58 @@ RequiredFieldNotSetError: } func (p *TReplicaInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = v + _field = v } + p.Host = _field return nil } - func (p *TReplicaInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field TPort if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BePort = v + _field = v } + p.BePort = _field return nil } - func (p *TReplicaInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field TPort if v, err := iprot.ReadI32(); err != nil { return err } else { - p.HttpPort = v + _field = v } + p.HttpPort = _field return nil } - func (p *TReplicaInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field TPort if v, err := iprot.ReadI32(); err != nil { return err } else { - p.BrpcPort = v + _field = v } + p.BrpcPort = _field return nil } - func (p *TReplicaInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field TReplicaId if v, err := iprot.ReadI64(); err != nil { return err } else { - p.ReplicaId = v + _field = v } + p.ReplicaId = _field return nil } @@ -12299,7 +13490,6 @@ func (p *TReplicaInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12408,6 +13598,7 @@ func (p *TReplicaInfo) String() string { return "" } return fmt.Sprintf("TReplicaInfo(%+v)", *p) + } func (p *TReplicaInfo) DeepEqual(ano *TReplicaInfo) bool { @@ -12480,7 +13671,6 @@ func NewTResourceInfo() *TResourceInfo { } func (p *TResourceInfo) InitDefault() { - *p = TResourceInfo{} } func (p *TResourceInfo) GetUser() (v string) { @@ -12529,10 +13719,8 @@ func (p *TResourceInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetUser = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { @@ -12540,17 +13728,14 @@ func (p *TResourceInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetGroup = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil 
{ - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12587,20 +13772,25 @@ RequiredFieldNotSetError: } func (p *TResourceInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.User = v + _field = v } + p.User = _field return nil } - func (p *TResourceInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Group = v + _field = v } + p.Group = _field return nil } @@ -12618,7 +13808,6 @@ func (p *TResourceInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12676,6 +13865,7 @@ func (p *TResourceInfo) String() string { return "" } return fmt.Sprintf("TResourceInfo(%+v)", *p) + } func (p *TResourceInfo) DeepEqual(ano *TResourceInfo) bool { @@ -12719,7 +13909,6 @@ func NewTTabletCommitInfo() *TTabletCommitInfo { } func (p *TTabletCommitInfo) InitDefault() { - *p = TTabletCommitInfo{} } func (p *TTabletCommitInfo) GetTabletId() (v int64) { @@ -12785,10 +13974,8 @@ func (p *TTabletCommitInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetTabletId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.I64 { @@ -12796,27 +13983,22 @@ func (p *TTabletCommitInfo) Read(iprot thrift.TProtocol) (err error) { goto ReadFieldError } issetBackendId = true - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.LIST { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -12853,30 +14035,35 @@ RequiredFieldNotSetError: } func (p *TTabletCommitInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = v + _field = v } + p.TabletId = _field return nil } - func (p *TTabletCommitInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.BackendId = v + _field = v } + p.BackendId = _field return nil } - func (p *TTabletCommitInfo) ReadField3(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { return err } - p.InvalidDictCols = make([]string, 0, size) + _field := make([]string, 0, size) for i := 0; i < size; i++ { + var _elem string if v, err := iprot.ReadString(); err != nil { return err @@ -12884,11 +14071,12 @@ func (p *TTabletCommitInfo) ReadField3(iprot thrift.TProtocol) error { _elem = v } - p.InvalidDictCols = append(p.InvalidDictCols, _elem) + _field = append(_field, _elem) } if err := iprot.ReadListEnd(); err != nil { return err } + p.InvalidDictCols = _field return nil } @@ -12910,7 
+14098,6 @@ func (p *TTabletCommitInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12995,6 +14182,7 @@ func (p *TTabletCommitInfo) String() string { return "" } return fmt.Sprintf("TTabletCommitInfo(%+v)", *p) + } func (p *TTabletCommitInfo) DeepEqual(ano *TTabletCommitInfo) bool { @@ -13053,7 +14241,6 @@ func NewTErrorTabletInfo() *TErrorTabletInfo { } func (p *TErrorTabletInfo) InitDefault() { - *p = TErrorTabletInfo{} } var TErrorTabletInfo_TabletId_DEFAULT int64 @@ -13117,27 +14304,22 @@ func (p *TErrorTabletInfo) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13163,20 +14345,25 @@ ReadStructEndError: } func (p *TErrorTabletInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 if v, err := iprot.ReadI64(); err != nil { return err } else { - p.TabletId = &v + _field = &v } + p.TabletId = _field return nil } - func (p *TErrorTabletInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Msg = &v + _field = &v } + p.Msg = _field return nil } @@ -13194,7 +14381,6 @@ func (p *TErrorTabletInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13256,6 +14442,7 @@ func (p *TErrorTabletInfo) String() string { return "" } return fmt.Sprintf("TErrorTabletInfo(%+v)", *p) + } func (p *TErrorTabletInfo) DeepEqual(ano *TErrorTabletInfo) bool { @@ -13309,7 +14496,6 @@ func NewTUserIdentity() *TUserIdentity { } func (p *TUserIdentity) InitDefault() { - *p = TUserIdentity{} } var TUserIdentity_Username_DEFAULT string @@ -13390,37 +14576,30 @@ func (p *TUserIdentity) Read(iprot thrift.TProtocol) (err error) { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 2: if fieldTypeId == thrift.STRING { if err = p.ReadField2(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } case 3: if fieldTypeId == thrift.BOOL { if err = p.ReadField3(iprot); err != nil { goto ReadFieldError } - } else { - if err = iprot.Skip(fieldTypeId); err != nil { - goto SkipFieldError - } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } } - if err = iprot.ReadFieldEnd(); err != nil { goto ReadFieldEndError } @@ -13446,29 +14625,36 @@ ReadStructEndError: } func (p *TUserIdentity) ReadField1(iprot thrift.TProtocol) error { + + var 
_field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Username = &v + _field = &v } + p.Username = _field return nil } - func (p *TUserIdentity) ReadField2(iprot thrift.TProtocol) error { + + var _field *string if v, err := iprot.ReadString(); err != nil { return err } else { - p.Host = &v + _field = &v } + p.Host = _field return nil } - func (p *TUserIdentity) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool if v, err := iprot.ReadBool(); err != nil { return err } else { - p.IsDomain = &v + _field = &v } + p.IsDomain = _field return nil } @@ -13490,7 +14676,6 @@ func (p *TUserIdentity) Write(oprot thrift.TProtocol) (err error) { fieldId = 3 goto WriteFieldError } - } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -13571,6 +14756,7 @@ func (p *TUserIdentity) String() string { return "" } return fmt.Sprintf("TUserIdentity(%+v)", *p) + } func (p *TUserIdentity) DeepEqual(ano *TUserIdentity) bool { diff --git a/pkg/rpc/kitex_gen/types/k-Types.go b/pkg/rpc/kitex_gen/types/k-Types.go index 9ddafab5..17c932a4 100644 --- a/pkg/rpc/kitex_gen/types/k-Types.go +++ b/pkg/rpc/kitex_gen/types/k-Types.go @@ -1,4 +1,4 @@ -// Code generated by Kitex v0.4.4. DO NOT EDIT. +// Code generated by Kitex v0.8.0. DO NOT EDIT. package types @@ -1051,6 +1051,20 @@ func (p *TTypeDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -1192,6 +1206,19 @@ func (p *TTypeDesc) FastReadField6(buf []byte) (int, error) { return offset, nil } +func (p *TTypeDesc) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BeExecVersion = &v + + } + return offset, nil +} + // for compatibility func (p *TTypeDesc) FastWrite(buf []byte) int { return 0 @@ -1204,6 +1231,7 @@ func (p *TTypeDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) @@ -1223,6 +1251,7 @@ func (p *TTypeDesc) BLength() int { l += p.field4Length() l += p.field5Length() l += p.field6Length() + l += p.field7Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -1307,6 +1336,17 @@ func (p *TTypeDesc) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWrite return offset } +func (p *TTypeDesc) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBeExecVersion() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_exec_version", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.BeExecVersion) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TTypeDesc) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("types", thrift.LIST, 1) @@ -1377,6 +1417,17 @@ func (p 
*TTypeDesc) field6Length() int { return l } +func (p *TTypeDesc) field7Length() int { + l := 0 + if p.IsSetBeExecVersion() { + l += bthrift.Binary.FieldBeginLength("be_exec_version", thrift.I32, 7) + l += bthrift.Binary.I32Length(*p.BeExecVersion) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TColumnType) FastRead(buf []byte) (int, error) { var err error var offset int @@ -3343,6 +3394,48 @@ func (p *TFunction) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 14: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -3590,6 +3683,47 @@ func (p *TFunction) FastReadField13(buf []byte) (int, error) { return offset, nil } +func (p *TFunction) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsUdtfFunction = v + + } + return offset, nil +} + +func (p *TFunction) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsStaticLoad = v + + } + return offset, nil +} + +func (p *TFunction) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ExpirationTime = &v + + } + return offset, nil +} + // for compatibility func (p *TFunction) FastWrite(buf []byte) int { return 0 @@ -3602,6 +3736,9 @@ func (p *TFunction) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -3635,6 +3772,9 @@ func (p *TFunction) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -3777,6 +3917,39 @@ func (p *TFunction) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWrit return offset } +func (p *TFunction) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsUdtfFunction() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "is_udtf_function", thrift.BOOL, 14) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsUdtfFunction) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFunction) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsStaticLoad() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_static_load", thrift.BOOL, 15) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsStaticLoad) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TFunction) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetExpirationTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expiration_time", thrift.I64, 16) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ExpirationTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TFunction) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("name", thrift.STRUCT, 1) @@ -3909,6 +4082,39 @@ func (p *TFunction) field13Length() int { return l } +func (p *TFunction) field14Length() int { + l := 0 + if p.IsSetIsUdtfFunction() { + l += bthrift.Binary.FieldBeginLength("is_udtf_function", thrift.BOOL, 14) + l += bthrift.Binary.BoolLength(p.IsUdtfFunction) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFunction) field15Length() int { + l := 0 + if p.IsSetIsStaticLoad() { + l += bthrift.Binary.FieldBeginLength("is_static_load", thrift.BOOL, 15) + l += bthrift.Binary.BoolLength(p.IsStaticLoad) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TFunction) field16Length() int { + l := 0 + if p.IsSetExpirationTime() { + l += bthrift.Binary.FieldBeginLength("expiration_time", thrift.I64, 16) + l += bthrift.Binary.I64Length(*p.ExpirationTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TJdbcExecutorCtorParams) FastRead(buf []byte) (int, error) { var err error var offset int @@ -4057,6 +4263,104 @@ func (p *TJdbcExecutorCtorParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 10: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 14: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 16: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -4151,63 +4455,154 @@ func (p *TJdbcExecutorCtorParams) FastReadField5(buf []byte) (int, error) { return offset, err } else { offset += l - p.JdbcDriverClass = &v + p.JdbcDriverClass = &v + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BatchSize = &v + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TJdbcOperation(v) + p.Op = &tmp + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField8(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.DriverPath = &v + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + tmp := TOdbcTableType(v) + p.TableType = &tmp + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMinSize = &v + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMaxSize = &v + + } + return offset, nil +} + +func (p *TJdbcExecutorCtorParams) FastReadField12(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ConnectionPoolMaxWaitTime = &v } return offset, nil } -func (p *TJdbcExecutorCtorParams) FastReadField6(buf []byte) (int, error) { +func (p *TJdbcExecutorCtorParams) FastReadField13(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - p.BatchSize = &v + p.ConnectionPoolMaxLifeTime = &v } return offset, nil } -func (p *TJdbcExecutorCtorParams) FastReadField7(buf []byte) (int, error) { +func (p *TJdbcExecutorCtorParams) FastReadField14(buf []byte) (int, error) { offset := 0 if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TJdbcOperation(v) - p.Op = &tmp + p.ConnectionPoolCacheClearTime = &v } return offset, nil } -func (p *TJdbcExecutorCtorParams) 
FastReadField8(buf []byte) (int, error) { +func (p *TJdbcExecutorCtorParams) FastReadField15(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { return offset, err } else { offset += l - p.DriverPath = &v + p.ConnectionPoolKeepAlive = &v } return offset, nil } -func (p *TJdbcExecutorCtorParams) FastReadField9(buf []byte) (int, error) { +func (p *TJdbcExecutorCtorParams) FastReadField16(buf []byte) (int, error) { offset := 0 - if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { return offset, err } else { offset += l - - tmp := TOdbcTableType(v) - p.TableType = &tmp + p.CatalogId = &v } return offset, nil @@ -4223,6 +4618,13 @@ func (p *TJdbcExecutorCtorParams) FastWriteNocopy(buf []byte, binaryWriter bthri offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJdbcExecutorCtorParams") if p != nil { offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -4250,6 +4652,13 @@ func (p *TJdbcExecutorCtorParams) BLength() int { l += p.field7Length() l += p.field8Length() l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -4355,6 +4764,83 @@ func (p *TJdbcExecutorCtorParams) fastWriteField9(buf []byte, binaryWriter bthri return offset } +func (p *TJdbcExecutorCtorParams) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMinSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_min_size", thrift.I32, 10) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMinSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMaxSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_size", thrift.I32, 11) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolMaxWaitTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_wait_time", thrift.I32, 12) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxWaitTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 
+ if p.IsSetConnectionPoolMaxLifeTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_max_life_time", thrift.I32, 13) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolMaxLifeTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolCacheClearTime() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_cache_clear_time", thrift.I32, 14) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.ConnectionPoolCacheClearTime) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetConnectionPoolKeepAlive() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "connection_pool_keep_alive", thrift.BOOL, 15) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.ConnectionPoolKeepAlive) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TJdbcExecutorCtorParams) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCatalogId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "catalog_id", thrift.I64, 16) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.CatalogId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TJdbcExecutorCtorParams) field1Length() int { l := 0 if p.IsSetStatement() { @@ -4454,6 +4940,83 @@ func (p *TJdbcExecutorCtorParams) field9Length() int { return l } +func (p *TJdbcExecutorCtorParams) field10Length() int { + l := 0 + if p.IsSetConnectionPoolMinSize() { + l += bthrift.Binary.FieldBeginLength("connection_pool_min_size", thrift.I32, 10) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMinSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field11Length() int { + l := 0 + if p.IsSetConnectionPoolMaxSize() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_size", thrift.I32, 11) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxSize) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field12Length() int { + l := 0 + if p.IsSetConnectionPoolMaxWaitTime() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_wait_time", thrift.I32, 12) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxWaitTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field13Length() int { + l := 0 + if p.IsSetConnectionPoolMaxLifeTime() { + l += bthrift.Binary.FieldBeginLength("connection_pool_max_life_time", thrift.I32, 13) + l += bthrift.Binary.I32Length(*p.ConnectionPoolMaxLifeTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field14Length() int { + l := 0 + if p.IsSetConnectionPoolCacheClearTime() { + l += bthrift.Binary.FieldBeginLength("connection_pool_cache_clear_time", thrift.I32, 14) + l += bthrift.Binary.I32Length(*p.ConnectionPoolCacheClearTime) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field15Length() int { + l := 0 + if p.IsSetConnectionPoolKeepAlive() { + l += bthrift.Binary.FieldBeginLength("connection_pool_keep_alive", thrift.BOOL, 15) + l += 
bthrift.Binary.BoolLength(*p.ConnectionPoolKeepAlive) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TJdbcExecutorCtorParams) field16Length() int { + l := 0 + if p.IsSetCatalogId() { + l += bthrift.Binary.FieldBeginLength("catalog_id", thrift.I64, 16) + l += bthrift.Binary.I64Length(*p.CatalogId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TJavaUdfExecutorCtorParams) FastRead(buf []byte) (int, error) { var err error var offset int @@ -7395,6 +7958,48 @@ func (p *TBackend) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 4: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -7488,6 +8093,45 @@ func (p *TBackend) FastReadField3(buf []byte) (int, error) { return offset, nil } +func (p *TBackend) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BrpcPort = &v + + } + return offset, nil +} + +func (p *TBackend) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsAlive = &v + + } + return offset, nil +} + +func (p *TBackend) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Id = &v + + } + return offset, nil +} + // for compatibility func (p *TBackend) FastWrite(buf []byte) int { return 0 @@ -7499,6 +8143,9 @@ func (p *TBackend) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter if p != nil { offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -7513,6 +8160,9 @@ func (p *TBackend) BLength() int { l += p.field1Length() l += p.field2Length() l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -7546,6 +8196,39 @@ func (p *TBackend) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter return offset } +func (p *TBackend) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBrpcPort() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 4) + offset += 
bthrift.Binary.WriteI32(buf[offset:], *p.BrpcPort) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackend) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsAlive() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_alive", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsAlive) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TBackend) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TBackend) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("host", thrift.STRING, 1) @@ -7573,6 +8256,39 @@ func (p *TBackend) field3Length() int { return l } +func (p *TBackend) field4Length() int { + l := 0 + if p.IsSetBrpcPort() { + l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 4) + l += bthrift.Binary.I32Length(*p.BrpcPort) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackend) field5Length() int { + l := 0 + if p.IsSetIsAlive() { + l += bthrift.Binary.FieldBeginLength("is_alive", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.IsAlive) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TBackend) field6Length() int { + l := 0 + if p.IsSetId() { + l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.Id) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TReplicaInfo) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/rpc_factory.go b/pkg/rpc/rpc_factory.go index 284d7efa..2dac1608 100644 --- a/pkg/rpc/rpc_factory.go +++ b/pkg/rpc/rpc_factory.go @@ -2,10 +2,10 @@ package rpc import ( "fmt" + "sync" "github.com/selectdb/ccr_syncer/pkg/ccr/base" beservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice/backendservice" - feservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice/frontendservice" "github.com/selectdb/ccr_syncer/pkg/xerror" "github.com/cloudwego/kitex/client" @@ -17,10 +17,18 @@ type IRpcFactory interface { } type RpcFactory struct { + feRpcs map[*base.Spec]IFeRpc + feRpcsLock sync.Mutex + + beRpcs map[base.Backend]IBeRpc + beRpcsLock sync.Mutex } func NewRpcFactory() IRpcFactory { - return &RpcFactory{} + return &RpcFactory{ + feRpcs: make(map[*base.Spec]IFeRpc), + beRpcs: make(map[base.Backend]IBeRpc), + } } func (rf *RpcFactory) NewFeRpc(spec *base.Spec) (IFeRpc, error) { @@ -29,24 +37,46 @@ func (rf *RpcFactory) NewFeRpc(spec *base.Spec) (IFeRpc, error) { return nil, err } - // create kitex FrontendService client - if fe_client, err := feservice.NewClient("FrontendService", client.WithHostPorts(spec.Host+":"+spec.ThriftPort)); err != nil { - return nil, xerror.Wrapf(err, xerror.Normal, "NewFeClient error: %v, spec: %s", err, spec) - } else { - return &FeRpc{ - client: fe_client, - }, nil + rf.feRpcsLock.Lock() + if feRpc, ok := rf.feRpcs[spec]; ok { + rf.feRpcsLock.Unlock() + return feRpc, nil + } + rf.feRpcsLock.Unlock() + + feRpc, err := NewFeRpc(spec) + if err != nil { + return nil, err } + + rf.feRpcsLock.Lock() + defer rf.feRpcsLock.Unlock() + rf.feRpcs[spec] = feRpc + return feRpc, nil } func (rf *RpcFactory) NewBeRpc(be *base.Backend) 
(IBeRpc, error) { - // create kitex FrontendService client - if client, err := beservice.NewClient("FrontendService", client.WithHostPorts(fmt.Sprintf("%s:%d", be.Host, be.BePort))); err != nil { + rf.beRpcsLock.Lock() + if beRpc, ok := rf.beRpcs[*be]; ok { + rf.beRpcsLock.Unlock() + return beRpc, nil + } + rf.beRpcsLock.Unlock() + + // create kitex BackendService client + addr := fmt.Sprintf("%s:%d", be.Host, be.BePort) + client, err := beservice.NewClient("BackendService", client.WithHostPorts(addr), client.WithConnectTimeout(connectTimeout), client.WithRPCTimeout(rpcTimeout)) + if err != nil { return nil, xerror.Wrapf(err, xerror.Normal, "NewBeClient error: %v", err) - } else { - return &BeRpc{ - backend: be, - client: client, - }, nil } + + beRpc := &BeRpc{ + backend: be, + client: client, + } + + rf.beRpcsLock.Lock() + defer rf.beRpcsLock.Unlock() + rf.beRpcs[*be] = beRpc + return beRpc, nil } diff --git a/pkg/rpc/thrift/AgentService.thrift b/pkg/rpc/thrift/AgentService.thrift index a61c9512..fdbf4483 100644 --- a/pkg/rpc/thrift/AgentService.thrift +++ b/pkg/rpc/thrift/AgentService.thrift @@ -44,6 +44,12 @@ struct TTabletSchema { 16: optional bool store_row_column = false 17: optional bool enable_single_replica_compaction = false 18: optional bool skip_write_index_on_load = false + 19: optional list cluster_key_idxes + // col unique id for row store column + 20: optional list row_store_col_cids + 21: optional i64 row_store_page_size = 16384 + 22: optional bool variant_enable_flatten_nested = false + 23: optional i64 storage_page_size = 65536 } // this enum stands for different storage format in src_backends @@ -60,6 +66,17 @@ enum TTabletType { TABLET_TYPE_MEMORY = 1 } +enum TObjStorageType { + UNKNOWN = 0, + AWS = 1, + AZURE = 2, + BOS = 3, + COS = 4, + OBS = 5, + OSS = 6, + GCP = 7 +} + struct TS3StorageParam { 1: optional string endpoint 2: optional string region @@ -71,6 +88,8 @@ struct TS3StorageParam { 8: optional string root_path 9: optional string bucket 10: optional bool use_path_style = false + 11: optional string token + 12: optional TObjStorageType provider } struct TStoragePolicy { @@ -87,6 +106,7 @@ struct TStorageResource { 2: optional string name 3: optional i64 version // alter version 4: optional TS3StorageParam s3_storage_param + 5: optional PlanNodes.THdfsParams hdfs_storage_param // more storage resource type } @@ -96,6 +116,12 @@ struct TPushStoragePolicyReq { 3: optional list dropped_storage_policy } +struct TCleanTrashReq {} + +struct TCleanUDFCacheReq { + 1: optional string function_signature //function_name(arg_type) +} + enum TCompressionType { UNKNOWN_COMPRESSION = 0, DEFAULT_COMPRESSION = 1, @@ -108,6 +134,14 @@ enum TCompressionType { LZ4HC = 8 } +// Enumerates the storage formats for inverted indexes in src_backends. +// This enum is used to distinguish between different organizational methods +// of inverted index data, affecting how the index is stored and accessed. +enum TInvertedIndexStorageFormat { + DEFAULT, // Default format, unspecified storage method. + V1, // Index per idx: Each index is stored separately based on its identifier. + V2 // Segment id per idx: Indexes are organized based on segment identifiers, grouping indexes by their associated segment. 
+} struct TBinlogConfig { 1: optional bool enable; @@ -147,6 +181,14 @@ struct TCreateTabletReq { 23: optional i64 time_series_compaction_goal_size_mbytes = 1024 24: optional i64 time_series_compaction_file_count_threshold = 2000 25: optional i64 time_series_compaction_time_threshold_seconds = 3600 + 26: optional i64 time_series_compaction_empty_rowsets_threshold = 5 + 27: optional i64 time_series_compaction_level_threshold = 1 + 28: optional TInvertedIndexStorageFormat inverted_index_storage_format = TInvertedIndexStorageFormat.DEFAULT // Deprecated + 29: optional Types.TInvertedIndexFileStorageFormat inverted_index_file_storage_format = Types.TInvertedIndexFileStorageFormat.V2 + + // For cloud + 1000: optional bool is_in_memory = false + 1001: optional bool is_persistent = false } struct TDropTabletReq { @@ -189,6 +231,11 @@ struct TAlterTabletReqV2 { 9: optional Descriptors.TDescriptorTable desc_tbl 10: optional list columns 11: optional i32 be_exec_version = 0 + + // For cloud + 1000: optional i64 job_id + 1001: optional i64 expiration + 1002: optional string storage_vault_id } struct TAlterInvertedIndexReq { @@ -247,6 +294,8 @@ struct TPushReq { 14: optional PlanNodes.TBrokerScanRange broker_scan_range 15: optional Descriptors.TDescriptorTable desc_tbl 16: optional list columns_desc + 17: optional string storage_vault_id + 18: optional i32 schema_version } struct TCloneReq { @@ -255,7 +304,7 @@ struct TCloneReq { 3: required list src_backends 4: optional Types.TStorageMedium storage_medium // these are visible version(hash) actually - 5: optional Types.TVersion committed_version + 5: optional Types.TVersion version 6: optional Types.TVersionHash committed_version_hash // Deprecated 7: optional i32 task_version; 8: optional i64 src_path_hash; @@ -263,6 +312,7 @@ struct TCloneReq { 10: optional i32 timeout_s; 11: optional Types.TReplicaId replica_id = 0 12: optional i64 partition_id + 13: optional i64 table_id = -1 } struct TCompactionReq { @@ -340,6 +390,7 @@ struct TSnapshotRequest { 11: optional Types.TVersion start_version 12: optional Types.TVersion end_version 13: optional bool is_copy_binlog + 14: optional Types.TTabletId ref_tablet_id } struct TReleaseSnapshotRequest { @@ -374,6 +425,27 @@ struct TPublishVersionRequest { 2: required list partition_version_infos // strict mode means BE will check tablet missing version 3: optional bool strict_mode = false + // for delta rows statistics to exclude rollup tablets + 4: optional set base_tablet_ids +} + +struct TVisibleVersionReq { + 1: required map partition_version +} + +struct TCalcDeleteBitmapPartitionInfo { + 1: required Types.TPartitionId partition_id + 2: required Types.TVersion version + 3: required list tablet_ids + 4: optional list base_compaction_cnts + 5: optional list cumulative_compaction_cnts + 6: optional list cumulative_points + 7: optional list sub_txn_ids +} + +struct TCalcDeleteBitmapRequest { + 1: required Types.TTransactionId transaction_id + 2: required list partitions; } struct TClearAlterTaskRequest { @@ -415,6 +487,9 @@ struct TTabletMetaInfo { 13: optional i64 time_series_compaction_time_threshold_seconds 14: optional bool enable_single_replica_compaction 15: optional bool skip_write_index_on_load + 16: optional bool disable_auto_compaction + 17: optional i64 time_series_compaction_empty_rowsets_threshold + 18: optional i64 time_series_compaction_level_threshold } struct TUpdateTabletMetaInfoReq { @@ -473,6 +548,12 @@ struct TAgentTaskRequest { 31: optional TPushStoragePolicyReq push_storage_policy_req 32: 
optional TAlterInvertedIndexReq alter_inverted_index_req 33: optional TGcBinlogReq gc_binlog_req + 34: optional TCleanTrashReq clean_trash_req + 35: optional TVisibleVersionReq visible_version_req + 36: optional TCleanUDFCacheReq clean_udf_cache_req + + // For cloud + 1000: optional TCalcDeleteBitmapRequest calc_delete_bitmap_req } struct TAgentResult { @@ -492,7 +573,7 @@ struct TTopicItem { } enum TTopicType { - RESOURCE + RESOURCE = 0 } struct TTopicUpdate { @@ -505,4 +586,3 @@ struct TAgentPublishRequest { 1: required TAgentServiceVersion protocol_version 2: required list updates } - diff --git a/pkg/rpc/thrift/BackendService.thrift b/pkg/rpc/thrift/BackendService.thrift index 3d77eab4..7f073b2b 100644 --- a/pkg/rpc/thrift/BackendService.thrift +++ b/pkg/rpc/thrift/BackendService.thrift @@ -24,6 +24,7 @@ include "PlanNodes.thrift" include "AgentService.thrift" include "PaloInternalService.thrift" include "DorisExternalService.thrift" +include "FrontendService.thrift" struct TExportTaskRequest { 1: required PaloInternalService.TExecPlanFragmentParams params @@ -33,9 +34,11 @@ struct TTabletStat { 1: required i64 tablet_id // local data size 2: optional i64 data_size - 3: optional i64 row_num - 4: optional i64 version_count + 3: optional i64 row_count + 4: optional i64 total_version_count 5: optional i64 remote_data_size + 6: optional i64 visible_version_count + 7: optional i64 visible_version } struct TTabletStatResult { @@ -67,6 +70,9 @@ struct TRoutineLoadTask { 14: optional PlanNodes.TFileFormatType format 15: optional PaloInternalService.TPipelineFragmentParams pipeline_params 16: optional bool is_multi_table + 17: optional bool memtable_on_sink_node; + 18: optional string qualified_user + 19: optional string cloud_cluster } struct TKafkaMetaProxyRequest { @@ -123,6 +129,87 @@ struct TCheckStorageFormatResult { 2: optional list v2_tablets; } +struct TWarmUpCacheAsyncRequest { + 1: required string host + 2: required i32 brpc_port + 3: required list tablet_ids +} + +struct TWarmUpCacheAsyncResponse { + 1: required Status.TStatus status +} + +struct TCheckWarmUpCacheAsyncRequest { + 1: optional list tablets +} + +struct TCheckWarmUpCacheAsyncResponse { + 1: required Status.TStatus status + 2: optional map task_done; +} + +struct TSyncLoadForTabletsRequest { + 1: required list tablet_ids +} + +struct TSyncLoadForTabletsResponse { +} + +struct THotPartition { + 1: required i64 partition_id + 2: required i64 last_access_time + 3: optional i64 query_per_day + 4: optional i64 query_per_week +} + +struct THotTableMessage { + 1: required i64 table_id + 2: required i64 index_id + 3: optional list hot_partitions +} + +struct TGetTopNHotPartitionsRequest { +} + +struct TGetTopNHotPartitionsResponse { + 1: required i64 file_cache_size + 2: optional list hot_tables +} + +enum TDownloadType { + BE = 0, + S3 = 1, +} + +enum TWarmUpTabletsRequestType { + SET_JOB = 0, + SET_BATCH = 1, + GET_CURRENT_JOB_STATE_AND_LEASE = 2, + CLEAR_JOB = 3, +} + +struct TJobMeta { + 1: required TDownloadType download_type + 2: optional string be_ip + 3: optional i32 brpc_port + 4: optional list tablet_ids +} + +struct TWarmUpTabletsRequest { + 1: required i64 job_id + 2: required i64 batch_id + 3: optional list job_metas + 4: required TWarmUpTabletsRequestType type +} + +struct TWarmUpTabletsResponse { + 1: required Status.TStatus status; + 2: optional i64 job_id + 3: optional i64 batch_id + 4: optional i64 pending_job_size + 5: optional i64 finish_job_size +} + struct TIngestBinlogRequest { 1: optional i64 txn_id; 2: 
optional i64 remote_tablet_id; @@ -136,6 +223,122 @@ struct TIngestBinlogRequest { struct TIngestBinlogResult { 1: optional Status.TStatus status; + 2: optional bool is_async; +} + +struct TQueryIngestBinlogRequest { + 1: optional i64 txn_id; + 2: optional i64 partition_id; + 3: optional i64 tablet_id; + 4: optional Types.TUniqueId load_id; +} + +enum TIngestBinlogStatus { + ANALYSIS_ERROR, + UNKNOWN, + NOT_FOUND, + OK, + FAILED, + DOING +} + +struct TQueryIngestBinlogResult { + 1: optional TIngestBinlogStatus status; + 2: optional string err_msg; +} + +enum TTopicInfoType { + WORKLOAD_GROUP = 0 + MOVE_QUERY_TO_GROUP = 1 + WORKLOAD_SCHED_POLICY = 2 +} + +struct TWorkloadGroupInfo { + 1: optional i64 id + 2: optional string name + 3: optional i64 version + 4: optional i64 cpu_share + 5: optional i32 cpu_hard_limit + 6: optional string mem_limit + 7: optional bool enable_memory_overcommit + 8: optional bool enable_cpu_hard_limit + 9: optional i32 scan_thread_num + 10: optional i32 max_remote_scan_thread_num + 11: optional i32 min_remote_scan_thread_num + 12: optional i32 memory_low_watermark + 13: optional i32 memory_high_watermark + 14: optional i64 read_bytes_per_second + 15: optional i64 remote_read_bytes_per_second + 16: optional string tag +} + +enum TWorkloadMetricType { + QUERY_TIME = 0 + BE_SCAN_ROWS = 1 + BE_SCAN_BYTES = 2 + QUERY_BE_MEMORY_BYTES = 3 +} + +enum TCompareOperator { + EQUAL = 0 + GREATER = 1 + GREATER_EQUAL = 2 + LESS = 3 + LESS_EQUAL = 4 +} + +struct TWorkloadCondition { + 1: optional TWorkloadMetricType metric_name + 2: optional TCompareOperator op + 3: optional string value +} + +enum TWorkloadActionType { + MOVE_QUERY_TO_GROUP = 0 + CANCEL_QUERY = 1 +} + +struct TWorkloadAction { + 1: optional TWorkloadActionType action + 2: optional string action_args +} + +struct TWorkloadSchedPolicy { + 1: optional i64 id + 2: optional string name + 3: optional i32 version + 4: optional i32 priority + 5: optional bool enabled + 6: optional list condition_list + 7: optional list action_list + 8: optional list wg_id_list +} + +struct TopicInfo { + 1: optional TWorkloadGroupInfo workload_group_info + 2: optional TWorkloadSchedPolicy workload_sched_policy +} + +struct TPublishTopicRequest { + 1: required map> topic_map +} + +struct TPublishTopicResult { + 1: required Status.TStatus status +} + +enum TWorkloadType { + INTERNAL = 2 +} + +struct TGetRealtimeExecStatusRequest { + // maybe query id or other unique id + 1: optional Types.TUniqueId id +} + +struct TGetRealtimeExecStatusResponse { + 1: optional Status.TStatus status + 2: optional FrontendService.TReportExecStatusParams report_exec_status_params } service BackendService { @@ -187,10 +390,23 @@ service BackendService { TStreamLoadRecordResult get_stream_load_record(1: i64 last_stream_record_time); - oneway void clean_trash(); - // check tablet rowset type TCheckStorageFormatResult check_storage_format(); + TWarmUpCacheAsyncResponse warm_up_cache_async(1: TWarmUpCacheAsyncRequest request); + + TCheckWarmUpCacheAsyncResponse check_warm_up_cache_async(1: TCheckWarmUpCacheAsyncRequest request); + + TSyncLoadForTabletsResponse sync_load_for_tablets(1: TSyncLoadForTabletsRequest request); + + TGetTopNHotPartitionsResponse get_top_n_hot_partitions(1: TGetTopNHotPartitionsRequest request); + + TWarmUpTabletsResponse warm_up_tablets(1: TWarmUpTabletsRequest request); + TIngestBinlogResult ingest_binlog(1: TIngestBinlogRequest ingest_binlog_request); + TQueryIngestBinlogResult query_ingest_binlog(1: TQueryIngestBinlogRequest 
query_ingest_binlog_request); + + TPublishTopicResult publish_topic_info(1:TPublishTopicRequest topic_request); + + TGetRealtimeExecStatusResponse get_realtime_exec_status(1:TGetRealtimeExecStatusRequest request); } diff --git a/pkg/rpc/thrift/Data.thrift b/pkg/rpc/thrift/Data.thrift index d1163821..dc1190c6 100644 --- a/pkg/rpc/thrift/Data.thrift +++ b/pkg/rpc/thrift/Data.thrift @@ -54,6 +54,7 @@ struct TCell { 3: optional i64 longVal 4: optional double doubleVal 5: optional string stringVal + 6: optional bool isNull // add type: date datetime } diff --git a/pkg/rpc/thrift/DataSinks.thrift b/pkg/rpc/thrift/DataSinks.thrift index f1cad4cf..ed7ccee6 100644 --- a/pkg/rpc/thrift/DataSinks.thrift +++ b/pkg/rpc/thrift/DataSinks.thrift @@ -36,7 +36,10 @@ enum TDataSinkType { RESULT_FILE_SINK, JDBC_TABLE_SINK, MULTI_CAST_DATA_STREAM_SINK, - GROUP_COMMIT_OLAP_TABLE_SINK, + GROUP_COMMIT_OLAP_TABLE_SINK, // deprecated + GROUP_COMMIT_BLOCK_SINK, + HIVE_TABLE_SINK, + ICEBERG_TABLE_SINK, } enum TResultSinkType { @@ -100,7 +103,7 @@ enum TParquetRepetitionType { struct TParquetSchema { 1: optional TParquetRepetitionType schema_repetition_type 2: optional TParquetDataType schema_data_type - 3: optional string schema_column_name + 3: optional string schema_column_name 4: optional TParquetDataLogicalType schema_data_logical_type } @@ -127,6 +130,16 @@ struct TResultFileSinkOptions { 16: optional bool delete_existing_files; 17: optional string file_suffix; + 18: optional bool with_bom; + + 19: optional PlanNodes.TFileCompressType orc_compression_type; + + // Since we have changed the type mapping from Doris to Orc type, + // using the Outfile to export Date/Datetime types will cause BE core dump + // when only upgrading BE without upgrading FE. + // orc_writer_version = 1 means doris FE is higher than version 2.1.5 + // orc_writer_version = 0 means doris FE is less than or equal to version 2.1.5 + 20: optional i64 orc_writer_version; } struct TMemoryScratchSink { @@ -157,17 +170,25 @@ struct TDataStreamSink { 3: optional bool ignore_not_found - // per-destination projections - 4: optional list output_exprs + // per-destination projections + 4: optional list output_exprs + + // project output tuple id + 5: optional Types.TTupleId output_tuple_id - // project output tuple id - 5: optional Types.TTupleId output_tuple_id + // per-destination filters + 6: optional list conjuncts - // per-destination filters - 6: optional list conjuncts + // per-destination runtime filters + 7: optional list runtime_filters - // per-destination runtime filters - 7: optional list runtime_filters + // used for partition_type = TABLET_SINK_SHUFFLE_PARTITIONED + 8: optional Descriptors.TOlapTableSchemaParam tablet_sink_schema + 9: optional Descriptors.TOlapTablePartitionParam tablet_sink_partition + 10: optional Descriptors.TOlapTableLocationParam tablet_sink_location + 11: optional i64 tablet_sink_txn_id + 12: optional Types.TTupleId tablet_sink_tuple_id + 13: optional list tablet_sink_exprs } struct TMultiCastDataStreamSink { @@ -234,6 +255,12 @@ struct TExportSink { 7: optional string header } +enum TGroupCommitMode { + SYNC_MODE, + ASYNC_MODE, + OFF_MODE +} + struct TOlapTableSink { 1: required Types.TUniqueId load_id 2: required i64 txn_id @@ -255,6 +282,137 @@ struct TOlapTableSink { 18: optional Descriptors.TOlapTableLocationParam slave_location 19: optional i64 txn_timeout_s // timeout of load txn in second 20: optional bool write_file_cache + + // used by GroupCommitBlockSink + 21: optional i64 base_schema_version + 22: 
optional TGroupCommitMode group_commit_mode + 23: optional double max_filter_ratio + + 24: optional string storage_vault_id +} + +struct THiveLocationParams { + 1: optional string write_path + 2: optional string target_path + 3: optional Types.TFileType file_type + // Other object store will convert write_path to s3 scheme path for BE, this field keeps the original write path. + 4: optional string original_write_path +} + +struct TSortedColumn { + 1: optional string sort_column_name + 2: optional i32 order // asc(1) or desc(0) +} + +struct TBucketingMode { + 1: optional i32 bucket_version +} + +struct THiveBucket { + 1: optional list bucketed_by + 2: optional TBucketingMode bucket_mode + 3: optional i32 bucket_count + 4: optional list sorted_by +} + +enum THiveColumnType { + PARTITION_KEY = 0, + REGULAR = 1, + SYNTHESIZED = 2 +} + +struct THiveColumn { + 1: optional string name + 2: optional THiveColumnType column_type +} + +struct THivePartition { + 1: optional list values + 2: optional THiveLocationParams location + 3: optional PlanNodes.TFileFormatType file_format +} + +struct THiveSerDeProperties { + 1: optional string field_delim + 2: optional string line_delim + 3: optional string collection_delim // array ,map ,struct delimiter + 4: optional string mapkv_delim + 5: optional string escape_char + 6: optional string null_format +} + +struct THiveTableSink { + 1: optional string db_name + 2: optional string table_name + 3: optional list columns + 4: optional list partitions + 5: optional THiveBucket bucket_info + 6: optional PlanNodes.TFileFormatType file_format + 7: optional PlanNodes.TFileCompressType compression_type + 8: optional THiveLocationParams location + 9: optional map hadoop_config + 10: optional bool overwrite + 11: optional THiveSerDeProperties serde_properties +} + +enum TUpdateMode { + NEW = 0, // add partition + APPEND = 1, // alter partition + OVERWRITE = 2 // insert overwrite +} + +struct TS3MPUPendingUpload { + 1: optional string bucket + 2: optional string key + 3: optional string upload_id + 4: optional map etags +} + +struct THivePartitionUpdate { + 1: optional string name + 2: optional TUpdateMode update_mode + 3: optional THiveLocationParams location + 4: optional list file_names + 5: optional i64 row_count + 6: optional i64 file_size + 7: optional list s3_mpu_pending_uploads +} + +enum TFileContent { + DATA = 0, + POSITION_DELETES = 1, + EQUALITY_DELETES = 2 +} + +struct TIcebergCommitData { + 1: optional string file_path + 2: optional i64 row_count + 3: optional i64 file_size + 4: optional TFileContent file_content + 5: optional list partition_values + 6: optional list referenced_data_files +} + +struct TSortField { + 1: optional i32 source_column_id + 2: optional bool ascending + 3: optional bool null_first +} + +struct TIcebergTableSink { + 1: optional string db_name + 2: optional string tb_name + 3: optional string schema_json + 4: optional map partition_specs_json + 5: optional i32 partition_spec_id + 6: optional list sort_fields + 7: optional PlanNodes.TFileFormatType file_format + 8: optional string output_path + 9: optional map hadoop_config + 10: optional bool overwrite + 11: optional Types.TFileType file_type + 12: optional string original_output_path + 13: optional PlanNodes.TFileCompressType compression_type } struct TDataSink { @@ -269,5 +427,6 @@ struct TDataSink { 10: optional TResultFileSink result_file_sink 11: optional TJdbcTableSink jdbc_table_sink 12: optional TMultiCastDataStreamSink multi_cast_stream_sink + 13: optional THiveTableSink 
hive_table_sink + 14: optional TIcebergTableSink iceberg_table_sink } - diff --git a/pkg/rpc/thrift/Descriptors.thrift b/pkg/rpc/thrift/Descriptors.thrift index fa391feb..b80ce5ca 100644 --- a/pkg/rpc/thrift/Descriptors.thrift +++ b/pkg/rpc/thrift/Descriptors.thrift @@ -41,6 +41,8 @@ struct TColumn { 16: optional string aggregation 17: optional bool result_is_nullable 18: optional bool is_auto_increment = false; + 19: optional i32 cluster_key_id = -1 + 20: optional i32 be_exec_version = -1 } struct TSlotDescriptor { @@ -48,7 +50,7 @@ struct TSlotDescriptor { 2: required Types.TTupleId parent 3: required Types.TTypeDesc slotType 4: required i32 columnPos // in originating table - 5: required i32 byteOffset // into tuple + 5: required i32 byteOffset // deprecated 6: required i32 nullIndicatorByte 7: required i32 nullIndicatorBit 8: required string colName; @@ -62,84 +64,98 @@ struct TSlotDescriptor { 14: optional bool is_auto_increment = false; // subcolumn path info list for semi structure column(variant) 15: optional list column_paths + 16: optional string col_default_value + 17: optional Types.TPrimitiveType primitive_type = Types.TPrimitiveType.INVALID_TYPE } struct TTupleDescriptor { 1: required Types.TTupleId id - 2: required i32 byteSize - 3: required i32 numNullBytes + 2: required i32 byteSize // deprecated + 3: required i32 numNullBytes // deprecated 4: optional Types.TTableId tableId - 5: optional i32 numNullSlots + 5: optional i32 numNullSlots // deprecated } enum THdfsFileFormat { - TEXT, - LZO_TEXT, - RC_FILE, - SEQUENCE_FILE, - AVRO, - PARQUET + TEXT = 0, + LZO_TEXT = 1, + RC_FILE = 2, + SEQUENCE_FILE =3, + AVRO = 4, + PARQUET = 5 } enum TSchemaTableType { - SCH_AUTHORS= 0, - SCH_CHARSETS, - SCH_COLLATIONS, - SCH_COLLATION_CHARACTER_SET_APPLICABILITY, - SCH_COLUMNS, - SCH_COLUMN_PRIVILEGES, - SCH_CREATE_TABLE, - SCH_ENGINES, - SCH_EVENTS, - SCH_FILES, - SCH_GLOBAL_STATUS, - SCH_GLOBAL_VARIABLES, - SCH_KEY_COLUMN_USAGE, - SCH_OPEN_TABLES, - SCH_PARTITIONS, - SCH_PLUGINS, - SCH_PROCESSLIST, - SCH_PROFILES, - SCH_REFERENTIAL_CONSTRAINTS, - SCH_PROCEDURES, - SCH_SCHEMATA, - SCH_SCHEMA_PRIVILEGES, - SCH_SESSION_STATUS, - SCH_SESSION_VARIABLES, - SCH_STATISTICS, - SCH_STATUS, - SCH_TABLES, - SCH_TABLE_CONSTRAINTS, - SCH_TABLE_NAMES, - SCH_TABLE_PRIVILEGES, - SCH_TRIGGERS, - SCH_USER_PRIVILEGES, - SCH_VARIABLES, - SCH_VIEWS, - SCH_INVALID, - SCH_ROWSETS, - SCH_BACKENDS, - SCH_COLUMN_STATISTICS, - SCH_PARAMETERS, - SCH_METADATA_NAME_IDS, - SCH_PROFILING; + SCH_AUTHORS = 0, + SCH_CHARSETS = 1, + SCH_COLLATIONS = 2, + SCH_COLLATION_CHARACTER_SET_APPLICABILITY = 3, + SCH_COLUMNS = 4, + SCH_COLUMN_PRIVILEGES = 5, + SCH_CREATE_TABLE = 6, + SCH_ENGINES = 7, + SCH_EVENTS = 8, + SCH_FILES = 9, + SCH_GLOBAL_STATUS = 10, + SCH_GLOBAL_VARIABLES = 11, + SCH_KEY_COLUMN_USAGE = 12, + SCH_OPEN_TABLES = 13, + SCH_PARTITIONS = 14, + SCH_PLUGINS = 15, + SCH_PROCESSLIST = 16, + SCH_PROFILES = 17, + SCH_REFERENTIAL_CONSTRAINTS = 18, + SCH_PROCEDURES = 19, + SCH_SCHEMATA = 20, + SCH_SCHEMA_PRIVILEGES = 21, + SCH_SESSION_STATUS = 22, + SCH_SESSION_VARIABLES = 23, + SCH_STATISTICS = 24, + SCH_STATUS = 25, + SCH_TABLES = 26, + SCH_TABLE_CONSTRAINTS = 27, + SCH_TABLE_NAMES = 28, + SCH_TABLE_PRIVILEGES = 29, + SCH_TRIGGERS = 30, + SCH_USER_PRIVILEGES = 31, + SCH_VARIABLES = 32, + SCH_VIEWS = 33, + SCH_INVALID = 34, + SCH_ROWSETS = 35 + SCH_BACKENDS = 36, + SCH_COLUMN_STATISTICS = 37, + SCH_PARAMETERS = 38, + SCH_METADATA_NAME_IDS = 39, + SCH_PROFILING = 40, + SCH_BACKEND_ACTIVE_TASKS = 41, + SCH_ACTIVE_QUERIES 
= 42, + SCH_WORKLOAD_GROUPS = 43, + SCH_USER = 44, + SCH_PROCS_PRIV = 45, + SCH_WORKLOAD_POLICY = 46, + SCH_TABLE_OPTIONS = 47, + SCH_WORKLOAD_GROUP_PRIVILEGES = 48, + SCH_WORKLOAD_GROUP_RESOURCE_USAGE = 49, + SCH_TABLE_PROPERTIES = 50, + SCH_FILE_CACHE_STATISTICS = 51, + SCH_CATALOG_META_CACHE_STATISTICS = 52; } enum THdfsCompression { - NONE, - DEFAULT, - GZIP, - DEFLATE, - BZIP2, - SNAPPY, - SNAPPY_BLOCKED // Used by sequence and rc files but not stored in the metadata. + NONE = 0, + DEFAULT = 1, + GZIP = 2, + DEFLATE = 3, + BZIP2 = 4, + SNAPPY = 5, + SNAPPY_BLOCKED = 6 // Used by sequence and rc files but not stored in the metadata. } enum TIndexType { - BITMAP, - INVERTED, - BLOOMFILTER, - NGRAM_BF + BITMAP = 0, + INVERTED = 1, + BLOOMFILTER = 2, + NGRAM_BF = 3 } // Mapping from names defined by Avro to the enum. @@ -176,6 +192,8 @@ struct TOlapTablePartition { 9: optional bool is_mutable = true // only used in List Partition 10: optional bool is_default_partition; + // only used in random distribution scenario to make data distributed even + 11: optional i64 load_tablet_idx } struct TOlapTablePartitionParam { @@ -197,6 +215,10 @@ struct TOlapTablePartitionParam { 8: optional list partition_function_exprs 9: optional bool enable_automatic_partition 10: optional Partitions.TPartitionType partition_type + // insert overwrite partition(*) + 11: optional bool enable_auto_detect_overwrite + 12: optional i64 overwrite_group_id + 13: optional bool partitions_is_fake = false } struct TOlapTableIndex { @@ -206,6 +228,7 @@ struct TOlapTableIndex { 4: optional string comment 5: optional i64 index_id 6: optional map properties + 7: optional list column_unique_ids } struct TOlapTableIndexSchema { @@ -227,9 +250,14 @@ struct TOlapTableSchemaParam { 5: required TTupleDescriptor tuple_desc 6: required list indexes 7: optional bool is_dynamic_schema // deprecated - 8: optional bool is_partial_update + 8: optional bool is_partial_update // deprecated, use unique_key_update_mode 9: optional list partial_update_input_columns - 10: optional bool is_strict_mode = false; + 10: optional bool is_strict_mode = false + 11: optional string auto_increment_column + 12: optional i32 auto_increment_column_unique_id = -1 + 13: optional Types.TInvertedIndexFileStorageFormat inverted_index_file_storage_format = Types.TInvertedIndexFileStorageFormat.V1 + 14: optional Types.TUniqueKeyUpdateMode unique_key_update_mode + 15: optional i32 sequence_map_col_unique_id = -1 } struct TTabletLocation { @@ -319,16 +347,37 @@ struct TJdbcTable { 6: optional string jdbc_resource_name 7: optional string jdbc_driver_class 8: optional string jdbc_driver_checksum - + 9: optional i32 connection_pool_min_size + 10: optional i32 connection_pool_max_size + 11: optional i32 connection_pool_max_wait_time + 12: optional i32 connection_pool_max_life_time + 13: optional bool connection_pool_keep_alive + 14: optional i64 catalog_id } struct TMCTable { - 1: optional string region + 1: optional string region // deprecated 2: optional string project 3: optional string table 4: optional string access_key 5: optional string secret_key - 6: optional string public_access + 6: optional string public_access // deprecated + 7: optional string odps_url // deprecated + 8: optional string tunnel_url // deprecated + 9: optional string endpoint + 10: optional string quota +} + +struct TTrinoConnectorTable { + 1: optional string db_name + 2: optional string table_name + 3: optional map properties +} + +struct TLakeSoulTable { + 1: optional string db_name + 2: 
optional string table_name + 3: optional map properties } // "Union" of all table types. @@ -354,6 +403,8 @@ struct TTableDescriptor { 19: optional THudiTable hudiTable 20: optional TJdbcTable jdbcTable 21: optional TMCTable mcTable + 22: optional TTrinoConnectorTable trinoConnectorTable + 23: optional TLakeSoulTable lakesoulTable } struct TDescriptorTable { diff --git a/pkg/rpc/thrift/Exprs.thrift b/pkg/rpc/thrift/Exprs.thrift index e102babc..e6091cfd 100644 --- a/pkg/rpc/thrift/Exprs.thrift +++ b/pkg/rpc/thrift/Exprs.thrift @@ -74,6 +74,14 @@ enum TExprNodeType { LAMBDA_FUNCTION_CALL_EXPR, // for column_ref expr COLUMN_REF, + + IPV4_LITERAL, + IPV6_LITERAL + + // only used in runtime filter + // to prevent push to storage layer + NULL_AWARE_IN_PRED, + NULL_AWARE_BINARY_PRED, } //enum TAggregationOp { @@ -127,6 +135,14 @@ struct TLargeIntLiteral { 1: required string value } +struct TIPv4Literal { + 1: required i64 value +} + +struct TIPv6Literal { + 1: required string value +} + struct TInPredicate { 1: required bool is_not_in } @@ -143,6 +159,8 @@ struct TMatchPredicate { 1: required string parser_type; 2: required string parser_mode; 3: optional map char_filter_map; + 4: optional bool parser_lowercase = true; + 5: optional string parser_stopwords = ""; } struct TLiteralPredicate { @@ -175,6 +193,11 @@ struct TStringLiteral { 1: required string value; } +struct TNullableStringLiteral { + 1: optional string value; + 2: optional bool is_null = false; +} + struct TJsonLiteral { 1: required string value; } @@ -232,7 +255,7 @@ struct TExprNode { 26: optional Types.TFunction fn // If set, child[vararg_start_idx] is the first vararg child. 27: optional i32 vararg_start_idx - 28: optional Types.TPrimitiveType child_type + 28: optional Types.TPrimitiveType child_type // Deprecated // For vectorized engine 29: optional bool is_nullable @@ -242,6 +265,9 @@ struct TExprNode { 32: optional TColumnRef column_ref 33: optional TMatchPredicate match_predicate + 34: optional TIPv4Literal ipv4_literal + 35: optional TIPv6Literal ipv6_literal + 36: optional string label // alias name, a/b in `select xxx as a, count(1) as b` } // A flattened representation of a tree of Expr nodes, obtained by depth-first diff --git a/pkg/rpc/thrift/FrontendService.thrift b/pkg/rpc/thrift/FrontendService.thrift index b1ccf7db..56f9ab2f 100644 --- a/pkg/rpc/thrift/FrontendService.thrift +++ b/pkg/rpc/thrift/FrontendService.thrift @@ -29,6 +29,8 @@ include "Exprs.thrift" include "RuntimeProfile.thrift" include "MasterService.thrift" include "AgentService.thrift" +include "DataSinks.thrift" +include "HeartbeatService.thrift" // These are supporting structs for JniFrontend.java, which serves as the glue // between our C++ execution environment and the Java frontend. 
@@ -104,7 +106,7 @@ struct TShowVariableRequest { // Results of a call to describeTable() struct TShowVariableResult { - 1: required map variables + 1: required list> variables } // Valid table file formats @@ -356,11 +358,11 @@ struct TListTableStatusResult { struct TTableMetadataNameIds { 1: optional string name - 2: optional i64 id + 2: optional i64 id } struct TListTableMetadataNameIdsResult { - 1: optional list tables + 1: optional list tables } // getTableNames returns a list of unqualified table names @@ -394,8 +396,52 @@ struct TDetailedReportParams { 1: optional Types.TUniqueId fragment_instance_id 2: optional RuntimeProfile.TRuntimeProfileTree profile 3: optional RuntimeProfile.TRuntimeProfileTree loadChannelProfile + 4: optional bool is_fragment_level } + +struct TQueryStatistics { + // A thrift structure identical to the PQueryStatistics structure. + 1: optional i64 scan_rows + 2: optional i64 scan_bytes + 3: optional i64 returned_rows + 4: optional i64 cpu_ms + 5: optional i64 max_peak_memory_bytes + 6: optional i64 current_used_memory_bytes + 7: optional i64 workload_group_id + 8: optional i64 shuffle_send_bytes + 9: optional i64 shuffle_send_rows + 10: optional i64 scan_bytes_from_local_storage + 11: optional i64 scan_bytes_from_remote_storage +} + +struct TReportWorkloadRuntimeStatusParams { + 1: optional i64 backend_id + 2: optional map query_statistics_map +} + +struct TQueryProfile { + 1: optional Types.TUniqueId query_id + + 2: optional map> fragment_id_to_profile + + // Types.TUniqueId should not be used as key in thrift map, so we use two lists instead + // https://thrift.apache.org/docs/types#containers + 3: optional list fragment_instance_ids + // Types.TUniqueId can not be used as key in thrift map, so we use two lists instead + 4: optional list instance_profiles + + 5: optional list load_channel_profiles +} + +struct TFragmentInstanceReport { + 1: optional Types.TUniqueId fragment_instance_id; + 2: optional i32 num_finished_range; + 3: optional i64 loaded_rows + 4: optional i64 loaded_bytes +} + + // The results of an INSERT query, sent to the coordinator as part of // TReportExecStatusParams struct TReportExecStatusParams { @@ -423,7 +469,7 @@ struct TReportExecStatusParams { // cumulative profile // required in V1 // Move to TDetailedReportParams for pipelineX - 7: optional RuntimeProfile.TRuntimeProfileTree profile + 7: optional RuntimeProfile.TRuntimeProfileTree profile // to be deprecated // New errors that have not been reported to the coordinator // optional in V1 @@ -453,16 +499,65 @@ struct TReportExecStatusParams { 20: optional PaloInternalService.TQueryType query_type // Move to TDetailedReportParams for pipelineX - 21: optional RuntimeProfile.TRuntimeProfileTree loadChannelProfile + 21: optional RuntimeProfile.TRuntimeProfileTree loadChannelProfile // to be deprecated 22: optional i32 finished_scan_ranges - 23: optional list detailed_report + 23: optional list detailed_report // to be deprecated + + 24: optional TQueryStatistics query_statistics // deprecated + + 25: optional TReportWorkloadRuntimeStatusParams report_workload_runtime_status + + 26: optional list hive_partition_updates + + 27: optional TQueryProfile query_profile + + 28: optional list iceberg_commit_datas + + 29: optional i64 txn_id + 30: optional string label + + 31: optional list fragment_instance_reports; } struct TFeResult { 1: required FrontendServiceVersion protocolVersion 2: required Status.TStatus status + + // For cloud + 1000: optional string cloud_cluster + 1001: optional 
bool noAuth +} + +enum TSubTxnType { + INSERT = 0, + DELETE = 1 +} + +struct TSubTxnInfo { + 1: optional i64 sub_txn_id + 2: optional i64 table_id + 3: optional list tablet_commit_infos + 4: optional TSubTxnType sub_txn_type +} + +struct TTxnLoadInfo { + 1: optional string label + 2: optional i64 dbId + 3: optional i64 txnId + 4: optional i64 timeoutTimestamp + 5: optional i64 allSubTxnNum + 6: optional list subTxnInfos +} + +struct TGroupCommitInfo{ + 1: optional bool getGroupCommitLoadBeId + 2: optional i64 groupCommitLoadTableId + 3: optional string cluster + 5: optional bool updateLoadData + 6: optional i64 tableId + 7: optional i64 receiveData } struct TMasterOpRequest { @@ -494,6 +589,15 @@ struct TMasterOpRequest { 24: optional bool syncJournalOnly // if set to true, this request means to do nothing but just sync max journal id of master 25: optional string defaultCatalog 26: optional string defaultDatabase + 27: optional bool cancel_qeury // if set to true, this request means to cancel one forwarded query, and query_id needs to be set + 28: optional map user_variables + // transaction load + 29: optional TTxnLoadInfo txnLoadInfo + 30: optional TGroupCommitInfo groupCommitInfo + + // selectdb cloud + 1000: optional string cloud_cluster + 1001: optional bool noAuth; } struct TColumnDefinition { @@ -518,6 +622,12 @@ struct TMasterOpResult { 3: optional TShowResultSet resultSet; 4: optional Types.TUniqueId queryId; 5: optional string status; + 6: optional i32 statusCode; + 7: optional string errMessage; + 8: optional list queryResultBufList; + // transaction load + 9: optional TTxnLoadInfo txnLoadInfo; + 10: optional i64 groupCommitLoadBeId; } struct TUpdateExportTaskStatusRequest { @@ -535,11 +645,14 @@ struct TLoadTxnBeginRequest { 6: optional string user_ip 7: required string label 8: optional i64 timestamp // deprecated, use request_id instead - 9: optional i64 auth_code + 9: optional i64 auth_code // deprecated, use token instead // The real value of timeout should be i32. i64 ensures the compatibility of interface. 10: optional i64 timeout 11: optional Types.TUniqueId request_id 12: optional string token + 13: optional string auth_code_uuid // deprecated, use token instead + 14: optional i64 table_id + 15: optional i64 backend_id } struct TLoadTxnBeginResult { @@ -557,11 +670,14 @@ struct TBeginTxnRequest { 5: optional list table_ids 6: optional string user_ip 7: optional string label - 8: optional i64 auth_code + 8: optional i64 auth_code // deprecated, use token instead // The real value of timeout should be i32. i64 ensures the compatibility of interface. 
9: optional i64 timeout 10: optional Types.TUniqueId request_id 11: optional string token + 12: optional i64 backend_id + // used for ccr + 13: optional i64 sub_txn_num = 0 } struct TBeginTxnResult { @@ -569,6 +685,9 @@ struct TBeginTxnResult { 2: optional i64 txn_id 3: optional string job_status // if label already used, set status of existing job 4: optional i64 db_id + 5: optional Types.TNetworkAddress master_address + // used for ccr + 6: optional list sub_txn_ids } // StreamLoad request, used to load a streaming to engine @@ -599,7 +718,7 @@ struct TStreamLoadPutRequest { 14: optional string columnSeparator 15: optional string partitions - 16: optional i64 auth_code + 16: optional i64 auth_code // deprecated, use token instead 17: optional bool negative 18: optional i32 timeout 19: optional bool strictMode @@ -639,6 +758,14 @@ struct TStreamLoadPutRequest { // only valid when file type is CSV 52: optional i8 escape 53: optional bool memtable_on_sink_node; + 54: optional bool group_commit // deprecated + 55: optional i32 stream_per_node; + 56: optional string group_commit_mode + 57: optional Types.TUniqueKeyUpdateMode unique_key_update_mode + + // For cloud + 1000: optional string cloud_cluster + 1001: optional i64 table_id } struct TStreamLoadPutResult { @@ -648,6 +775,11 @@ struct TStreamLoadPutResult { 3: optional PaloInternalService.TPipelineFragmentParams pipeline_params // used for group commit 4: optional i64 base_schema_version + 5: optional i64 db_id + 6: optional i64 table_id + 7: optional bool wait_internal_group_commit_finish = false + 8: optional i64 group_commit_interval_ms + 9: optional i64 group_commit_data_bytes } struct TStreamLoadMultiTablePutResult { @@ -666,16 +798,6 @@ struct TStreamLoadWithLoadStatusResult { 6: optional i64 unselected_rows } -struct TCheckWalRequest { - 1: optional i64 wal_id - 2: optional i64 db_id -} - -struct TCheckWalResult { - 1: optional Status.TStatus status - 2: optional bool need_recovery -} - struct TKafkaRLTaskProgress { 1: required map partitionCmtOffset } @@ -710,13 +832,17 @@ struct TLoadTxnCommitRequest { 7: required i64 txnId 8: required bool sync 9: optional list commitInfos - 10: optional i64 auth_code + 10: optional i64 auth_code // deprecated, use token instead 11: optional TTxnCommitAttachment txnCommitAttachment 12: optional i64 thrift_rpc_timeout_ms 13: optional string token 14: optional i64 db_id 15: optional list tbls 16: optional i64 table_id + 17: optional string auth_code_uuid // deprecated, use token instead + 18: optional bool groupCommit + 19: optional i64 receiveBytes + 20: optional i64 backendId } struct TLoadTxnCommitResult { @@ -731,15 +857,19 @@ struct TCommitTxnRequest { 5: optional string user_ip 6: optional i64 txn_id 7: optional list commit_infos - 8: optional i64 auth_code + 8: optional i64 auth_code // deprecated, use token instead 9: optional TTxnCommitAttachment txn_commit_attachment 10: optional i64 thrift_rpc_timeout_ms 11: optional string token 12: optional i64 db_id + // used for ccr + 13: optional bool txn_insert + 14: optional list sub_txn_infos } struct TCommitTxnResult { 1: optional Status.TStatus status + 2: optional Types.TNetworkAddress master_address } struct TLoadTxn2PCRequest { @@ -750,10 +880,13 @@ struct TLoadTxn2PCRequest { 5: optional string user_ip 6: optional i64 txnId 7: optional string operation - 8: optional i64 auth_code + 8: optional i64 auth_code // deprecated, use token instead 9: optional string token 10: optional i64 thrift_rpc_timeout_ms 11: optional string label + + // For 
cloud + 1000: optional string auth_code_uuid // deprecated, use token instead } struct TLoadTxn2PCResult { @@ -768,7 +901,7 @@ struct TRollbackTxnRequest { 5: optional string user_ip 6: optional i64 txn_id 7: optional string reason - 9: optional i64 auth_code + 9: optional i64 auth_code // deprecated, use token instead 10: optional TTxnCommitAttachment txn_commit_attachment 11: optional string token 12: optional i64 db_id @@ -776,6 +909,7 @@ struct TRollbackTxnRequest { struct TRollbackTxnResult { 1: optional Status.TStatus status + 2: optional Types.TNetworkAddress master_address } struct TLoadTxnRollbackRequest { @@ -787,11 +921,13 @@ struct TLoadTxnRollbackRequest { 6: optional string user_ip 7: required i64 txnId 8: optional string reason - 9: optional i64 auth_code + 9: optional i64 auth_code // deprecated, use token instead 10: optional TTxnCommitAttachment txnCommitAttachment 11: optional string token 12: optional i64 db_id 13: optional list tbls + 14: optional string auth_code_uuid // deprecated, use token instead + 15: optional string label } struct TLoadTxnRollbackResult { @@ -814,6 +950,7 @@ enum TFrontendPingFrontendStatusCode { struct TFrontendPingFrontendRequest { 1: required i32 clusterId 2: required string token + 3: optional string deployMode } struct TDiskInfo { @@ -871,7 +1008,16 @@ struct TInitExternalCtlMetaResult { enum TSchemaTableName { // BACKENDS = 0, - METADATA_TABLE = 1, + METADATA_TABLE = 1, // tvf + ACTIVE_QUERIES = 2, // db information_schema's table + WORKLOAD_GROUPS = 3, // db information_schema's table + ROUTINES_INFO = 4, // db information_schema's table + WORKLOAD_SCHEDULE_POLICY = 5, + TABLE_OPTIONS = 6, + WORKLOAD_GROUP_PRIVILEGES = 7, + TABLE_PROPERTIES = 8, + CATALOG_META_CACHE_STATS = 9, + PARTITIONS = 10, } struct TMetadataTableRequestParams { @@ -881,12 +1027,28 @@ struct TMetadataTableRequestParams { 4: optional list columns_name 5: optional PlanNodes.TFrontendsMetadataParams frontends_metadata_params 6: optional Types.TUserIdentity current_user_ident + 7: optional PlanNodes.TQueriesMetadataParams queries_metadata_params + 8: optional PlanNodes.TMaterializedViewsMetadataParams materialized_views_metadata_params + 9: optional PlanNodes.TJobsMetadataParams jobs_metadata_params + 10: optional PlanNodes.TTasksMetadataParams tasks_metadata_params + 11: optional PlanNodes.TPartitionsMetadataParams partitions_metadata_params + 12: optional PlanNodes.TMetaCacheStatsParams meta_cache_stats_params + 13: optional PlanNodes.TPartitionValuesMetadataParams partition_values_metadata_params +} + +struct TSchemaTableRequestParams { + 1: optional list columns_name + 2: optional Types.TUserIdentity current_user_ident + 3: optional bool replay_to_other_fe + 4: optional string catalog // use for table specific queries + 5: optional i64 dbId // used for table specific queries } struct TFetchSchemaTableDataRequest { 1: optional string cluster_name 2: optional TSchemaTableName schema_table_name - 3: optional TMetadataTableRequestParams metada_table_params + 3: optional TMetadataTableRequestParams metada_table_params // used for tvf + 4: optional TSchemaTableRequestParams schema_table_params // used for request db information_schema's table } struct TFetchSchemaTableDataResult { @@ -894,23 +1056,6 @@ struct TFetchSchemaTableDataResult { 2: optional list data_batch; } -// Only support base table add columns -struct TAddColumnsRequest { - 1: optional i64 table_id - 2: optional list addColumns - 3: optional string table_name - 4: optional string db_name - 5: optional bool 
allow_type_conflict -} - -// Only support base table add columns -struct TAddColumnsResult { - 1: optional Status.TStatus status - 2: optional i64 table_id - 3: optional list allColumns - 4: optional i32 schema_version -} - struct TMySqlLoadAcquireTokenResult { 1: optional Status.TStatus status 2: optional string token @@ -1042,6 +1187,124 @@ enum TBinlogType { MODIFY_PARTITIONS = 11, REPLACE_PARTITIONS = 12, TRUNCATE_TABLE = 13, + RENAME_TABLE = 14, + RENAME_COLUMN = 15, + MODIFY_COMMENT = 16, + MODIFY_VIEW_DEF = 17, + REPLACE_TABLE = 18, + MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES = 19, + INDEX_CHANGE_JOB = 20, + RENAME_ROLLUP = 21, + RENAME_PARTITION = 22, + DROP_ROLLUP = 23, + RECOVER_INFO = 24, + RESTORE_INFO = 25, + // Keep some IDs for allocation so that when new binlog types are added in the + // future, the changes can be picked back to the old versions without breaking + // compatibility. + // + // The code will check the IDs of binlog types, any binlog types whose IDs are + // greater than or equal to MIN_UNKNOWN will be ignored. + // + // For example, before you adding new binlog type MODIFY_XXX: + // MIN_UNKNOWN = 17, + // UNKNOWN_2 = 18, + // UNKNOWN_3 = 19, + // After adding binlog type MODIFY_XXX: + // MODIFY_XXX = 17, + // MIN_UNKNOWN = 18, + // UNKNOWN_3 = 19, + MIN_UNKNOWN = 26, + UNKNOWN_11 = 27, + UNKNOWN_12 = 28, + UNKNOWN_13 = 29, + UNKNOWN_14 = 30, + UNKNOWN_15 = 31, + UNKNOWN_16 = 32, + UNKNOWN_17 = 33, + UNKNOWN_18 = 34, + UNKNOWN_19 = 35, + UNKNOWN_20 = 36, + UNKNOWN_21 = 37, + UNKNOWN_22 = 38, + UNKNOWN_23 = 39, + UNKNOWN_24 = 40, + UNKNOWN_25 = 41, + UNKNOWN_26 = 42, + UNKNOWN_27 = 43, + UNKNOWN_28 = 44, + UNKNOWN_29 = 45, + UNKNOWN_30 = 46, + UNKNOWN_31 = 47, + UNKNOWN_32 = 48, + UNKNOWN_33 = 49, + UNKNOWN_34 = 50, + UNKNOWN_35 = 51, + UNKNOWN_36 = 52, + UNKNOWN_37 = 53, + UNKNOWN_38 = 54, + UNKNOWN_39 = 55, + UNKNOWN_40 = 56, + UNKNOWN_41 = 57, + UNKNOWN_42 = 58, + UNKNOWN_43 = 59, + UNKNOWN_44 = 60, + UNKNOWN_45 = 61, + UNKNOWN_46 = 62, + UNKNOWN_47 = 63, + UNKNOWN_48 = 64, + UNKNOWN_49 = 65, + UNKNOWN_50 = 66, + UNKNOWN_51 = 67, + UNKNOWN_52 = 68, + UNKNOWN_53 = 69, + UNKNOWN_54 = 70, + UNKNOWN_55 = 71, + UNKNOWN_56 = 72, + UNKNOWN_57 = 73, + UNKNOWN_58 = 74, + UNKNOWN_59 = 75, + UNKNOWN_60 = 76, + UNKNOWN_61 = 77, + UNKNOWN_62 = 78, + UNKNOWN_63 = 79, + UNKNOWN_64 = 80, + UNKNOWN_65 = 81, + UNKNOWN_66 = 82, + UNKNOWN_67 = 83, + UNKNOWN_68 = 84, + UNKNOWN_69 = 85, + UNKNOWN_70 = 86, + UNKNOWN_71 = 87, + UNKNOWN_72 = 88, + UNKNOWN_73 = 89, + UNKNOWN_74 = 90, + UNKNOWN_75 = 91, + UNKNOWN_76 = 92, + UNKNOWN_77 = 93, + UNKNOWN_78 = 94, + UNKNOWN_79 = 95, + UNKNOWN_80 = 96, + UNKNOWN_81 = 97, + UNKNOWN_82 = 98, + UNKNOWN_83 = 99, + UNKNOWN_84 = 100, + UNKNOWN_85 = 101, + UNKNOWN_86 = 102, + UNKNOWN_87 = 103, + UNKNOWN_88 = 104, + UNKNOWN_89 = 105, + UNKNOWN_90 = 106, + UNKNOWN_91 = 107, + UNKNOWN_92 = 108, + UNKNOWN_93 = 109, + UNKNOWN_94 = 110, + UNKNOWN_95 = 111, + UNKNOWN_96 = 112, + UNKNOWN_97 = 113, + UNKNOWN_98 = 114, + UNKNOWN_99 = 115, + UNKNOWN_100 = 116, } struct TBinlog { @@ -1062,6 +1325,7 @@ struct TGetBinlogResult { 3: optional list binlogs 4: optional string fe_version 5: optional i64 fe_meta_version + 6: optional Types.TNetworkAddress master_address } struct TGetTabletReplicaInfosRequest { @@ -1089,12 +1353,17 @@ struct TGetSnapshotRequest { 7: optional string label_name 8: optional string snapshot_name 9: optional TSnapshotType snapshot_type + 10: optional bool enable_compress; } struct TGetSnapshotResult { 1: optional Status.TStatus status 2: 
optional binary meta 3: optional binary job_info + 4: optional Types.TNetworkAddress master_address + 5: optional bool compressed; + 6: optional i64 expiredAt; // in millis + 7: optional i64 commit_seq; } struct TTableRef { @@ -1115,10 +1384,67 @@ struct TRestoreSnapshotRequest { 10: optional map properties 11: optional binary meta 12: optional binary job_info + 13: optional bool clean_tables + 14: optional bool clean_partitions + 15: optional bool atomic_restore + 16: optional bool compressed; } struct TRestoreSnapshotResult { 1: optional Status.TStatus status + 2: optional Types.TNetworkAddress master_address +} + +struct TPlsqlStoredProcedure { + 1: optional string name + 2: optional i64 catalogId + 3: optional i64 dbId + 4: optional string packageName + 5: optional string ownerName + 6: optional string source + 7: optional string createTime + 8: optional string modifyTime +} + +struct TPlsqlPackage { + 1: optional string name + 2: optional i64 catalogId + 3: optional i64 dbId + 4: optional string ownerName + 5: optional string header + 6: optional string body +} + +struct TPlsqlProcedureKey { + 1: optional string name + 2: optional i64 catalogId + 3: optional i64 dbId +} + +struct TAddPlsqlStoredProcedureRequest { + 1: optional TPlsqlStoredProcedure plsqlStoredProcedure + 2: optional bool isForce +} + +struct TDropPlsqlStoredProcedureRequest { + 1: optional TPlsqlProcedureKey plsqlProcedureKey +} + +struct TPlsqlStoredProcedureResult { + 1: optional Status.TStatus status +} + +struct TAddPlsqlPackageRequest { + 1: optional TPlsqlPackage plsqlPackage + 2: optional bool isForce +} + +struct TDropPlsqlPackageRequest { + 1: optional TPlsqlProcedureKey plsqlProcedureKey +} + +struct TPlsqlPackageResult { + 1: optional Status.TStatus status } struct TGetMasterTokenRequest { @@ -1130,6 +1456,7 @@ struct TGetMasterTokenRequest { struct TGetMasterTokenResult { 1: optional Status.TStatus status 2: optional string token + 3: optional Types.TNetworkAddress master_address } typedef TGetBinlogRequest TGetBinlogLagRequest @@ -1137,11 +1464,21 @@ typedef TGetBinlogRequest TGetBinlogLagRequest struct TGetBinlogLagResult { 1: optional Status.TStatus status 2: optional i64 lag + 3: optional Types.TNetworkAddress master_address } struct TUpdateFollowerStatsCacheRequest { 1: optional string key; - 2: list statsRows; + 2: optional list statsRows; + 3: optional string colStatsData; +} + +struct TInvalidateFollowerStatsCacheRequest { + 1: optional string key; +} + +struct TUpdateFollowerPartitionStatsCacheRequest { + 1: optional string key; } struct TAutoIncrementRangeRequest { @@ -1156,6 +1493,7 @@ struct TAutoIncrementRangeResult { 1: optional Status.TStatus status 2: optional i64 start 3: optional i64 length + 4: optional Types.TNetworkAddress master_address } struct TCreatePartitionRequest { @@ -1163,7 +1501,9 @@ struct TCreatePartitionRequest { 2: optional i64 db_id 3: optional i64 table_id // for each partition column's partition values. [missing_rows, partition_keys]->Left bound(for range) or Point(for list) - 4: optional list> partitionValues + 4: optional list> partitionValues + // be_endpoint = : to distinguish a particular BE + 5: optional string be_endpoint } struct TCreatePartitionResult { @@ -1173,6 +1513,203 @@ struct TCreatePartitionResult { 4: optional list nodes } +// these two for auto detect replacing partition +struct TReplacePartitionRequest { + 1: optional i64 overwrite_group_id + 2: optional i64 db_id + 3: optional i64 table_id + 4: optional list partition_ids // partition to replace. 
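
Editor's note (not part of the patch): the MIN_UNKNOWN convention documented in the TBinlogType enum above (IDs are pre-allocated so new binlog types can be backported, and any ID at or above MIN_UNKNOWN is ignored) reduces to a single range check on the receiving side. A minimal sketch of that check, assuming the MIN_UNKNOWN value of 26 from the enum above; this is not the syncer's actual implementation.

package main

import "fmt"

// Values taken from the TBinlogType enum above.
const (
	binlogTypeTruncateTable = 13
	binlogTypeRestoreInfo   = 25
	binlogTypeMinUnknown    = 26 // MIN_UNKNOWN: everything >= this is ignored
)

// isKnownBinlogType reports whether this build understands the binlog type.
// Unknown (reserved) types are skipped instead of failing the sync job.
func isKnownBinlogType(t int64) bool {
	return t >= 0 && t < binlogTypeMinUnknown
}

func main() {
	for _, t := range []int64{binlogTypeTruncateTable, binlogTypeRestoreInfo, 40} {
		fmt.Printf("type %d known: %v\n", t, isKnownBinlogType(t))
	}
}
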
+ // be_endpoint = : to distinguish a particular BE + 5: optional string be_endpoint +} + +struct TReplacePartitionResult { + 1: optional Status.TStatus status + 2: optional list partitions + 3: optional list tablets + 4: optional list nodes +} + +struct TGetMetaReplica { + 1: optional i64 id +} + +struct TGetMetaTablet { + 1: optional i64 id + 2: optional list replicas +} + +struct TGetMetaIndex { + 1: optional i64 id + 2: optional string name + 3: optional list tablets +} + +struct TGetMetaPartition { + 1: optional i64 id + 2: optional string name + 3: optional string key + 4: optional string range + 5: optional bool is_temp + 6: optional list indexes +} + +struct TGetMetaTable { + 1: optional i64 id + 2: optional string name + 3: optional bool in_trash + 4: optional list partitions +} + +struct TGetMetaDB { + 1: optional i64 id + 2: optional string name + 3: optional bool only_table_names + 4: optional list tables +} + +struct TGetMetaRequest { + 1: optional string cluster + 2: optional string user + 3: optional string passwd + 4: optional string user_ip + 5: optional string token + 6: optional TGetMetaDB db + // trash +} + +struct TGetMetaReplicaMeta { + 1: optional i64 id + 2: optional i64 backend_id + 3: optional i64 version +} + +struct TGetMetaTabletMeta { + 1: optional i64 id + 2: optional list replicas +} + +struct TGetMetaIndexMeta { + 1: optional i64 id + 2: optional string name + 3: optional list tablets +} + +struct TGetMetaPartitionMeta { + 1: optional i64 id + 2: optional string name + 3: optional string key + 4: optional string range + 5: optional i64 visible_version + 6: optional bool is_temp + 7: optional list indexes +} + +struct TGetMetaTableMeta { + 1: optional i64 id + 2: optional string name + 3: optional bool in_trash + 4: optional list partitions +} + +struct TGetMetaDBMeta { + 1: optional i64 id + 2: optional string name + 3: optional list tables + 4: optional list dropped_partitions + 5: optional list dropped_tables + 6: optional list dropped_indexes +} + +struct TGetMetaResult { + 1: required Status.TStatus status + 2: optional TGetMetaDBMeta db_meta + 3: optional Types.TNetworkAddress master_address +} + +struct TGetBackendMetaRequest { + 1: optional string cluster + 2: optional string user + 3: optional string passwd + 4: optional string user_ip + 5: optional string token + 6: optional i64 backend_id +} + +struct TGetBackendMetaResult { + 1: required Status.TStatus status + 2: optional list backends + 3: optional Types.TNetworkAddress master_address +} + +struct TColumnInfo { + 1: optional string column_name + 2: optional i64 column_id +} + +struct TGetColumnInfoRequest { + 1: optional i64 db_id + 2: optional i64 table_id +} + +struct TGetColumnInfoResult { + 1: optional Status.TStatus status + 2: optional list columns +} + +struct TShowProcessListRequest { + 1: optional bool show_full_sql + 2: optional Types.TUserIdentity current_user_ident +} + +struct TShowProcessListResult { + 1: optional list> process_list +} + +struct TShowUserRequest { +} + +struct TShowUserResult { + 1: optional list> userinfo_list +} + +struct TReportCommitTxnResultRequest { + 1: optional i64 dbId + 2: optional i64 txnId + 3: optional string label + 4: optional binary payload +} + +struct TQueryColumn { + 1: optional string catalogId + 2: optional string dbId + 3: optional string tblId + 4: optional string colName +} + +struct TSyncQueryColumns { + 1: optional list highPriorityColumns; + 2: optional list midPriorityColumns; +} + +struct TFetchSplitBatchRequest { + 1: optional i64 
split_source_id + 2: optional i32 max_num_splits +} + +struct TFetchSplitBatchResult { + 1: optional list splits + 2: optional Status.TStatus status +} + +struct TFetchRunningQueriesResult { + 1: optional Status.TStatus status + 2: optional list running_queries +} + +struct TFetchRunningQueriesRequest { +} + service FrontendService { TGetDbsResult getDbNames(1: TGetDbsParams params) TGetTablesResult getTableNames(1: TGetTablesParams params) @@ -1219,14 +1756,14 @@ service FrontendService { TFrontendPingFrontendResult ping(1: TFrontendPingFrontendRequest request) - TAddColumnsResult addColumns(1: TAddColumnsRequest request) - TInitExternalCtlMetaResult initExternalCtlMeta(1: TInitExternalCtlMetaRequest request) TFetchSchemaTableDataResult fetchSchemaTableData(1: TFetchSchemaTableDataRequest request) TMySqlLoadAcquireTokenResult acquireToken() + bool checkToken(1: string token) + TConfirmUnusedRemoteFilesResult confirmUnusedRemoteFiles(1: TConfirmUnusedRemoteFilesRequest request) TCheckAuthResult checkAuth(1: TCheckAuthRequest request) @@ -1235,6 +1772,11 @@ service FrontendService { TGetTabletReplicaInfosResult getTabletReplicaInfos(1: TGetTabletReplicaInfosRequest request) + TPlsqlStoredProcedureResult addPlsqlStoredProcedure(1: TAddPlsqlStoredProcedureRequest request) + TPlsqlStoredProcedureResult dropPlsqlStoredProcedure(1: TDropPlsqlStoredProcedureRequest request) + TPlsqlPackageResult addPlsqlPackage(1: TAddPlsqlPackageRequest request) + TPlsqlPackageResult dropPlsqlPackage(1: TDropPlsqlPackageRequest request) + TGetMasterTokenResult getMasterToken(1: TGetMasterTokenRequest request) TGetBinlogLagResult getBinlogLag(1: TGetBinlogLagRequest request) @@ -1244,4 +1786,24 @@ service FrontendService { TAutoIncrementRangeResult getAutoIncrementRange(1: TAutoIncrementRangeRequest request) TCreatePartitionResult createPartition(1: TCreatePartitionRequest request) + // insert overwrite partition(*) + TReplacePartitionResult replacePartition(1: TReplacePartitionRequest request) + + TGetMetaResult getMeta(1: TGetMetaRequest request) + + TGetBackendMetaResult getBackendMeta(1: TGetBackendMetaRequest request) + + TGetColumnInfoResult getColumnInfo(1: TGetColumnInfoRequest request) + + Status.TStatus invalidateStatsCache(1: TInvalidateFollowerStatsCacheRequest request) + + TShowProcessListResult showProcessList(1: TShowProcessListRequest request) + Status.TStatus reportCommitTxnResult(1: TReportCommitTxnResultRequest request) + TShowUserResult showUser(1: TShowUserRequest request) + Status.TStatus syncQueryColumns(1: TSyncQueryColumns request) + + TFetchSplitBatchResult fetchSplitBatch(1: TFetchSplitBatchRequest request) + Status.TStatus updatePartitionStatsCache(1: TUpdateFollowerPartitionStatsCacheRequest request) + + TFetchRunningQueriesResult fetchRunningQueries(1: TFetchRunningQueriesRequest request) } diff --git a/pkg/rpc/thrift/HeartbeatService.thrift b/pkg/rpc/thrift/HeartbeatService.thrift index 5a7e47d9..47c41650 100644 --- a/pkg/rpc/thrift/HeartbeatService.thrift +++ b/pkg/rpc/thrift/HeartbeatService.thrift @@ -39,6 +39,11 @@ struct TMasterInfo { 7: optional i64 heartbeat_flags 8: optional i64 backend_id 9: optional list frontend_infos + 10: optional string meta_service_endpoint; + 11: optional string cloud_unique_id; + // See configuration item Config.java rehash_tablet_after_be_dead_seconds for meaning + 12: optional i64 tablet_report_inactive_duration_ms; + 13: optional string auth_token; } struct TBackendInfo { @@ -51,6 +56,10 @@ struct TBackendInfo { 7: optional string be_node_role 8: 
optional bool is_shutdown 9: optional Types.TPort arrow_flight_sql_port + 10: optional i64 be_mem // The physical memory available for use by BE. + // For cloud + 1000: optional i64 fragment_executing_count + 1001: optional i64 fragment_last_active_time } struct THeartbeatResult { diff --git a/pkg/rpc/thrift/Makefile b/pkg/rpc/thrift/Makefile index e2d81952..bc30124b 100644 --- a/pkg/rpc/thrift/Makefile +++ b/pkg/rpc/thrift/Makefile @@ -31,7 +31,7 @@ all: ${GEN_OBJECTS} ${OBJECTS} $(shell mkdir -p ${BUILD_DIR}/gen_java) -THRIFT_CPP_ARGS = -I ${CURDIR} -I ${BUILD_DIR}/thrift/ --gen cpp -out ${BUILD_DIR}/gen_cpp --allow-64bit-consts -strict +THRIFT_CPP_ARGS = -I ${CURDIR} -I ${BUILD_DIR}/thrift/ --gen cpp:moveable_types -out ${BUILD_DIR}/gen_cpp --allow-64bit-consts -strict THRIFT_JAVA_ARGS = -I ${CURDIR} -I ${BUILD_DIR}/thrift/ --gen java:fullcamel -out ${BUILD_DIR}/gen_java --allow-64bit-consts -strict ${BUILD_DIR}/gen_cpp: diff --git a/pkg/rpc/thrift/MasterService.thrift b/pkg/rpc/thrift/MasterService.thrift index 0e56c0e6..9d8cd911 100644 --- a/pkg/rpc/thrift/MasterService.thrift +++ b/pkg/rpc/thrift/MasterService.thrift @@ -33,7 +33,7 @@ struct TTabletInfo { 6: required Types.TSize data_size 7: optional Types.TStorageMedium storage_medium 8: optional list transaction_ids - 9: optional i64 version_count + 9: optional i64 total_version_count 10: optional i64 path_hash 11: optional bool version_miss 12: optional bool used @@ -46,6 +46,10 @@ struct TTabletInfo { // 18: optional bool is_cooldown 19: optional i64 cooldown_term 20: optional Types.TUniqueId cooldown_meta_id + 21: optional i64 visible_version_count + + // For cloud + 1000: optional bool is_persistent } struct TFinishTaskRequest { @@ -66,7 +70,10 @@ struct TFinishTaskRequest { 15: optional i64 copy_size 16: optional i64 copy_time_ms 17: optional map succ_tablets - 18: optional map tablet_id_to_delta_num_rows + 18: optional map table_id_to_delta_num_rows + 19: optional map> table_id_to_tablet_id_to_delta_num_rows + // for Cloud mow table only, used by FE to check if the response is for the latest request + 20: optional list resp_partitions; } struct TTablet { @@ -106,6 +113,9 @@ struct TReportRequest { 10: optional list resource // only id and version 11: i32 num_cores 12: i32 pipeline_executor_size + 13: optional map partitions_version + // tablet num in be, in cloud num_tablets may not eq tablet_list.size() + 14: optional i64 num_tablets } struct TMasterResult { diff --git a/pkg/rpc/thrift/Normalization.thrift b/pkg/rpc/thrift/Normalization.thrift new file mode 100644 index 00000000..1eedfef6 --- /dev/null +++ b/pkg/rpc/thrift/Normalization.thrift @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
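
Editor's note (not part of the patch): a recurring addition in the FrontendService results above (TBeginTxnResult, TCommitTxnResult, TGetSnapshotResult, TGetBinlogLagResult, TGetMetaResult, and others) is an optional master_address field, which lets a non-master FE point the caller at the current master. Below is a hedged sketch of the redirect-and-retry pattern this enables; the types and helper names are hypothetical stand-ins, not the syncer's real RPC layer.

package main

import (
	"errors"
	"fmt"
)

// networkAddress mirrors Types.TNetworkAddress (hostname + port).
type networkAddress struct {
	Hostname string
	Port     int32
}

// binlogLagResult mirrors the relevant parts of TGetBinlogLagResult.
type binlogLagResult struct {
	Lag           *int64
	MasterAddress *networkAddress // set when the contacted FE is not master
}

// getBinlogLagFunc is a stand-in for the real thrift call.
type getBinlogLagFunc func(addr networkAddress) (*binlogLagResult, error)

// getBinlogLagWithRedirect retries once against the master FE when the first
// frontend reports a master_address different from the one we contacted.
func getBinlogLagWithRedirect(addr networkAddress, call getBinlogLagFunc) (int64, error) {
	for i := 0; i < 2; i++ {
		res, err := call(addr)
		if err != nil {
			return 0, err
		}
		if res.Lag != nil {
			return *res.Lag, nil
		}
		if res.MasterAddress == nil || *res.MasterAddress == addr {
			return 0, errors.New("no lag returned and no master to redirect to")
		}
		addr = *res.MasterAddress // follow the redirect once
	}
	return 0, errors.New("still not master after redirect")
}

func main() {
	master := networkAddress{Hostname: "fe-master", Port: 9020}
	lag := int64(42)
	call := func(addr networkAddress) (*binlogLagResult, error) {
		if addr == master {
			return &binlogLagResult{Lag: &lag}, nil
		}
		return &binlogLagResult{MasterAddress: &master}, nil
	}
	got, err := getBinlogLagWithRedirect(networkAddress{Hostname: "fe-follower", Port: 9020}, call)
	fmt.Println(got, err)
}
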
+ +namespace java org.apache.doris.thrift + +include "Exprs.thrift" +include "Types.thrift" +include "Opcodes.thrift" +include "Descriptors.thrift" +include "Partitions.thrift" +include "PlanNodes.thrift" + +struct TNormalizedOlapScanNode { + 1: optional i64 table_id + 2: optional i64 index_id + 3: optional bool is_preaggregation + 4: optional list key_column_names + 5: optional list key_column_types + 6: optional string rollup_name + 7: optional string sort_column + 8: optional list select_columns +} + +struct TNormalizedAggregateNode { + 1: optional list grouping_exprs + 2: optional list aggregate_functions + 3: optional Types.TTupleId intermediate_tuple_id + 4: optional Types.TTupleId output_tuple_id + 5: optional bool is_finalize + 6: optional bool use_streaming_preaggregation + 7: optional list projectToAggIntermediateTuple + 8: optional list projectToAggOutputTuple +} + +struct TNormalizedPlanNode { + 1: optional Types.TPlanNodeId node_id + 2: optional PlanNodes.TPlanNodeType node_type + 3: optional i32 num_children + 5: optional set tuple_ids + 6: optional set nullable_tuples + 7: optional list conjuncts + 8: optional list projects + 9: optional i64 limit + + 10: optional TNormalizedOlapScanNode olap_scan_node + 11: optional TNormalizedAggregateNode aggregation_node +} \ No newline at end of file diff --git a/pkg/rpc/thrift/Opcodes.thrift b/pkg/rpc/thrift/Opcodes.thrift index f6444ebe..3f096764 100644 --- a/pkg/rpc/thrift/Opcodes.thrift +++ b/pkg/rpc/thrift/Opcodes.thrift @@ -88,9 +88,12 @@ enum TExprOpcode { MATCH_ANY, MATCH_ALL, MATCH_PHRASE, - MATCH_ELEMENT_EQ, - MATCH_ELEMENT_LT, - MATCH_ELEMENT_GT, - MATCH_ELEMENT_LE, - MATCH_ELEMENT_GE, + MATCH_ELEMENT_EQ, // DEPRECATED + MATCH_ELEMENT_LT, // DEPRECATED + MATCH_ELEMENT_GT, // DEPRECATED + MATCH_ELEMENT_LE, // DEPRECATED + MATCH_ELEMENT_GE, // DEPRECATED + MATCH_PHRASE_PREFIX, + MATCH_REGEXP, + MATCH_PHRASE_EDGE, } diff --git a/pkg/rpc/thrift/PaloBrokerService.thrift b/pkg/rpc/thrift/PaloBrokerService.thrift index 308c6065..e4bc60a2 100644 --- a/pkg/rpc/thrift/PaloBrokerService.thrift +++ b/pkg/rpc/thrift/PaloBrokerService.thrift @@ -91,12 +91,25 @@ struct TBrokerCheckPathExistResponse { 2: required bool isPathExist; } +struct TBrokerIsSplittableResponse { + 1: optional TBrokerOperationStatus opStatus; + 2: optional bool splittable; +} + struct TBrokerListPathRequest { 1: required TBrokerVersion version; 2: required string path; 3: required bool isRecursive; 4: required map properties; 5: optional bool fileNameOnly; + 6: optional bool onlyFiles; +} + +struct TBrokerIsSplittableRequest { + 1: optional TBrokerVersion version; + 2: optional string path; + 3: optional string inputFormat; + 4: optional map properties; } struct TBrokerDeletePathRequest { @@ -184,6 +197,13 @@ service TPaloBrokerService { // return a list of files under a path TBrokerListResponse listPath(1: TBrokerListPathRequest request); + + // return located files of a given path. A broker implementation refers to + // 'org.apache.doris.fs.remote.RemoteFileSystem#listLocatedFiles' in fe-core. + TBrokerListResponse listLocatedFiles(1: TBrokerListPathRequest request); + + // return whether the path with specified input format is splittable. 
+ TBrokerIsSplittableResponse isSplittable(1: TBrokerIsSplittableRequest request); // delete a file, if the deletion of the file fails, the status code will return an error message // input: diff --git a/pkg/rpc/thrift/PaloInternalService.thrift b/pkg/rpc/thrift/PaloInternalService.thrift index cc7102e4..9a0fd910 100644 --- a/pkg/rpc/thrift/PaloInternalService.thrift +++ b/pkg/rpc/thrift/PaloInternalService.thrift @@ -81,6 +81,11 @@ struct TResourceLimit { 1: optional i32 cpu_limit } +enum TSerdeDialect { + DORIS = 0, + PRESTO = 1 +} + // Query options that correspond to PaloService.PaloQueryOptions, // with their respective defaults struct TQueryOptions { @@ -143,7 +148,7 @@ struct TQueryOptions { // whether enable spilling to disk 31: optional bool enable_spilling = false; // whether enable parallel merge in exchange node - 32: optional bool enable_enable_exchange_node_parallel_merge = false; + 32: optional bool enable_enable_exchange_node_parallel_merge = false; // deprecated // Time in ms to wait until runtime filters are delivered. 33: optional i32 runtime_filter_wait_time_ms = 1000 @@ -181,14 +186,14 @@ struct TQueryOptions { 54: optional bool enable_share_hash_table_for_broadcast_join - 55: optional bool check_overflow_for_decimal = false + 55: optional bool check_overflow_for_decimal = true // For debug purpose, skip delete bitmap when reading data 56: optional bool skip_delete_bitmap = false + // non-pipelinex engine removed. always true. + 57: optional bool enable_pipeline_engine = true - 57: optional bool enable_pipeline_engine = false - - 58: optional i32 repeat_max_num = 0 + 58: optional i32 repeat_max_num = 0 // Deprecated 59: optional i64 external_sort_bytes_threshold = 0 @@ -221,8 +226,8 @@ struct TQueryOptions { 72: optional bool enable_orc_lazy_mat = true 73: optional i64 scan_queue_mem_limit - - 74: optional bool enable_scan_node_run_serial = false; + // deprecated + 74: optional bool enable_scan_node_run_serial = false; 75: optional bool enable_insert_strict = false; @@ -231,8 +236,8 @@ struct TQueryOptions { 77: optional bool truncate_char_or_varchar_columns = false 78: optional bool enable_hash_join_early_start_probe = false - - 79: optional bool enable_pipeline_x_engine = false; + // non-pipelinex engine removed. always true. 
+ 79: optional bool enable_pipeline_x_engine = true; 80: optional bool enable_memtable_on_sink_node = false; @@ -246,7 +251,117 @@ struct TQueryOptions { // use is_report_success any more 84: optional bool enable_profile = false; 85: optional bool enable_page_cache = false; - 86: optional i32 analyze_timeout = 43200 + 86: optional i32 analyze_timeout = 43200; + + 87: optional bool faster_float_convert = false; // deprecated + + 88: optional bool enable_decimal256 = false; + + 89: optional bool enable_local_shuffle = false; + // For emergency use, skip missing version when reading rowsets + 90: optional bool skip_missing_version = false; + + 91: optional bool runtime_filter_wait_infinitely = false; + + 92: optional i32 wait_full_block_schedule_times = 1; + + 93: optional i32 inverted_index_max_expansions = 50; + + 94: optional i32 inverted_index_skip_threshold = 50; + + 95: optional bool enable_parallel_scan = false; + + 96: optional i32 parallel_scan_max_scanners_count = 0; + + 97: optional i64 parallel_scan_min_rows_per_scanner = 0; + + 98: optional bool skip_bad_tablet = false; + // Increase concurrency of scanners adaptively, the maxinum times to scale up + 99: optional double scanner_scale_up_ratio = 0; + + 100: optional bool enable_distinct_streaming_aggregation = true; + + 101: optional bool enable_join_spill = false + + 102: optional bool enable_sort_spill = false + + 103: optional bool enable_agg_spill = false + + 104: optional i64 min_revocable_mem = 0 + + 105: optional i64 spill_streaming_agg_mem_limit = 0; + + // max rows of each sub-queue in DataQueue. + 106: optional i64 data_queue_max_blocks = 0; + + // expr pushdown for index filter rows + 107: optional bool enable_common_expr_pushdown_for_inverted_index = false; + 108: optional i64 local_exchange_free_blocks_limit; + + 109: optional bool enable_force_spill = false; + + 110: optional bool enable_parquet_filter_by_min_max = true + 111: optional bool enable_orc_filter_by_min_max = true + + 112: optional i32 max_column_reader_num = 0 + + 113: optional bool enable_local_merge_sort = false; + + 114: optional bool enable_parallel_result_sink = false; + + 115: optional bool enable_short_circuit_query_access_column_store = false; + + 116: optional bool enable_no_need_read_data_opt = true; + + 117: optional bool read_csv_empty_line_as_null = false; + + 118: optional TSerdeDialect serde_dialect = TSerdeDialect.DORIS; + + 119: optional bool enable_match_without_inverted_index = true; + + 120: optional bool enable_fallback_on_missing_inverted_index = true; + + 121: optional bool keep_carriage_return = false; // \n,\r\n split line in CSV. + + 122: optional i32 runtime_bloom_filter_min_size = 1048576; + + //Access Parquet/ORC columns by name by default. Set this property to `false` to access columns + //by their ordinal position in the Hive table definition. + 123: optional bool hive_parquet_use_column_names = true; + 124: optional bool hive_orc_use_column_names = true; + + 125: optional bool enable_segment_cache = true; + + 126: optional i32 runtime_bloom_filter_max_size = 16777216; + + 127: optional i32 in_list_value_count_threshold = 10; + + // We need this two fields to make sure thrift id on master is compatible with other branch. 
+ 128: optional bool enable_verbose_profile = false; + 129: optional i32 rpc_verbose_profile_max_instance_count = 0; + + 130: optional bool enable_adaptive_pipeline_task_serial_read_on_limit = true; + 131: optional i32 adaptive_pipeline_task_serial_read_on_limit = 10000; + + 132: optional i32 parallel_prepare_threshold = 0; + 133: optional i32 partition_topn_max_partitions = 1024; + 134: optional i32 partition_topn_pre_partition_rows = 1000; + + 135: optional bool enable_parallel_outfile = false; + + 136: optional bool enable_phrase_query_sequential_opt = true; + + 137: optional bool enable_auto_create_when_overwrite = false; + + 138: optional i64 orc_tiny_stripe_threshold_bytes = 8388608; + 139: optional i64 orc_once_max_read_bytes = 8388608; + 140: optional i64 orc_max_merge_distance_bytes = 1048576; + + 141: optional bool ignore_runtime_filter_error = false; + // For cloud, to control if the content would be written into file cache + // In write path, to control if the content would be written into file cache. + // In read path, read from file cache or remote storage when execute query. + 1000: optional bool disable_file_cache = false } @@ -266,6 +381,7 @@ struct TRuntimeFilterTargetParamsV2 { 1: required list target_fragment_instance_ids // The address of the instance where the fragment is expected to run 2: required Types.TNetworkAddress target_fragment_instance_addr + 3: optional list target_fragment_ids } struct TRuntimeFilterParams { @@ -319,7 +435,8 @@ struct TPlanFragmentExecParams { 11: optional bool send_query_statistics_with_every_batch // Used to merge and send runtime filter 12: optional TRuntimeFilterParams runtime_filter_params - 13: optional bool group_commit + 13: optional bool group_commit // deprecated + 14: optional list topn_filter_source_node_ids } // Global query parameters assigned by the coordinator. @@ -361,7 +478,8 @@ struct TTxnParams { 9: optional i64 db_id 10: optional double max_filter_ratio // For load task with transaction, use this to indicate we use pipeline or not - 11: optional bool enable_pipeline_txn_load = false; + // non-pipelinex engine removed. always true. + 11: optional bool enable_pipeline_txn_load = true; } // Definition of global dict, global dict is used to accelerate query performance of low cardinality data @@ -375,6 +493,13 @@ struct TGlobalDict { 2: optional map slot_dicts // map from slot id to column dict id, because 2 or more column may share the dict } +struct TPipelineWorkloadGroup { + 1: optional i64 id + 2: optional string name + 3: optional map properties + 4: optional i64 version +} + // ExecPlanFragment struct TExecPlanFragmentParams { 1: required PaloInternalServiceVersion protocol_version @@ -438,6 +563,7 @@ struct TExecPlanFragmentParams { // Otherwise, the fragment will start executing directly on the BE side. 
20: optional bool need_wait_execution_trigger = false; + // deprecated 21: optional bool build_hash_table_for_broadcast_join = false; 22: optional list instances_sharing_hash_table; @@ -447,6 +573,25 @@ struct TExecPlanFragmentParams { 24: optional map file_scan_params 25: optional i64 wal_id + + // num load stream for each sink backend + 26: optional i32 load_stream_per_node + + // total num of load streams the downstream backend will see + 27: optional i32 total_load_streams + + 28: optional i32 num_local_sink + + 29: optional i64 content_length + + 30: optional list workload_groups + + 31: optional bool is_nereids = true; + + 32: optional Types.TNetworkAddress current_connect_fe + + // For cloud + 1000: optional bool is_mow_table; } struct TExecPlanFragmentParamsList { @@ -482,6 +627,7 @@ struct TFoldConstantParams { 3: optional bool vec_exec 4: optional TQueryOptions query_options 5: optional Types.TUniqueId query_id + 6: optional bool is_nereids } // TransmitData @@ -595,6 +741,14 @@ struct TFetchDataResult { 4: optional Status.TStatus status } +// For cloud +enum TCompoundType { + UNKNOWN = 0, + AND = 1, + OR = 2, + NOT = 3, +} + struct TCondition { 1: required string column_name 2: required string condition_op @@ -603,6 +757,9 @@ struct TCondition { // using unique id to distinguish them 4: optional i32 column_unique_id 5: optional bool marked_by_runtime_filter = false + + // For cloud + 1000: optional TCompoundType compound_type = TCompoundType.UNKNOWN } struct TExportStatusResult { @@ -613,19 +770,15 @@ struct TExportStatusResult { struct TPipelineInstanceParams { 1: required Types.TUniqueId fragment_instance_id + // deprecated 2: optional bool build_hash_table_for_broadcast_join = false; 3: required map> per_node_scan_ranges 4: optional i32 sender_id 5: optional TRuntimeFilterParams runtime_filter_params 6: optional i32 backend_num - 7: optional map per_node_shared_scans -} - -struct TPipelineWorkloadGroup { - 1: optional i64 id - 2: optional string name - 3: optional map properties - 4: optional i64 version + 7: optional map per_node_shared_scans // deprecated + 8: optional list topn_filter_source_node_ids // deprecated after we set topn_filter_descs + 9: optional list topn_filter_descs } // ExecPlanFragment @@ -662,8 +815,40 @@ struct TPipelineFragmentParams { // scan node id -> scan range params, only for external file scan 29: optional map file_scan_params 30: optional bool group_commit = false; + 31: optional i32 load_stream_per_node // num load stream for each sink backend + 32: optional i32 total_load_streams // total num of load streams the downstream backend will see + 33: optional i32 num_local_sink + 34: optional i32 num_buckets + 35: optional map bucket_seq_to_instance_idx + 36: optional map per_node_shared_scans // deprecated + 37: optional i32 parallel_instances + 38: optional i32 total_instances + 39: optional map shuffle_idx_to_instance_idx + 40: optional bool is_nereids = true; + 41: optional i64 wal_id + 42: optional i64 content_length + 43: optional Types.TNetworkAddress current_connect_fe + // Used by 2.1 + 44: optional list topn_filter_source_node_ids + + // For cloud + 1000: optional bool is_mow_table; } struct TPipelineFragmentParamsList { - 1: optional list params_list; + 1: optional list params_list; + 2: optional Descriptors.TDescriptorTable desc_tbl; + // scan node id -> scan range params, only for external file scan + 3: optional map file_scan_params; + 4: optional Types.TNetworkAddress coord; + 5: optional TQueryGlobals query_globals; + 6: optional 
Types.TResourceInfo resource_info; + // The total number of fragments on same BE host + 7: optional i32 fragment_num_on_host + 8: optional TQueryOptions query_options + 9: optional bool is_nereids = true; + 10: optional list workload_groups + 11: optional Types.TUniqueId query_id + 12: optional list topn_filter_source_node_ids + 13: optional Types.TNetworkAddress runtime_filter_merge_addr } diff --git a/pkg/rpc/thrift/Partitions.thrift b/pkg/rpc/thrift/Partitions.thrift index 8eecbb41..4e306c29 100644 --- a/pkg/rpc/thrift/Partitions.thrift +++ b/pkg/rpc/thrift/Partitions.thrift @@ -40,7 +40,16 @@ enum TPartitionType { // unordered partition on a set of exprs // (only use in bucket shuffle join) - BUCKET_SHFFULE_HASH_PARTITIONED + BUCKET_SHFFULE_HASH_PARTITIONED, + + // used for shuffle data by parititon and tablet + TABLET_SINK_SHUFFLE_PARTITIONED, + + // used for shuffle data by hive parititon + TABLE_SINK_HASH_PARTITIONED, + + // used for hive unparititoned table + TABLE_SINK_RANDOM_PARTITIONED } enum TDistributionType { diff --git a/pkg/rpc/thrift/PlanNodes.thrift b/pkg/rpc/thrift/PlanNodes.thrift index 3cc1c569..0bbd364f 100644 --- a/pkg/rpc/thrift/PlanNodes.thrift +++ b/pkg/rpc/thrift/PlanNodes.thrift @@ -120,6 +120,7 @@ enum TFileFormatType { FORMAT_CSV_LZ4BLOCK, FORMAT_CSV_SNAPPYBLOCK, FORMAT_WAL, + FORMAT_ARROW } // In previous versions, the data compression format and file format were stored together, as TFileFormatType, @@ -137,7 +138,9 @@ enum TFileCompressType { DEFLATE, LZOP, LZ4BLOCK, - SNAPPYBLOCK + SNAPPYBLOCK, + ZLIB, + ZSTD } struct THdfsConf { @@ -151,6 +154,8 @@ struct THdfsParams { 3: optional string hdfs_kerberos_principal 4: optional string hdfs_kerberos_keytab 5: optional list hdfs_conf + // Used for Cold Heat Separation to specify the root path + 6: optional string root_path } // One broker range information. @@ -253,6 +258,7 @@ struct TFileTextScanRangeParams { 4: optional string mapkv_delimiter; 5: optional i8 enclose; 6: optional i8 escape; + 7: optional string null_format; } struct TFileScanSlotInfo { @@ -278,6 +284,8 @@ struct TFileAttributes { 10: optional bool trim_double_quotes; // csv skip line num, only used when csv header_type is not set. 11: optional i32 skip_lines; + // for cloud copy into + 1001: optional bool ignore_csv_redundant_col; } struct TIcebergDeleteFileDesc { @@ -285,11 +293,14 @@ struct TIcebergDeleteFileDesc { 2: optional i64 position_lower_bound; 3: optional i64 position_upper_bound; 4: optional list field_ids; + // Iceberg file type, 0: data, 1: position delete, 2: equality delete. + 5: optional i32 content; } struct TIcebergFileDesc { 1: optional i32 format_version; // Iceberg file type, 0: data, 1: position delete, 2: equality delete. 
+ // deprecated, a data file can have both position and delete files 2: optional i32 content; // When open a delete file, filter the data file path with the 'file_path' property 3: optional list delete_files; @@ -297,17 +308,53 @@ struct TIcebergFileDesc { 4: optional Types.TTupleId delete_table_tuple_id; // Deprecated 5: optional Exprs.TExpr file_select_conjunct; + 6: optional string original_file_path; + 7: optional i64 row_count; +} + +struct TPaimonDeletionFileDesc { + 1: optional string path; + 2: optional i64 offset; + 3: optional i64 length; } struct TPaimonFileDesc { 1: optional string paimon_split 2: optional string paimon_column_names - 3: optional string db_name - 4: optional string table_name + 3: optional string db_name // deprecated + 4: optional string table_name // deprecated 5: optional string paimon_predicate - 6: optional map paimon_options -} + 6: optional map paimon_options // deprecated + 7: optional i64 ctl_id // deprecated + 8: optional i64 db_id // deprecated + 9: optional i64 tbl_id // deprecated + 10: optional i64 last_update_time // deprecated + 11: optional string file_format + 12: optional TPaimonDeletionFileDesc deletion_file; + 13: optional map hadoop_conf // deprecated + 14: optional string paimon_table // deprecated +} + +struct TTrinoConnectorFileDesc { + 1: optional string catalog_name + 2: optional string db_name + 3: optional string table_name + 4: optional map trino_connector_options + 5: optional string trino_connector_table_handle + 6: optional string trino_connector_column_handles + 7: optional string trino_connector_column_metadata + 8: optional string trino_connector_column_names // not used + 9: optional string trino_connector_split + 10: optional string trino_connector_predicate + 11: optional string trino_connector_trascation_handle +} + +struct TMaxComputeFileDesc { + 1: optional string partition_spec // deprecated + 2: optional string session_id + 3: optional string table_batch_read_session +} struct THudiFileDesc { 1: optional string instant_time; @@ -322,6 +369,14 @@ struct THudiFileDesc { 10: optional list nested_fields; } +struct TLakeSoulFileDesc { + 1: optional list file_paths; + 2: optional list primary_keys; + 3: optional list partition_descs; + 4: optional string table_schema; + 5: optional string options; +} + struct TTransactionalHiveDeleteDeltaDesc { 1: optional string directory_location 2: optional list file_names @@ -338,6 +393,9 @@ struct TTableFormatFileDesc { 3: optional THudiFileDesc hudi_params 4: optional TPaimonFileDesc paimon_params 5: optional TTransactionalHiveDesc transactional_hive_params + 6: optional TMaxComputeFileDesc max_compute_params + 7: optional TTrinoConnectorFileDesc trino_connector_params + 8: optional TLakeSoulFileDesc lakesoul_params } enum TTextSerdeType { @@ -388,6 +446,13 @@ struct TFileScanRangeParams { 20: optional list pre_filter_exprs_list 21: optional Types.TUniqueId load_id 22: optional TTextSerdeType text_serde_type + // used by flexible partial update + 23: optional string sequence_map_col + // table from FE, used for jni scanner + // BE can use table director: + // 1. Reduce the access to HMS and HDFS on the JNI side. + // 2. There will be no inconsistency between the fe and be tables. 
+ 24: optional string serialized_table } struct TFileRangeDesc { @@ -417,6 +482,11 @@ struct TFileRangeDesc { 12: optional string fs_name } +struct TSplitSource { + 1: optional i64 split_source_id + 2: optional i32 num_splits +} + // TFileScanRange represents a set of descriptions of a file and the rules for reading and converting it. // TFileScanRangeParams: describe how to read and convert file // list: file location and range @@ -427,12 +497,12 @@ struct TFileScanRange { // file_scan_params in TExecPlanFragmentParams will always be set in query request, // and TFileScanRangeParams here is used for some other request such as fetch table schema for tvf. 2: optional TFileScanRangeParams params + 3: optional TSplitSource split_source } // Scan range for external datasource, such as file on hdfs, es datanode, etc. struct TExternalScanRange { 1: optional TFileScanRange file_scan_range - // TODO: add more scan range type? } enum TDataGenFunctionName { @@ -442,7 +512,9 @@ enum TDataGenFunctionName { // Every table valued function should have a scan range definition to save its // running parameters struct TTVFNumbersScanRange { - 1: optional i64 totalNumbers + 1: optional i64 totalNumbers + 2: optional bool useConst + 3: optional i64 constValue } struct TDataGenScanRange { @@ -465,11 +537,58 @@ struct TFrontendsMetadataParams { 1: optional string cluster_name } +struct TMaterializedViewsMetadataParams { + 1: optional string database + 2: optional Types.TUserIdentity current_user_ident +} + +struct TPartitionsMetadataParams { + 1: optional string catalog + 2: optional string database + 3: optional string table +} + +struct TPartitionValuesMetadataParams { + 1: optional string catalog + 2: optional string database + 3: optional string table +} + +struct TJobsMetadataParams { + 1: optional string type + 2: optional Types.TUserIdentity current_user_ident +} + +struct TTasksMetadataParams { + 1: optional string type + 2: optional Types.TUserIdentity current_user_ident +} + +struct TQueriesMetadataParams { + 1: optional string cluster_name + 2: optional bool relay_to_other_fe + 3: optional TMaterializedViewsMetadataParams materialized_views_params + 4: optional TJobsMetadataParams jobs_params + 5: optional TTasksMetadataParams tasks_params + 6: optional TPartitionsMetadataParams partitions_params + 7: optional TPartitionValuesMetadataParams partition_values_params +} + +struct TMetaCacheStatsParams { +} + struct TMetaScanRange { 1: optional Types.TMetadataType metadata_type 2: optional TIcebergMetadataParams iceberg_params 3: optional TBackendsMetadataParams backends_params 4: optional TFrontendsMetadataParams frontends_params + 5: optional TQueriesMetadataParams queries_params + 6: optional TMaterializedViewsMetadataParams materialized_views_params + 7: optional TJobsMetadataParams jobs_params + 8: optional TTasksMetadataParams tasks_params + 9: optional TPartitionsMetadataParams partitions_params + 10: optional TMetaCacheStatsParams meta_cache_stats_params + 11: optional TPartitionValuesMetadataParams partition_values_params } // Specification of an individual data range which is held in its entirety @@ -605,6 +724,7 @@ struct TSchemaScanNode { 12: optional bool show_hidden_cloumns = false // 13: optional list table_structure // deprecated 14: optional string catalog + 15: optional list fe_addr_list } struct TMetaScanNode { @@ -657,11 +777,12 @@ struct TOlapScanNode { 10: optional i64 sort_limit 11: optional bool enable_unique_key_merge_on_write 12: optional TPushAggOp push_down_agg_type_opt 
//Deprecated - 13: optional bool use_topn_opt + 13: optional bool use_topn_opt // Deprecated 14: optional list indexes_desc 15: optional set output_column_unique_ids 16: optional list distribute_column_ids 17: optional i32 schema_version + 18: optional list topn_filter_source_node_ids //deprecated, move to TPlanNode.106 } struct TEqJoinCondition { @@ -690,7 +811,16 @@ enum TJoinOp { // on the build side. Those NULLs are considered candidate matches, and therefore could // be rejected (ANTI-join), based on the other join conjuncts. This is in contrast // to LEFT_ANTI_JOIN where NULLs are not matches and therefore always returned. - NULL_AWARE_LEFT_ANTI_JOIN + NULL_AWARE_LEFT_ANTI_JOIN, + NULL_AWARE_LEFT_SEMI_JOIN +} + +enum TJoinDistributionType { + NONE, + BROADCAST, + PARTITIONED, + BUCKET_SHUFFLE, + COLOCATE, } struct THashJoinNode { @@ -724,6 +854,10 @@ struct THashJoinNode { 10: optional bool is_broadcast_join 11: optional bool is_mark + 12: optional TJoinDistributionType dist_type + 13: optional list mark_join_conjuncts + // use_specific_projections true, if output exprssions is denoted by srcExprList represents, o.w. PlanNode.projections + 14: optional bool use_specific_projections } struct TNestedLoopJoinNode { @@ -743,6 +877,10 @@ struct TNestedLoopJoinNode { 7: optional bool is_mark 8: optional list join_conjuncts + + 9: optional list mark_join_conjuncts + // use_specific_projections true, if output exprssions is denoted by srcExprList represents, o.w. PlanNode.projections + 10: optional bool use_specific_projections } struct TMergeJoinNode { @@ -810,7 +948,8 @@ struct TAggregationNode { 6: optional bool use_streaming_preaggregation 7: optional list agg_sort_infos 8: optional bool is_first_phase - // 9: optional bool use_fixed_length_serialization_opt + 9: optional bool is_colocate + 10: optional TSortInfo agg_sort_info_by_group_key } struct TRepeatNode { @@ -832,6 +971,12 @@ struct TPreAggregationNode { 2: required list aggregate_exprs } +enum TSortAlgorithm { + HEAP_SORT, + TOPN_SORT, + FULL_SORT + } + struct TSortNode { 1: required TSortInfo sort_info // Indicates whether the backend service should use topn vs. sorting @@ -841,7 +986,11 @@ struct TSortNode { // Indicates whether the imposed limit comes DEFAULT_ORDER_BY_LIMIT. 6: optional bool is_default_limit - 7: optional bool use_topn_opt + 7: optional bool use_topn_opt // Deprecated + 8: optional bool merge_by_exchange + 9: optional bool is_analytic_sort + 10: optional bool is_colocate + 11: optional TSortAlgorithm algorithm } enum TopNAlgorithm { @@ -850,12 +999,20 @@ enum TopNAlgorithm { ROW_NUMBER } +enum TPartTopNPhase { + UNKNOWN, + ONE_PHASE_GLOBAL, + TWO_PHASE_LOCAL, + TWO_PHASE_GLOBAL +} + struct TPartitionSortNode { 1: optional list partition_exprs 2: optional TSortInfo sort_info 3: optional bool has_global_limit 4: optional TopNAlgorithm top_n_algorithm 5: optional i64 partition_inner_limit + 6: optional TPartTopNPhase ptopn_phase } enum TAnalyticWindowType { // Specifies the window as a logical offset @@ -943,6 +1100,8 @@ struct TAnalyticNode { // should be evaluated over a row that is composed of the child tuple and the buffered // tuple 9: optional Exprs.TExpr order_by_eq + + 10: optional bool is_colocate } struct TMergeNode { @@ -977,6 +1136,7 @@ struct TIntersectNode { 3: required list> const_expr_lists // Index of the first child that needs to be materialized. 
4: required i64 first_materialized_child_idx + 5: optional bool is_colocate } struct TExceptNode { @@ -989,6 +1149,7 @@ struct TExceptNode { 3: required list> const_expr_lists // Index of the first child that needs to be materialized. 4: required i64 first_materialized_child_idx + 5: optional bool is_colocate } @@ -1000,6 +1161,8 @@ struct TExchangeNode { 2: optional TSortInfo sort_info // This is tHe number of rows to skip before returning results 3: optional i64 offset + // Shuffle partition type + 4: optional Partitions.TPartitionType partition_type } struct TOlapRewriteNode { @@ -1046,6 +1209,7 @@ struct TAssertNumRowsNode { 1: optional i64 desired_num_rows; 2: optional string subquery_string; 3: optional TAssertion assertion; + 4: optional bool should_convert_output_to_nullable; } enum TRuntimeFilterType { @@ -1056,6 +1220,27 @@ enum TRuntimeFilterType { BITMAP = 16 } +// generate min-max runtime filter for non-equal condition or equal condition. +enum TMinMaxRuntimeFilterType { + // only min is valid, RF generated according to condition: n < col_A + MIN = 1 + // only max is valid, RF generated according to condition: m > col_A + MAX = 2 + // both min/max are valid, + // support hash join condition: col_A = col_B + // support other join condition: n < col_A and col_A < m + MIN_MAX = 4 +} + +struct TTopnFilterDesc { + // topn node id + 1: required i32 source_node_id + 2: required bool is_asc + 3: required bool null_first + // scan node id -> expr on scan node + 4: required map target_node_id_to_target_expr +} + // Specification of a runtime filter. struct TRuntimeFilterDesc { // Filter unique id (within a query) @@ -1095,9 +1280,24 @@ struct TRuntimeFilterDesc { // for bitmap filter 11: optional bool bitmap_filter_not_in - 12: optional bool opt_remote_rf; + 12: optional bool opt_remote_rf; // Deprecated + + // for min/max rf + 13: optional TMinMaxRuntimeFilterType min_max_type; + + // true, if bloom filter size is calculated by ndv + // if bloom_filter_size_calculated_by_ndv=false, BE could calculate filter size according to the actural row count, and + // ignore bloom_filter_size_bytes + 14: optional bool bloom_filter_size_calculated_by_ndv; + + // true, if join type is null aware like <=>. rf should dispose the case + 15: optional bool null_aware; + + 16: optional bool sync_filter_size; } + + struct TDataGenScanNode { 1: optional Types.TTupleId tuple_id 2: optional TDataGenFunctionName func_name @@ -1170,10 +1370,19 @@ struct TPlanNode { 48: optional TPushAggOp push_down_agg_type_opt 49: optional i64 push_down_count - + + 50: optional list> distribute_expr_lists + 51: optional bool is_serial_operator + // projections is final projections, which means projecting into results and materializing them into the output block. 101: optional list projections 102: optional Types.TTupleId output_tuple_id 103: optional TPartitionSortNode partition_sort_node + // Intermediate projections will not materialize into the output block. 
+ 104: optional list> intermediate_projections_list + 105: optional list intermediate_output_tuple_id_list + + 106: optional list topn_filter_source_node_ids + 107: optional i32 nereids_id } // A flattened representation of a tree of PlanNodes, obtained by depth-first diff --git a/pkg/rpc/thrift/Planner.thrift b/pkg/rpc/thrift/Planner.thrift index 08205b93..866d8d45 100644 --- a/pkg/rpc/thrift/Planner.thrift +++ b/pkg/rpc/thrift/Planner.thrift @@ -23,6 +23,7 @@ include "Exprs.thrift" include "DataSinks.thrift" include "PlanNodes.thrift" include "Partitions.thrift" +include "QueryCache.thrift" // TPlanFragment encapsulates info needed to execute a particular // plan fragment, including how to produce and how to partition its output. @@ -61,6 +62,8 @@ struct TPlanFragment { // sink) in a single instance of this fragment. This is used for an optimization in // InitialReservation. Measured in bytes. required in V1 8: optional i64 initial_reservation_total_claims + + 9: optional QueryCache.TQueryCacheParam query_cache_param } // location information for a single scan range @@ -79,4 +82,4 @@ struct TScanRangeLocations { 1: required PlanNodes.TScanRange scan_range // non-empty list 2: list locations -} +} \ No newline at end of file diff --git a/pkg/rpc/thrift/QueryCache.thrift b/pkg/rpc/thrift/QueryCache.thrift new file mode 100644 index 00000000..048b27f4 --- /dev/null +++ b/pkg/rpc/thrift/QueryCache.thrift @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +namespace cpp doris +namespace java org.apache.doris.thrift + +struct TQueryCacheParam { + 1: optional i32 node_id + + 2: optional binary digest + + // the query slots order can different to the query cache slots order, + // so we should mapping current slot id in planNode to normalized slot id + // say: + // SQL1: select id, count(*) cnt, sum(value) s from tbl group by id + // SQL2: select sum(value) s, count(*) cnt, id from tbl group by id + // the id always has normalized slot id 0, + // the cnt always has normalized slot id 1 + // the s always has normalized slot id 2 + // but in SQL1, id, cnt, s can has slot id 5, 6, 7 + // in SQL2, s, cnt, id can has slot id 10, 11, 12 + // if generate plan cache in SQL1, we will make output_slot_mapping: {5: 0, 6: 1, 7: 2}, + // the SQL2 read plan cache and make output_slot_mapping: {10: 2, 11: 1, 12: 0}, + // even the select order is different, the normalized slot id is always equals: + // the id always is 0, the cnt always is 1, the s always is 2. + // then backend can mapping the current slots in the tuple to the query cached slots + 3: optional map output_slot_mapping + + // mapping tablet to filter range, + // BE will use as the key to search query cache. 
+ // note that, BE not care what the filter range content is, just use as the part of the key. + 4: optional map tablet_to_range + + 5: optional bool force_refresh_query_cache + + 6: optional i64 entry_max_bytes + + 7: optional i64 entry_max_rows +} \ No newline at end of file diff --git a/pkg/rpc/thrift/RuntimeProfile.thrift b/pkg/rpc/thrift/RuntimeProfile.thrift index 36505095..0b4b6179 100644 --- a/pkg/rpc/thrift/RuntimeProfile.thrift +++ b/pkg/rpc/thrift/RuntimeProfile.thrift @@ -25,6 +25,7 @@ struct TCounter { 1: required string name 2: required Metrics.TUnit type 3: required i64 value + 4: optional i64 level } // A single runtime profile @@ -51,6 +52,8 @@ struct TRuntimeProfileNode { 8: required map> child_counters_map 9: required i64 timestamp + + 10: optional bool is_sink } // A flattened tree of runtime profiles, obtained by an diff --git a/pkg/rpc/thrift/Status.thrift b/pkg/rpc/thrift/Status.thrift index 06083b9a..7bdafc59 100644 --- a/pkg/rpc/thrift/Status.thrift +++ b/pkg/rpc/thrift/Status.thrift @@ -43,20 +43,21 @@ enum TStatusCode { INTERNAL_ERROR = 6, THRIFT_RPC_ERROR = 7, TIMEOUT = 8, - KUDU_NOT_ENABLED = 9, // Deprecated - KUDU_NOT_SUPPORTED_ON_OS = 10, // Deprecated + LIMIT_REACH = 9, // Its ok to reuse this error code, because this error code is not used in 1.1 + //KUDU_NOT_ENABLED = 9, // Deprecated + //KUDU_NOT_SUPPORTED_ON_OS = 10, // Deprecated MEM_ALLOC_FAILED = 11, BUFFER_ALLOCATION_FAILED = 12, MINIMUM_RESERVATION_UNAVAILABLE = 13, PUBLISH_TIMEOUT = 14, LABEL_ALREADY_EXISTS = 15, TOO_MANY_TASKS = 16, - ES_INTERNAL_ERROR = 17, - ES_INDEX_NOT_FOUND = 18, - ES_SHARD_NOT_FOUND = 19, - ES_INVALID_CONTEXTID = 20, - ES_INVALID_OFFSET = 21, - ES_REQUEST_ERROR = 22, + //ES_INTERNAL_ERROR = 17, + //ES_INDEX_NOT_FOUND = 18, + //ES_SHARD_NOT_FOUND = 19, + //ES_INVALID_CONTEXTID = 20, + //ES_INVALID_OFFSET = 21, + //ES_REQUEST_ERROR = 22, END_OF_FILE = 30, NOT_FOUND = 31, @@ -68,22 +69,23 @@ enum TStatusCode { ILLEGAL_STATE = 37, NOT_AUTHORIZED = 38, ABORTED = 39, - REMOTE_ERROR = 40, + //REMOTE_ERROR = 40, //SERVICE_UNAVAILABLE = 41, // Not used any more UNINITIALIZED = 42, - CONFIGURATION_ERROR = 43, + //CONFIGURATION_ERROR = 43, INCOMPLETE = 44, OLAP_ERR_VERSION_ALREADY_MERGED = 45, DATA_QUALITY_ERROR = 46, + INVALID_JSON_PATH = 47, - VEC_EXCEPTION = 50, - VEC_LOGIC_ERROR = 51, - VEC_ILLEGAL_DIVISION = 52, - VEC_BAD_CAST = 53, - VEC_CANNOT_ALLOCATE_MEMORY = 54, - VEC_CANNOT_MUNMAP = 55, - VEC_CANNOT_MREMAP = 56, - VEC_BAD_ARGUMENTS = 57, + //VEC_EXCEPTION = 50, + //VEC_LOGIC_ERROR = 51, + //VEC_ILLEGAL_DIVISION = 52, + //VEC_BAD_CAST = 53, + //VEC_CANNOT_ALLOCATE_MEMORY = 54, + //VEC_CANNOT_MUNMAP = 55, + //VEC_CANNOT_MREMAP = 56, + //VEC_BAD_ARGUMENTS = 57, // Binlog Related from 60 BINLOG_DISABLE = 60, @@ -101,6 +103,15 @@ enum TStatusCode { TABLET_MISSING = 72, NOT_MASTER = 73, + + OBTAIN_LOCK_FAILED = 74, + + SNAPSHOT_EXPIRED = 75, + + // used for cloud + DELETE_BITMAP_LOCK_ERROR = 100, + // Not be larger than 200, see status.h + // And all error code defined here, should also be defined in status.h } struct TStatus { diff --git a/pkg/rpc/thrift/Types.thrift b/pkg/rpc/thrift/Types.thrift index baca98b2..235c1cb2 100644 --- a/pkg/rpc/thrift/Types.thrift +++ b/pkg/rpc/thrift/Types.thrift @@ -94,7 +94,10 @@ enum TPrimitiveType { UNSUPPORTED, VARIANT, LAMBDA_FUNCTION, - AGG_STATE + AGG_STATE, + DECIMAL256, + IPV4, + IPV6 } enum TTypeNodeType { @@ -111,7 +114,17 @@ enum TStorageBackendType { HDFS, JFS, LOCAL, - OFS + OFS, + AZURE +} + +// Enumerates the storage formats for 
inverted indexes in src_backends. +// This enum is used to distinguish between different organizational methods +// of inverted index data, affecting how the index is stored and accessed. +enum TInvertedIndexFileStorageFormat { + DEFAULT, // Default format, unspecified storage method. + V1, // Index per idx: Each index is stored separately based on its identifier. + V2 // Segment id per idx: Indexes are organized based on segment identifiers, grouping indexes by their associated segment. } struct TScalarType { @@ -164,6 +177,7 @@ struct TTypeDesc { 4: optional list sub_types 5: optional bool result_is_nullable 6: optional string function_name + 7: optional i32 be_exec_version } enum TAggregationType { @@ -219,7 +233,13 @@ enum TTaskType { PUSH_COOLDOWN_CONF, PUSH_STORAGE_POLICY, ALTER_INVERTED_INDEX, - GC_BINLOG + GC_BINLOG, + CLEAN_TRASH, + UPDATE_VISIBLE_VERSION, + CLEAN_UDF_CACHE, + + // CLOUD + CALCULATE_DELETE_BITMAP = 1000 } enum TStmtType { @@ -375,6 +395,9 @@ struct TFunction { 11: optional i64 id 12: optional string checksum 13: optional bool vectorized = false + 14: optional bool is_udtf_function = false + 15: optional bool is_static_load = false + 16: optional i64 expiration_time //minutes } enum TJdbcOperation { @@ -395,7 +418,9 @@ enum TOdbcTableType { PRESTO, OCEANBASE, OCEANBASE_ORACLE, - NEBULA + NEBULA, // Deprecated + DB2, + GBASE } struct TJdbcExecutorCtorParams { @@ -421,6 +446,14 @@ struct TJdbcExecutorCtorParams { 8: optional string driver_path 9: optional TOdbcTableType table_type + + 10: optional i32 connection_pool_min_size + 11: optional i32 connection_pool_max_size + 12: optional i32 connection_pool_max_wait_time + 13: optional i32 connection_pool_max_life_time + 14: optional i32 connection_pool_cache_clear_time + 15: optional bool connection_pool_keep_alive + 16: optional i64 catalog_id } struct TJavaUdfExecutorCtorParams { @@ -600,6 +633,8 @@ enum TTableType { JDBC_TABLE, TEST_EXTERNAL_TABLE, MAX_COMPUTE_TABLE, + LAKESOUL_TABLE, + TRINO_CONNECTOR_TABLE } enum TKeysType { @@ -618,6 +653,9 @@ struct TBackend { 1: required string host 2: required TPort be_port 3: required TPort http_port + 4: optional TPort brpc_port + 5: optional bool is_alive + 6: optional i64 id } struct TReplicaInfo { @@ -680,6 +718,12 @@ enum TMergeType { DELETE } +enum TUniqueKeyUpdateMode { + UPSERT, + UPDATE_FIXED_COLUMNS, + UPDATE_FLEXIBLE_COLUMNS +} + enum TSortType { LEXICAL, ZORDER, @@ -688,10 +732,15 @@ enum TSortType { enum TMetadataType { ICEBERG, BACKENDS, - WORKLOAD_GROUPS, FRONTENDS, CATALOGS, FRONTENDS_DISKS, + MATERIALIZED_VIEWS, + JOBS, + TASKS, + WORKLOAD_SCHED_POLICY, + PARTITIONS, + PARTITION_VALUES; } enum TIcebergQueryType { diff --git a/pkg/service/http_service.go b/pkg/service/http_service.go index 4417d0c6..36bd3f63 100644 --- a/pkg/service/http_service.go +++ b/pkg/service/http_service.go @@ -3,9 +3,14 @@ package service import ( "context" "encoding/json" + "flag" "fmt" "net/http" + "reflect" + "strconv" + "strings" + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/selectdb/ccr_syncer/pkg/ccr" "github.com/selectdb/ccr_syncer/pkg/ccr/base" "github.com/selectdb/ccr_syncer/pkg/storage" @@ -15,6 +20,39 @@ import ( log "github.com/sirupsen/logrus" ) +// TODO(Drogon): impl a generic http request handle parse json + +func writeJson(w http.ResponseWriter, data interface{}) { + // if exit in redirect, data == nil, do not write data + if data == nil || (reflect.ValueOf(data).Kind() == reflect.Ptr && reflect.ValueOf(data).IsNil()) { + return + } + + if 
data, err := json.Marshal(data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } else { + w.Write(data) + } +} + +type defaultResult struct { + Success bool `json:"success"` + ErrorMsg string `json:"error_msg,omitempty"` +} + +func newErrorResult(errMsg string) *defaultResult { + return &defaultResult{ + Success: false, + ErrorMsg: errMsg, + } +} + +func newSuccessResult() *defaultResult { + return &defaultResult{ + Success: true, + } +} + type HttpService struct { port int server *http.Server @@ -38,9 +76,12 @@ func NewHttpServer(host string, port int, db storage.DB, jobManager *ccr.JobMana type CreateCcrRequest struct { // must need all fields required - Name string `json:"name,required"` - Src base.Spec `json:"src,required"` - Dest base.Spec `json:"dest,required"` + Name string `json:"name,required"` + Src base.Spec `json:"src,required"` + Dest base.Spec `json:"dest,required"` + SkipError bool `json:"skip_error"` + // For table sync, allow to create ccr job even if the target table already exists. + AllowTableExists bool `json:"allow_table_exists"` } // Stringer @@ -56,35 +97,36 @@ func (s *HttpService) versionHandler(w http.ResponseWriter, r *http.Request) { log.Infof("get version") // Define the version result struct - type vesionResult struct { + type versionResult struct { Version string `json:"version"` } // Create the result object with the current version - result := vesionResult{Version: version.GetVersion()} - - // Write the result as JSON - if data, err := json.Marshal(&result); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else { - w.Write(data) - } + result := versionResult{Version: version.GetVersion()} + writeJson(w, result) } // createCcr creates a new CCR job and adds it to the job manager. // It takes a CreateCcrRequest as input and returns an error if there was a problem creating the job or adding it to the job manager. 
-func (s *HttpService) createCcr(request *CreateCcrRequest) error { +func createCcr(request *CreateCcrRequest, db storage.DB, jobManager *ccr.JobManager) error { log.Infof("create ccr %s", request) - // _job - ctx := ccr.NewJobContext(request.Src, request.Dest, s.db, s.jobManager.GetFactory()) + ctx := &ccr.JobContext{ + Context: context.Background(), + Src: request.Src, + Dest: request.Dest, + SkipError: request.SkipError, + AllowTableExists: request.AllowTableExists, + Db: db, + Factory: jobManager.GetFactory(), + } job, err := ccr.NewJobFromService(request.Name, ctx) if err != nil { return err } // add to job manager - err = s.jobManager.AddJob(job) + err = jobManager.AddJob(job) if err != nil { return err } @@ -92,43 +134,62 @@ func (s *HttpService) createCcr(request *CreateCcrRequest) error { return nil } -func (s *HttpService) isRedirected(jobName string, w http.ResponseWriter) (bool, error) { - belong, err := s.db.GetJobBelong(jobName) +// return exit(bool) +func (s *HttpService) redirect(jobName string, w http.ResponseWriter, r *http.Request) bool { + if jobExist, err := s.db.IsJobExist(jobName); err != nil { + log.Warnf("get job %s exist failed: %+v, uri is %s", jobName, err, r.RequestURI) + result := newErrorResult(err.Error()) + writeJson(w, result) + return true + } else if !jobExist { + log.Warnf("job %s not exist, uri is %s", jobName, r.RequestURI) + result := newErrorResult(fmt.Sprintf("job %s not exist", jobName)) + writeJson(w, result) + return true + } + + belongHost, err := s.db.GetJobBelong(jobName) if err != nil { - return false, err + log.Warnf("get job %s belong failed: %+v, uri is %s", jobName, err, r.RequestURI) + result := newErrorResult(err.Error()) + writeJson(w, result) + return true } - if belong != s.hostInfo { - w.Write([]byte(fmt.Sprintf("%s is located in syncer %s, please redirect to %s", jobName, belong, belong))) - return true, nil + if belongHost == s.hostInfo { + return false } - return false, nil + log.Infof("%s is located in syncer %s, please redirect to %s", jobName, belongHost, belongHost) + redirectUrl := fmt.Sprintf("http://%s", belongHost+r.RequestURI) + http.Redirect(w, r, redirectUrl, http.StatusSeeOther) + log.Infof("the redirect url is %s", redirectUrl) + return true } // HttpServer serving /create_ccr by json http rpc func (s *HttpService) createHandler(w http.ResponseWriter, r *http.Request) { log.Infof("create ccr") + var createResult *defaultResult + defer func() { writeJson(w, createResult) }() + // Parse the JSON request body var request CreateCcrRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { + log.Warnf("create ccr failed: %+v", err) http.Error(w, err.Error(), http.StatusBadRequest) return } // Call the createCcr function to create the CCR - err = s.createCcr(&request) - if err != nil { - log.Errorf("create ccr failed: %+v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return + if err = createCcr(&request, s.db, s.jobManager); err != nil { + log.Warnf("create ccr failed: %+v", err) + createResult = newErrorResult(err.Error()) + } else { + createResult = newSuccessResult() } - - // Write a success response - w.WriteHeader(http.StatusOK) - w.Write([]byte("create ccr success")) } type CcrCommonRequest struct { @@ -140,181 +201,289 @@ type CcrCommonRequest struct { func (s *HttpService) getLagHandler(w http.ResponseWriter, r *http.Request) { log.Infof("get lag") + type result struct { + *defaultResult + Lag int64 `json:"lag"` + } + var lagResult *result + defer func() { writeJson(w, 
lagResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("get lag failed: %+v", err) + + lagResult = &result{ + defaultResult: newErrorResult(err.Error()), + } return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) + log.Warnf("get lag failed: name is empty") + + lagResult = &result{ + defaultResult: newErrorResult("name is empty"), + } return } - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + if exit := s.redirect(request.Name, w, r); exit { return } - lag, err := s.jobManager.GetLag(request.Name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + if lag, err := s.jobManager.GetLag(request.Name); err != nil { + log.Warnf("get lag failed: %+v", err) + + lagResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + } else { + lagResult = &result{ + defaultResult: newSuccessResult(), + Lag: lag, + } } - w.Write([]byte(fmt.Sprintf("lag: %d", lag))) } // Pause service func (s *HttpService) pauseHandler(w http.ResponseWriter, r *http.Request) { log.Infof("pause job") + var pauseResult *defaultResult + defer func() { writeJson(w, pauseResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("pause job failed: %+v", err) + + pauseResult = newErrorResult(err.Error()) return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) + log.Warnf("pause job failed: name is empty") + + pauseResult = newErrorResult("name is empty") return } - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + if s.redirect(request.Name, w, r) { return } - err = s.jobManager.Pause(request.Name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + if err = s.jobManager.Pause(request.Name); err != nil { + log.Warnf("pause job failed: %+v", err) + + pauseResult = newErrorResult(err.Error()) return + } else { + pauseResult = newSuccessResult() } - w.Write([]byte("pause success")) } // Resume service func (s *HttpService) resumeHandler(w http.ResponseWriter, r *http.Request) { log.Infof("resume job") + var resumeResult *defaultResult + defer func() { writeJson(w, resumeResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("resume job failed: %+v", err) + + resumeResult = newErrorResult(err.Error()) return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) + log.Warnf("resume job failed: name is empty") + + resumeResult = newErrorResult("name is empty") return } - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + if s.redirect(request.Name, w, r) { return } - err = s.jobManager.Resume(request.Name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + if err = s.jobManager.Resume(request.Name); err != nil { + log.Warnf("resume job failed: %+v", err) + + resumeResult = 
newErrorResult(err.Error()) return + } else { + resumeResult = newSuccessResult() } - w.Write([]byte("resume success")) } func (s *HttpService) deleteHandler(w http.ResponseWriter, r *http.Request) { log.Infof("delete job") + var deleteResult *defaultResult + defer func() { writeJson(w, deleteResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("delete job failed: %+v", err) + + deleteResult = newErrorResult(err.Error()) return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) + log.Warnf("delete job failed: name is empty") + + deleteResult = newErrorResult("name is empty") return } - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + if s.redirect(request.Name, w, r) { return } - err = s.jobManager.RemoveJob(request.Name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + if err = s.jobManager.RemoveJob(request.Name); err != nil { + log.Warnf("delete job failed: %+v", err) + + deleteResult = newErrorResult(err.Error()) return + } else { + deleteResult = newSuccessResult() } - w.Write([]byte("delete success")) } func (s *HttpService) statusHandler(w http.ResponseWriter, r *http.Request) { log.Infof("get job status") + type result struct { + *defaultResult + JobStatus *ccr.JobStatus `json:"status,omitempty"` + } + var jobStatusResult *result + defer func() { writeJson(w, jobStatusResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("get job status failed: %+v", err) + + jobStatusResult = &result{ + defaultResult: newErrorResult(err.Error()), + } return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) - return - } + log.Warnf("get job status failed: name is empty") - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + jobStatusResult = &result{ + defaultResult: newErrorResult("name is empty"), + } return } - jobStatus, err := s.jobManager.GetJobStatus(request.Name) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + if s.redirect(request.Name, w, r) { return } - // write jobStatus as json - if data, err := json.Marshal(jobStatus); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + + if jobStatus, err := s.jobManager.GetJobStatus(request.Name); err != nil { + log.Warnf("get job status failed: %+v", err) + + jobStatusResult = &result{ + defaultResult: newErrorResult(err.Error()), + } } else { - w.Write(data) + jobStatusResult = &result{ + defaultResult: newSuccessResult(), + JobStatus: jobStatus, + } } } func (s *HttpService) desyncHandler(w http.ResponseWriter, r *http.Request) { log.Infof("desync job") + var desyncResult *defaultResult + defer func() { writeJson(w, desyncResult) }() + // Parse the JSON request body var request CcrCommonRequest err := json.NewDecoder(r.Body).Decode(&request) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + log.Warnf("desync job failed: %+v", err) + + desyncResult = newErrorResult(err.Error()) return } + if request.Name == "" { - http.Error(w, "name is empty", http.StatusBadRequest) + 
log.Warnf("desync job failed: name is empty") + + desyncResult = newErrorResult("name is empty") return } - if isRedirected, err := s.isRedirected(request.Name, w); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else if isRedirected { + if s.redirect(request.Name, w, r) { return } if err := s.jobManager.Desync(request.Name); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + log.Warnf("desync job failed: %+v", err) + + desyncResult = newErrorResult(err.Error()) + } else { + desyncResult = newSuccessResult() + } +} + +type UpdateJobRequest struct { + Name string `json:"name,required"` + SkipError bool `json:"skip_error"` +} + +func (s *HttpService) updateJobHandler(w http.ResponseWriter, r *http.Request) { + log.Infof("update job") + + var updateJobResult *defaultResult + defer func() { writeJson(w, updateJobResult) }() + + // Parse the JSON request body + var request UpdateJobRequest + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + log.Warnf("update job failed: %+v", err) + + updateJobResult = newErrorResult(err.Error()) return } - w.Write([]byte("desync success")) + + if request.Name == "" { + log.Warnf("update job failed: name is empty") + + updateJobResult = newErrorResult("name is empty") + return + } + + if s.redirect(request.Name, w, r) { + return + } + + if err := s.jobManager.UpdateJobSkipError(request.Name, request.SkipError); err != nil { + log.Warnf("desync job failed: %+v", err) + + updateJobResult = newErrorResult(err.Error()) + } else { + updateJobResult = newSuccessResult() + } } // ListJobs service @@ -322,17 +491,254 @@ func (s *HttpService) listJobsHandler(w http.ResponseWriter, r *http.Request) { log.Infof("list jobs") type result struct { - Jobs []*ccr.JobStatus `json:"jobs"` + *defaultResult + Jobs []string `json:"jobs,omitempty"` } - jobs := s.jobManager.ListJobs() - jobResult := result{Jobs: jobs} + var jobResult *result + defer func() { writeJson(w, jobResult) }() - // write jobs as json - if data, err := json.Marshal(&jobResult); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // use GetAllData to get all jobs + if ans, err := s.db.GetAllData(); err != nil { + log.Warnf("when list jobs, get all data failed: %+v", err) + + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } } else { - w.Write(data) + var jobData []string + jobData = ans["jobs"] + allJobs := make([]string, 0) + for _, eachJob := range jobData { + allJobs = append(allJobs, strings.Trim(strings.Split(eachJob, ",")[0], " ")) + } + + jobResult = &result{ + defaultResult: newSuccessResult(), + Jobs: allJobs, + } + } +} + +// get job progress +func (s *HttpService) jobProgressHandler(w http.ResponseWriter, r *http.Request) { + log.Infof("get job progress") + + type result struct { + *defaultResult + JobProgress ccr.JobProgress `json:"job_progress"` + } + + var jobResult *result + defer func() { writeJson(w, jobResult) }() + + // Parse the JSON request body + var request CcrCommonRequest + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + log.Warnf("get job progress failed: %+v", err) + + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + return + } + + if request.Name == "" { + log.Warnf("get job progress failed: name is empty") + + jobResult = &result{ + defaultResult: newErrorResult("name is empty"), + } + return + } + + if s.redirect(request.Name, w, r) { + return + } + + if jobProgressData, err := s.db.GetProgress(request.Name); err != nil { 
+ log.Warnf("get job progress failed: %+v", err) + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + } else { + var jobProgress ccr.JobProgress + err := json.Unmarshal([]byte(jobProgressData), &jobProgress) + if err != nil { + log.Warnf("unmarshal get job progress error") + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + return + } + jobProgress.PersistData = "" + jobResult = &result{ + defaultResult: newSuccessResult(), + JobProgress: jobProgress, + } + } + +} + +// get job details +func (s *HttpService) jobDetailHandler(w http.ResponseWriter, r *http.Request) { + log.Infof("get job detail") + + type result struct { + *defaultResult + JobDetail *ccr.Job `json:"job_detail"` + } + + var jobResult *result + defer func() { writeJson(w, jobResult) }() + + // Parse the JSON request body + var request CcrCommonRequest + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + log.Warnf("get job detail failed: %+v", err) + + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + return + } + + if request.Name == "" { + log.Warnf("get job detail failed: name is empty") + + jobResult = &result{ + defaultResult: newErrorResult("name is empty"), + } + return + } + + if s.redirect(request.Name, w, r) { + return + } + + var jobDetail ccr.Job + if jobDetailStr, err := s.db.GetJobInfo(request.Name); err != nil { + log.Warnf("get job info failed: %+v", err) + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + } else if err = json.Unmarshal([]byte(jobDetailStr), &jobDetail); err != nil { + log.Warnf("unmarshal job info failed: %+v", err) + jobResult = &result{ + defaultResult: newErrorResult(err.Error()), + } + } else { + jobResult = &result{ + defaultResult: newSuccessResult(), + JobDetail: &jobDetail, + } + } +} + +func (s *HttpService) forceFullsyncHandler(w http.ResponseWriter, r *http.Request) { + log.Infof("force job fullsync") + + var result *defaultResult + defer func() { writeJson(w, result) }() + + // Parse the JSON request body + var request CcrCommonRequest + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + log.Warnf("force job fullsync failed: %+v", err) + result = newErrorResult(err.Error()) + return + } + + if request.Name == "" { + log.Warnf("force job fullsync: name is empty") + result = newErrorResult("job name is empty") + return + } + + if s.redirect(request.Name, w, r) { + return + } + + if err := s.jobManager.ForceFullsync(request.Name); err != nil { + log.Warnf("force fullsync failed: %+v", err) + result = newErrorResult(err.Error()) + } else { + result = newSuccessResult() + } +} + +func (s *HttpService) featuresHandler(w http.ResponseWriter, r *http.Request) { + type flagValue struct { + Feature string `json:"feature"` + Value bool `json:"value"` + DefValue string `json:"default"` + } + type flagListResult struct { + *defaultResult + Flags []flagValue `json:"flags"` + } + + var result flagListResult + result.defaultResult = newSuccessResult() + defer func() { writeJson(w, &result) }() + + flag.VisitAll(func(flag *flag.Flag) { + if !strings.HasPrefix(flag.Name, "feature") { + return + } + + valueStr := flag.Value.String() + value, err := strconv.ParseBool(valueStr) + if err != nil { + // ignore any non-bool flags + return + } + + result.Flags = append(result.Flags, flagValue{ + Feature: flag.Name, Value: value, DefValue: flag.DefValue, + }) + }) +} + +func (s *HttpService) updateHostMappingHandler(w http.ResponseWriter, r *http.Request) { + log.Infof("update host 
mapping") + + var result *defaultResult + defer func() { writeJson(w, result) }() + + // Parse the JSON request body + var request struct { + CcrCommonRequest + SrcHostMapping map[string]string `json:"src_host_mapping,required"` + DestHostMapping map[string]string `json:"dest_host_mapping,required"` + } + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + log.Warnf("update host mapping failed: %+v", err) + result = newErrorResult(err.Error()) + return + } + + if request.Name == "" { + log.Warnf("update host mapping failed: name is empty") + result = newErrorResult("name is empty") + return + } + + if len(request.SrcHostMapping) == 0 && len(request.DestHostMapping) == 0 { + log.Warnf("update host mapping failed: src/dest_host_mapping is empty") + result = newErrorResult("host_mapping is empty") + return + } + + if err := s.jobManager.UpdateHostMapping(request.Name, request.SrcHostMapping, request.DestHostMapping); err != nil { + log.Warnf("update host mapping failed: %+v", err) + result = newErrorResult(err.Error()) + } else { + result = newSuccessResult() } } @@ -345,7 +751,14 @@ func (s *HttpService) RegisterHandlers() { s.mux.HandleFunc("/delete", s.deleteHandler) s.mux.HandleFunc("/job_status", s.statusHandler) s.mux.HandleFunc("/desync", s.desyncHandler) + s.mux.HandleFunc("/update_job", s.updateJobHandler) s.mux.HandleFunc("/list_jobs", s.listJobsHandler) + s.mux.HandleFunc("/job_detail", s.jobDetailHandler) + s.mux.HandleFunc("/job_progress", s.jobProgressHandler) + s.mux.HandleFunc("/force_fullsync", s.forceFullsyncHandler) + s.mux.HandleFunc("/features", s.featuresHandler) + s.mux.HandleFunc("/update_host_mapping", s.updateHostMappingHandler) + s.mux.Handle("/metrics", promhttp.Handler()) } func (s *HttpService) Start() error { @@ -366,6 +779,8 @@ func (s *HttpService) Start() error { } } +// Stop stops the HTTP server gracefully. +// It returns an error if the server shutdown fails. 
func (s *HttpService) Stop() error { if err := s.server.Shutdown(context.TODO()); err != nil { return xerror.Wrapf(err, xerror.Normal, "http server close failed") diff --git a/pkg/storage/db.go b/pkg/storage/db.go index adbcd88a..668e0e98 100644 --- a/pkg/storage/db.go +++ b/pkg/storage/db.go @@ -8,8 +8,8 @@ var ( ) const ( - InvalidCheckTimestamp int64 = -1 - remoteDBName string = "ccr" + InvalidCheckTimestamp int64 = -1 + remoteDBName string = "ccr" ) type DB interface { diff --git a/pkg/storage/mysql.go b/pkg/storage/mysql.go index 6d2f7553..4e15644c 100644 --- a/pkg/storage/mysql.go +++ b/pkg/storage/mysql.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "encoding/base64" + "flag" "fmt" "time" @@ -11,12 +12,23 @@ import ( "github.com/selectdb/ccr_syncer/pkg/xerror" ) +const ( + defaultMaxAllowedPacket = 1024 * 1024 * 1024 +) + +var maxAllowedPacket int64 + +func init() { + flag.Int64Var(&maxAllowedPacket, "mysql_max_allowed_packet", defaultMaxAllowedPacket, + "Config the max allowed packet to send to mysql server, the upper limit is 1GB") +} + type MysqlDB struct { db *sql.DB } func NewMysqlDB(host string, port int, user string, password string) (DB, error) { - dbForDDL, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port)) + dbForDDL, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/?maxAllowedPacket=%d", user, password, host, port, maxAllowedPacket)) if err != nil { return nil, xerror.Wrapf(err, xerror.DB, "mysql: open %s@tcp(%s:%s) failed", user, host, password) } @@ -26,7 +38,7 @@ func NewMysqlDB(host string, port int, user string, password string) (DB, error) } dbForDDL.Close() - db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", user, password, host, port, remoteDBName)) + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?maxAllowedPacket=%d", user, password, host, port, remoteDBName, maxAllowedPacket)) if err != nil { return nil, xerror.Wrapf(err, xerror.DB, "mysql: open mysql in db %s@tcp(%s:%d)/%s failed", user, host, port, remoteDBName) } @@ -210,7 +222,6 @@ func (s *MysqlDB) RefreshSyncer(hostInfo string, lastStamp int64) (int64, error) func (s *MysqlDB) GetStampAndJobs(hostInfo string) (int64, []string, error) { txn, err := s.db.BeginTx(context.Background(), &sql.TxOptions{ Isolation: sql.LevelRepeatableRead, - ReadOnly: true, }) if err != nil { return -1, nil, xerror.Wrapf(err, xerror.DB, "mysql: begin IMMEDIATE transaction failed.") diff --git a/pkg/storage/postgresql.go b/pkg/storage/postgresql.go new file mode 100644 index 00000000..8efbbb3d --- /dev/null +++ b/pkg/storage/postgresql.go @@ -0,0 +1,404 @@ +package storage + +import ( + "context" + "database/sql" + "encoding/base64" + "fmt" + "time" + + _ "github.com/lib/pq" + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +type PostgresqlDB struct { + db *sql.DB +} + +func NewPostgresqlDB(host string, port int, user string, password string) (DB, error) { + url := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=disable", user, password, host, port, "postgres") + db, err := sql.Open("postgres", url) + if err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: open %s:%d failed", host, port) + } + + if _, err := db.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", remoteDBName)); err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: create schema %s failed", remoteDBName) + } + + if _, err = db.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.jobs (job_name VARCHAR(512) PRIMARY KEY, job_info TEXT, belong_to VARCHAR(96))", 
remoteDBName)); err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: create table jobs failed") + } + + if _, err = db.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.progresses (job_name VARCHAR(512) PRIMARY KEY, progress TEXT)", remoteDBName)); err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: create table progresses failed") + } + + if _, err = db.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.syncers (host_info VARCHAR(96) PRIMARY KEY, timestamp BIGINT)", remoteDBName)); err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: create table syncers failed") + } + + return &PostgresqlDB{db: db}, nil +} + +func (s *PostgresqlDB) AddJob(jobName string, jobInfo string, hostInfo string) error { + // check job name exists, if exists, return error + var count int + if err := s.db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s.jobs WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&count); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: query job name %s failed", jobName) + } + + if count > 0 { + return ErrJobExists + } + + // insert job info + insertSql := fmt.Sprintf("INSERT INTO %s.jobs (job_name, job_info, belong_to) VALUES ('%s', '%s', '%s')", remoteDBName, jobName, jobInfo, hostInfo) + if _, err := s.db.Exec(insertSql); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: insert job name %s failed", jobName) + } else { + return nil + } +} + +// Update Job +func (s *PostgresqlDB) UpdateJob(jobName string, jobInfo string) error { + // check job name exists, if not exists, return error + var count int + if err := s.db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s.jobs WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&count); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: query job name %s failed", jobName) + } + + if count == 0 { + return ErrJobNotExists + } + + // update job info + if _, err := s.db.Exec(fmt.Sprintf("UPDATE %s.jobs SET job_info = '%s' WHERE job_name = '%s'", remoteDBName, jobInfo, jobName)); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: update job name %s failed", jobName) + } else { + return nil + } +} + +func (s *PostgresqlDB) RemoveJob(jobName string) error { + txn, err := s.db.BeginTx(context.Background(), &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: false, + }) + if err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: remove job begin transaction failed, name: %s", jobName) + } + + if _, err := txn.Exec(fmt.Sprintf("DELETE FROM %s.jobs WHERE job_name = '%s'", remoteDBName, jobName)); err != nil { + if err := txn.Rollback(); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: remove job failed, name: %s, and rollback failed too", jobName) + } + return xerror.Wrapf(err, xerror.DB, "postgresql: remove job failed, name: %s", jobName) + } + + if _, err := txn.Exec(fmt.Sprintf("DELETE FROM %s.progresses WHERE job_name = '%s'", remoteDBName, jobName)); err != nil { + if err := txn.Rollback(); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: remove progresses failed, name: %s, and rollback failed too", jobName) + } + return xerror.Wrapf(err, xerror.DB, "postgresql: remove progresses failed, name: %s", jobName) + } + + if err := txn.Commit(); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: remove job txn commit failed.") + } + + return nil +} + +func (s *PostgresqlDB) IsJobExist(jobName string) (bool, error) { + var count int + if err := s.db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s.jobs 
WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&count); err != nil { + return false, xerror.Wrapf(err, xerror.DB, "postgresql: query job name %s failed", jobName) + } else { + return count > 0, nil + } +} + +func (s *PostgresqlDB) GetJobInfo(jobName string) (string, error) { + var jobInfo string + if err := s.db.QueryRow(fmt.Sprintf("SELECT job_info FROM %s.jobs WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&jobInfo); err != nil { + return "", xerror.Wrapf(err, xerror.DB, "postgresql: get job failed, name: %s", jobName) + } + return jobInfo, nil +} + +func (s *PostgresqlDB) GetJobBelong(jobName string) (string, error) { + var belong string + if err := s.db.QueryRow(fmt.Sprintf("SELECT belong_to FROM %s.jobs WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&belong); err != nil { + return "", xerror.Wrapf(err, xerror.DB, "postgresql: get job belong failed, name: %s", jobName) + } + return belong, nil +} + +func (s *PostgresqlDB) UpdateProgress(jobName string, progress string) error { + // quoteProgress := strings.ReplaceAll(progress, "\"", "\\\"") + encodeProgress := base64.StdEncoding.EncodeToString([]byte(progress)) + updateSql := fmt.Sprintf("INSERT INTO %s.progresses (job_name, progress) VALUES ('%s', '%s') ON CONFLICT (job_name) DO UPDATE SET progress = EXCLUDED.progress", remoteDBName, jobName, encodeProgress) + if result, err := s.db.Exec(updateSql); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: update progress failed") + } else if rowNum, err := result.RowsAffected(); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: update progress get affected rows failed") + } else if rowNum != 1 { + return xerror.Wrapf(err, xerror.DB, "postgresql: update progress affected rows error, rows: %d", rowNum) + } + + return nil +} + +func (s *PostgresqlDB) IsProgressExist(jobName string) (bool, error) { + var count int + if err := s.db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s.progresses WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&count); err != nil { + return false, xerror.Wrapf(err, xerror.DB, "postgresql: query job name %s failed", jobName) + } + + return count > 0, nil +} + +func (s *PostgresqlDB) GetProgress(jobName string) (string, error) { + var progress string + if err := s.db.QueryRow(fmt.Sprintf("SELECT progress FROM %s.progresses WHERE job_name = '%s'", remoteDBName, jobName)).Scan(&progress); err != nil { + return "", xerror.Wrapf(err, xerror.DB, "postgresql: query progress failed") + } + decodeProgress, err := base64.StdEncoding.DecodeString(progress) + if err != nil { + return "", xerror.Errorf(xerror.DB, "postgresql: base64 decode error") + } + + return string(decodeProgress), nil +} + +func (s *PostgresqlDB) AddSyncer(hostInfo string) error { + timestamp := time.Now().UnixNano() + addSql := fmt.Sprintf("INSERT INTO %s.syncers (host_info, timestamp) VALUES ('%s', %d) ON CONFLICT (host_info) DO UPDATE SET timestamp = EXCLUDED.timestamp", remoteDBName, hostInfo, timestamp) + if result, err := s.db.Exec(addSql); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: add syncer failed") + } else if rowNum, err := result.RowsAffected(); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: add syncer get affected rows failed") + } else if rowNum != 1 { + return xerror.Wrapf(err, xerror.DB, "postgresql: add syncer affected rows error, rows: %d", rowNum) + } + + return nil +} + +func (s *PostgresqlDB) RefreshSyncer(hostInfo string, lastStamp int64) (int64, error) { + nowTime := time.Now().UnixNano() + refreshSql := 
fmt.Sprintf("UPDATE %s.syncers SET timestamp = %d WHERE host_info = '%s' AND timestamp = %d", remoteDBName, nowTime, hostInfo, lastStamp) + result, err := s.db.Exec(refreshSql) + if err != nil { + return -1, xerror.Wrapf(err, xerror.DB, "postgresql: refresh syncer failed.") + } + + if rowNum, err := result.RowsAffected(); err != nil { + return -1, xerror.Wrapf(err, xerror.DB, "postgresql: get RowsAffected failed.") + } else if rowNum != 1 { + return -1, nil + } else { + return nowTime, nil + } +} + +func (s *PostgresqlDB) GetStampAndJobs(hostInfo string) (int64, []string, error) { + txn, err := s.db.BeginTx(context.Background(), &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + if err != nil { + return -1, nil, xerror.Wrapf(err, xerror.DB, "postgresql: begin IMMEDIATE transaction failed.") + } + + var timestamp int64 + if err := txn.QueryRow(fmt.Sprintf("SELECT timestamp FROM %s.syncers WHERE host_info = '%s'", remoteDBName, hostInfo)).Scan(×tamp); err != nil { + return -1, nil, xerror.Wrapf(err, xerror.DB, "postgresql: get stamp failed.") + } + + jobs := make([]string, 0) + rows, err := s.db.Query(fmt.Sprintf("SELECT job_name FROM %s.jobs WHERE belong_to = '%s'", remoteDBName, hostInfo)) + if err != nil { + return -1, nil, xerror.Wrapf(err, xerror.DB, "postgresql: get job_nums failed.") + } + defer rows.Close() + + for rows.Next() { + var jobName string + if err := rows.Scan(&jobName); err != nil { + return -1, nil, xerror.Wrapf(err, xerror.DB, "postgresql: scan job_name failed.") + } + jobs = append(jobs, jobName) + } + + if err := txn.Commit(); err != nil { + return -1, nil, xerror.Wrapf(err, xerror.DB, "postgresql: get jobs & stamp txn commit failed.") + } + + return timestamp, jobs, nil +} + +func (s *PostgresqlDB) GetDeadSyncers(expiredTime int64) ([]string, error) { + row, err := s.db.Query(fmt.Sprintf("SELECT host_info FROM %s.syncers WHERE timestamp < %d", remoteDBName, expiredTime)) + if err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: get orphan job info failed.") + } + defer row.Close() + deadSyncers := make([]string, 0) + for row.Next() { + var hostInfo string + if err := row.Scan(&hostInfo); err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: scan host_info and jobs failed") + } + deadSyncers = append(deadSyncers, hostInfo) + } + + return deadSyncers, nil +} + +func (s *PostgresqlDB) getOrphanJobs(txn *sql.Tx, syncers []string) ([]string, error) { + orphanJobs := make([]string, 0) + for _, deadSyncer := range syncers { + rows, err := txn.Query(fmt.Sprintf("SELECT job_name FROM %s.jobs WHERE belong_to = '%s'", remoteDBName, deadSyncer)) + if err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: get orphan jobs failed.") + } + + for rows.Next() { + var jobName string + if err := rows.Scan(&jobName); err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: scan orphan job name failed.") + } + orphanJobs = append(orphanJobs, jobName) + } + rows.Close() + + if _, err := txn.Exec(fmt.Sprintf("DELETE FROM %s.syncers WHERE host_info = '%s'", remoteDBName, deadSyncer)); err != nil { + return nil, xerror.Wrapf(err, xerror.DB, "postgresql: delete dead syncer failed, name: %s", deadSyncer) + } + } + + return orphanJobs, nil +} + +func (s *PostgresqlDB) getLoadInfo(txn *sql.Tx) (LoadSlice, int, error) { + load := make(LoadSlice, 0) + sumLoad := 0 + host_rows, err := txn.Query(fmt.Sprintf("SELECT host_info FROM %s.syncers", remoteDBName)) + if err != nil { + return nil, -1, xerror.Wrapf(err, 
xerror.DB, "postgresql: get all syncers failed.") + } + for host_rows.Next() { + loadInfo := LoadInfo{AddedLoad: 0} + if err := host_rows.Scan(&loadInfo.HostInfo); err != nil { + return nil, -1, xerror.Wrapf(err, xerror.DB, "postgresql: scan load info failed.") + } + load = append(load, loadInfo) + } + host_rows.Close() + + for i := range load { + if err := txn.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s.jobs WHERE belong_to = '%s'", remoteDBName, load[i].HostInfo)).Scan(&load[i].NowLoad); err != nil { + return nil, -1, xerror.Wrapf(err, xerror.DB, "postgresql: get syncer %s load failed.", load[i].HostInfo) + } + sumLoad += load[i].NowLoad + } + + return load, sumLoad, nil +} + +func (s *PostgresqlDB) dispatchJobs(txn *sql.Tx, hostInfo string, additionalJobs []string) error { + for _, jobName := range additionalJobs { + if _, err := txn.Exec(fmt.Sprintf("UPDATE %s.jobs SET belong_to = '%s' WHERE job_name = '%s'", remoteDBName, hostInfo, jobName)); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: update job belong_to failed, name: %s", jobName) + } + } + if _, err := txn.Exec(fmt.Sprintf("UPDATE %s.syncers SET timestamp = %d WHERE host_info = '%s'", remoteDBName, time.Now().UnixNano(), hostInfo)); err != nil { + return xerror.Wrapf(err, xerror.DB, "postgresql: update syncer timestamp failed, host: %s", hostInfo) + } + + return nil +} + +func (s *PostgresqlDB) RebalanceLoadFromDeadSyncers(syncers []string) error { + txn, err := s.db.BeginTx(context.Background(), &sql.TxOptions{ + Isolation: sql.LevelSerializable, + ReadOnly: false, + }) + if err != nil { + return xerror.Wrap(err, xerror.DB, "postgresql: rebalance load begin txn failed") + } + + orphanJobs, err := s.getOrphanJobs(txn, syncers) + if err != nil { + return err + } + + additionalLoad := len(orphanJobs) + loadList, currentLoad, err := s.getLoadInfo(txn) + if err != nil { + return err + } + + loadList, err = RebalanceLoad(additionalLoad, currentLoad, loadList) + if err != nil { + return err + } + for i := range loadList { + beginIdx := additionalLoad - loadList[i].AddedLoad + if err := s.dispatchJobs(txn, loadList[i].HostInfo, orphanJobs[beginIdx:additionalLoad]); err != nil { + if err := txn.Rollback(); err != nil { + return xerror.Wrap(err, xerror.DB, "postgresql: rebalance rollback failed.") + } + return err + } + additionalLoad = beginIdx + } + + if err := txn.Commit(); err != nil { + return xerror.Wrap(err, xerror.DB, "postgresql: rebalance txn commit failed.") + } + + return nil +} + +func (s *PostgresqlDB) GetAllData() (map[string][]string, error) { + ans := make(map[string][]string) + + jobRows, err := s.db.Query(fmt.Sprintf("SELECT job_name, belong_to FROM %s.jobs", remoteDBName)) + if err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: get jobs data failed.") + } + jobData := make([]string, 0) + for jobRows.Next() { + var jobName string + var belongTo string + if err := jobRows.Scan(&jobName, &belongTo); err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: scan jobs row failed.") + } + jobData = append(jobData, fmt.Sprintf("%s, %s", jobName, belongTo)) + } + ans["jobs"] = jobData + jobRows.Close() + + syncerRows, err := s.db.Query(fmt.Sprintf("SELECT * FROM %s.syncers", remoteDBName)) + if err != nil { + return nil, xerror.Wrap(err, xerror.DB, "postgresql: get jobs data failed.") + } + + syncerData := make([]string, 0) + for syncerRows.Next() { + var hostInfo string + var timestamp int64 + if err := syncerRows.Scan(&hostInfo, ×tamp); err != nil { + return nil, 
xerror.Wrap(err, xerror.DB, "postgresql: scan syncers row failed.") + } + syncerData = append(syncerData, fmt.Sprintf("%s, %d", hostInfo, timestamp)) + } + ans["syncers"] = syncerData + syncerRows.Close() + + return ans, nil +} diff --git a/pkg/utils/array.go b/pkg/utils/array.go new file mode 100644 index 00000000..b3b194d4 --- /dev/null +++ b/pkg/utils/array.go @@ -0,0 +1,8 @@ +package utils + +func FirstOr[T any](array []T, def T) T { + if len(array) == 0 { + return def + } + return array[0] +} diff --git a/pkg/utils/gzip.go b/pkg/utils/gzip.go new file mode 100644 index 00000000..18d439b4 --- /dev/null +++ b/pkg/utils/gzip.go @@ -0,0 +1,31 @@ +package utils + +import ( + "bytes" + "compress/gzip" + "io" +) + +func GZIPDecompress(data []byte) ([]byte, error) { + buf := bytes.NewReader(data) + reader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + defer reader.Close() + + return io.ReadAll(reader) +} + +func GZIPCompress(data []byte) ([]byte, error) { + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/pkg/utils/log.go b/pkg/utils/log.go index 35b039b9..8949f774 100644 --- a/pkg/utils/log.go +++ b/pkg/utils/log.go @@ -33,18 +33,17 @@ func InitLog() { log.SetLevel(level) log.SetFormatter(&prefixed.TextFormatter{ FullTimestamp: true, - TimestampFormat: "2006-01-02 15:04:05", + TimestampFormat: "2006-01-02 15:04:05.000", ForceFormatting: true, }) syncHook := NewHook() log.AddHook(syncHook) - if level > log.InfoLevel { - // log.SetReportCaller(true), caller by filename - filenameHook := filename.NewHook() - filenameHook.Field = "line" - log.AddHook(filenameHook) - } + + // log.SetReportCaller(true), caller by filename + filenameHook := filename.NewHook() + filenameHook.Field = "line" + log.AddHook(filenameHook) if logFilename == "" { log.SetOutput(os.Stdout) @@ -54,7 +53,7 @@ func InitLog() { // TODO: Add write permission check output := &lumberjack.Logger{ Filename: logFilename, - MaxSize: 100, + MaxSize: 1024, // 1GB MaxAge: 7, MaxBackups: 30, LocalTime: true, diff --git a/pkg/utils/map.go b/pkg/utils/map.go new file mode 100644 index 00000000..6eb87142 --- /dev/null +++ b/pkg/utils/map.go @@ -0,0 +1,23 @@ +package utils + +// CopyMap returns a new map with the same key-value pairs as the input map. +// The input map must have keys and values of comparable types. +// but key and value is not deep copy +func CopyMap[K, V comparable](m map[K]V) map[K]V { + result := make(map[K]V) + for k, v := range m { + result[k] = v + } + return result +} + +// MergeMap returns a new map with all key-value pairs from both input maps. 
+func MergeMap[K comparable, V any](m1, m2 map[K]V) map[K]V { + if m1 == nil { + m1 = make(map[K]V, len(m2)) + } + for k, v := range m2 { + m1[k] = v + } + return m1 +} diff --git a/pkg/utils/map_test.go b/pkg/utils/map_test.go new file mode 100644 index 00000000..518c7ebd --- /dev/null +++ b/pkg/utils/map_test.go @@ -0,0 +1,33 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCopyMap(t *testing.T) { + // Test with string keys and int values + m1 := map[string]int{"a": 1, "b": 2, "c": 3} + m2 := CopyMap(m1) + assert.Equal(t, m1, m2) + // update + m1["c"] = 4 + assert.NotEqual(t, m1, m2) + + // Test with int keys and string values + m3 := map[int]string{1: "a", 2: "b", 3: "c"} + m4 := CopyMap(m3) + assert.Equal(t, m3, m4) + // update + m3[3] = "d" + assert.NotEqual(t, m3, m4) + + // Test with float keys and bool values + m5 := map[float64]bool{1.1: true, 2.2: false, 3.3: true} + m6 := CopyMap(m5) + assert.Equal(t, m5, m6) + // update + m5[3.3] = false + assert.NotEqual(t, m5, m6) +} diff --git a/pkg/utils/sql.go b/pkg/utils/sql.go index b612413a..6d3c4983 100644 --- a/pkg/utils/sql.go +++ b/pkg/utils/sql.go @@ -3,6 +3,7 @@ package utils import ( "database/sql" "strconv" + "strings" "github.com/selectdb/ccr_syncer/pkg/xerror" ) @@ -83,3 +84,14 @@ func (r *RowParser) GetString(columnName string) (string, error) { return string(*resBytes), nil } + +func FormatKeywordName(name string) string { + return "`" + strings.TrimSpace(name) + "`" +} + +func EscapeStringValue(value string) string { + escaped := strings.ReplaceAll(value, "\\", "\\\\") + escaped = strings.ReplaceAll(escaped, "\"", "\\\"") + escaped = strings.ReplaceAll(escaped, "'", "\\'") + return escaped +} diff --git a/pkg/utils/thrift_wrapper.go b/pkg/utils/thrift_wrapper.go index 613d16fe..6fadd42a 100644 --- a/pkg/utils/thrift_wrapper.go +++ b/pkg/utils/thrift_wrapper.go @@ -7,7 +7,7 @@ import ( ) type WrapperType interface { - ~int64 | ~string + ~int64 | ~string | ~bool } func ThriftValueWrapper[T WrapperType](value T) *T { @@ -17,7 +17,7 @@ func ThriftValueWrapper[T WrapperType](value T) *T { func ThriftToJsonStr(obj thrift.TStruct) (string, error) { transport := thrift.NewTMemoryBuffer() protocol := thrift.NewTJSONProtocolFactory().GetProtocol(transport) - ts := &thrift.TSerializer{transport, protocol} + ts := &thrift.TSerializer{Transport: transport, Protocol: protocol} if jsonBytes, err := ts.Write(context.Background(), obj); err != nil { return "", nil } else { diff --git a/pkg/version/version.go b/pkg/version/version.go index 00118546..4f5b36db 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -1,9 +1,7 @@ package version -var ( - // Git SHA Value will be set during build - GitTagSha = "Git tag sha: Not provided, use Makefile to build" -) +// Git SHA Value will be set during build +var GitTagSha = "Git tag sha: Not provided, use Makefile to build" func GetVersion() string { return GitTagSha diff --git a/pkg/xerror/stack.go b/pkg/xerror/stack.go new file mode 100644 index 00000000..43f2e083 --- /dev/null +++ b/pkg/xerror/stack.go @@ -0,0 +1,179 @@ +// copy from github.com/pkg/errors/stack.go + +package xerror + +import ( + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +// Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. 
+type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + io.WriteString(s, strconv.Itoa(f.line())) + case 'n': + io.WriteString(s, funcname(f.name())) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + io.WriteString(s, "\n") + f.Format(s, verb) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + st.formatSlice(s, verb) + } + case 's': + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + +// stack represents a stack of program counters. 
+type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers(skipStackLevel int) *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(skipStackLevel, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/pkg/xerror/withMessage.go b/pkg/xerror/withMessage.go new file mode 100644 index 00000000..5cdc180e --- /dev/null +++ b/pkg/xerror/withMessage.go @@ -0,0 +1,33 @@ +// copy from github.com/pkg/errors/errors.go + +package xerror + +import ( + "fmt" + "io" +) + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} diff --git a/pkg/xerror/withstack.go b/pkg/xerror/withstack.go new file mode 100644 index 00000000..5aa2bf88 --- /dev/null +++ b/pkg/xerror/withstack.go @@ -0,0 +1,34 @@ +// copy from github.com/pkg/errors/errors.go + +package xerror + +import ( + "fmt" + "io" +) + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} diff --git a/pkg/xerror/xerror.go b/pkg/xerror/xerror.go index 541e705a..95781d18 100644 --- a/pkg/xerror/xerror.go +++ b/pkg/xerror/xerror.go @@ -3,8 +3,6 @@ package xerror import ( stderrors "errors" "fmt" - - "github.com/pkg/errors" ) // type ErrorCategory int @@ -19,7 +17,7 @@ var ( DB = newErrorCategory("db") FE = newErrorCategory("fe") BE = newErrorCategory("be") - Meta = newErrorCategory("meta") + Meta = newErrorCategory("meta") // The error is related to meta, so a new snapshot will be created. 
) type xErrorCategory struct { @@ -48,7 +46,7 @@ func (e errType) String() string { case xrecoverable: return "Recoverable" case xpanic: - return "panic" + return "Panic" default: panic("unknown error level") } @@ -67,11 +65,18 @@ func (e *XError) Category() ErrorCategory { return e.category } +// return the innerest xerror, unwrap stack && xerror func (e *XError) Error() string { + if err, ok := e.err.(*withStack); ok { + return err.error.Error() + } + + // If the error is an XError, recursively call Error() on the inner error if xerr, ok := e.err.(*XError); ok { return xerr.Error() } + // Otherwise, format the error message with the category name and error message return fmt.Sprintf("[%s] %s", e.category.Name(), e.err.Error()) } @@ -87,49 +92,51 @@ func (e *XError) IsPanic() bool { return e.errType == xpanic } -func New(errCategory ErrorCategory, message string) error { +func NewWithoutStack(errCategory ErrorCategory, message string) *XError { err := &XError{ category: errCategory, errType: xrecoverable, err: stderrors.New(message), } - return errors.WithStack(err) + return err +} + +func New(errCategory ErrorCategory, message string) error { + err := NewWithoutStack(errCategory, message) + return newWithStack(err) } -func XNew(errCategory ErrorCategory, message string) *XError { +func PanicWithoutStack(errCategory ErrorCategory, message string) error { err := &XError{ category: errCategory, - errType: xrecoverable, + errType: xpanic, err: stderrors.New(message), } return err } func Panic(errCategory ErrorCategory, message string) error { - err := &XError{ - category: errCategory, - errType: xpanic, - err: stderrors.New(message), - } - return errors.WithStack(err) + err := PanicWithoutStack(errCategory, message) + return newWithStack(err) } -func Errorf(errCategory ErrorCategory, format string, args ...interface{}) error { +func errorf(errCategory ErrorCategory, errtype errType, format string, args ...interface{}) *XError { err := &XError{ category: errCategory, - errType: xrecoverable, + errType: errtype, err: fmt.Errorf(format, args...), } - return errors.WithStack(err) + return err +} + +func Errorf(errCategory ErrorCategory, format string, args ...interface{}) error { + err := errorf(errCategory, xrecoverable, format, args...) + return newWithStack(err) } func Panicf(errCategory ErrorCategory, format string, args ...interface{}) error { - err := &XError{ - category: errCategory, - errType: xpanic, - err: fmt.Errorf(format, args...), - } - return errors.WithStack(err) + err := errorf(errCategory, xpanic, format, args...) + return newWithStack(err) } func wrap(err error, errCategory ErrorCategory, errLevel errType, message string) error { @@ -142,7 +149,14 @@ func wrap(err error, errCategory ErrorCategory, errLevel errType, message string errType: errLevel, err: err, } - return errors.Wrap(err, message) + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(4), + } } func Wrap(err error, errCategory ErrorCategory, message string) error { @@ -163,7 +177,15 @@ func wrapf(err error, errCategory ErrorCategory, errLevel errType, format string errType: errLevel, err: err, } - return errors.Wrapf(err, format, args...) 
+ + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(4), + } } func Wrapf(err error, errCategory ErrorCategory, format string, args ...interface{}) error { @@ -178,6 +200,21 @@ func PanicWrapf(err error, errCategory ErrorCategory, format string, args ...int return wrapf(err, errCategory, xpanic, format, args...) } +func XPanicWrapf(xerr *XError, format string, args ...interface{}) error { + return wrapf(xerr, xerr.category, xpanic, format, args...) +} + +func newWithStack(err error) error { + if err == nil { + return nil + } + + return &withStack{ + err, + callers(4), + } +} + func WithStack(err error) error { if err == nil { return nil @@ -189,5 +226,20 @@ func WithStack(err error) error { err: err, } - return errors.WithStack(err) + return &withStack{ + err, + callers(4), + } +} + +func IsCategory(err error, category ErrorCategory) bool { + if err == nil { + return false + } + + if xerr, ok := err.(*XError); ok { + return xerr.category == category + } + + return false } diff --git a/pkg/xerror/xerror_test.go b/pkg/xerror/xerror_test.go index 4b67052d..2770f08f 100644 --- a/pkg/xerror/xerror_test.go +++ b/pkg/xerror/xerror_test.go @@ -2,6 +2,7 @@ package xerror import ( "errors" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -19,44 +20,67 @@ func TestXCategory(t *testing.T) { assert.Equal(t, Meta.Name(), "meta") } +func TestXError_Error(t *testing.T) { + errMsg := "test error" + err := Errorf(Normal, errMsg) + assert.NotNil(t, err) + + var xerr *XError + assert.True(t, errors.As(err, &xerr)) + assert.Equal(t, xerr.Error(), fmt.Sprintf("[%s] %s", Normal.Name(), errMsg)) + + err = Wrap(err, DB, "wrapped error") + // t.Logf("err: %+v", err) + assert.NotNil(t, err) + + assert.True(t, errors.As(err, &xerr)) + assert.Equal(t, xerr.Error(), fmt.Sprintf("[%s] %s", Normal.Name(), errMsg)) +} + // UnitTest for XError func TestErrorf(t *testing.T) { - err := Errorf(Normal, "test error") + errMsg := "test error" + err := Errorf(Normal, errMsg) assert.NotNil(t, err) // t.Logf("err: %+v", err) var xerr *XError assert.True(t, errors.As(err, &xerr)) + assert.True(t, xerr.IsRecoverable()) assert.Equal(t, xerr.Category(), Normal) - assert.Equal(t, xerr.err.Error(), "test error") + assert.Equal(t, xerr.err.Error(), errMsg) } func TestWrap(t *testing.T) { - err := errors.New("db open error") + errMsg := "db open error" + err := errors.New(errMsg) wrappedErr := Wrap(err, DB, "wrapped error") assert.NotNil(t, wrappedErr) // t.Logf("wrappedErr: %+v", wrappedErr) var xerr *XError assert.True(t, errors.As(wrappedErr, &xerr)) + assert.True(t, xerr.IsRecoverable()) assert.Equal(t, xerr.Category(), DB) - assert.Equal(t, xerr.err.Error(), "db open error") + assert.Equal(t, xerr.err.Error(), errMsg) } func TestWrapf(t *testing.T) { - err := errors.New("fe test error") + errMsg := "fe test error" + err := errors.New(errMsg) wrappedErr := Wrapf(err, FE, "wrapped error: %s", "foo") assert.NotNil(t, wrappedErr) // t.Logf("wrappedErr: %+v", wrappedErr) var xerr *XError assert.True(t, errors.As(wrappedErr, &xerr)) + assert.True(t, xerr.IsRecoverable()) assert.Equal(t, xerr.Category(), FE) - assert.Equal(t, xerr.err.Error(), "fe test error") + assert.Equal(t, xerr.err.Error(), errMsg) } func TestIs(t *testing.T) { - errBackendNotFound := XNew(Meta, "backend not found") + errBackendNotFound := NewWithoutStack(Meta, "backend not found") wrappedErr := XWrapf(errBackendNotFound, "backend id: %d", 33415) assert.NotNil(t, wrappedErr) // t.Logf("wrappedErr: 
%+v", wrappedErr) @@ -65,7 +89,34 @@ func TestIs(t *testing.T) { var xerr *XError assert.True(t, errors.As(wrappedErr, &xerr)) + assert.True(t, xerr.IsRecoverable()) assert.Equal(t, xerr.Category(), Meta) // t.Logf("xerr: %s", xerr.Error()) assert.Equal(t, errBackendNotFound.Error(), errBackendNotFound.Error()) } + +func TestPanic(t *testing.T) { + errMsg := "test panic" + err := Panic(Normal, errMsg) + // t.Logf("err: %+v", err) + assert.NotNil(t, err) + + var xerr *XError + assert.True(t, errors.As(err, &xerr)) + assert.True(t, xerr.IsPanic()) + assert.Equal(t, xerr.Category(), Normal) + assert.Equal(t, xerr.err.Error(), errMsg) +} + +func TestPanicf(t *testing.T) { + errMsg := "test panicf" + err := Panicf(Normal, errMsg) + // t.Logf("err: %+v", err) + assert.NotNil(t, err) + + var xerr *XError + assert.True(t, errors.As(err, &xerr)) + assert.True(t, xerr.IsPanic()) + assert.Equal(t, xerr.Category(), Normal) + assert.Equal(t, xerr.err.Error(), errMsg) +} diff --git a/pkg/xmetrics/tags.go b/pkg/xmetrics/tags.go new file mode 100644 index 00000000..251db73c --- /dev/null +++ b/pkg/xmetrics/tags.go @@ -0,0 +1,95 @@ +package xmetrics + +import "github.com/selectdb/ccr_syncer/pkg/xerror" + +type IMetricsTag interface { + Tag() []string +} + +type metricsTag struct { + tags []string +} + +// dashboard metrics +type dashboardMetrics struct { + metricsTag +} + +func DashboardMetrics() *dashboardMetrics { + return &dashboardMetrics{ + metricsTag: metricsTag{[]string{"dashboard"}}, + } +} + +func (d *dashboardMetrics) Tag() []string { + return d.tags +} + +func (d *dashboardMetrics) JobNum() IMetricsTag { + d.tags = append(d.tags, "jobNum") + return d +} + +func (d *dashboardMetrics) BinlogNum() IMetricsTag { + d.tags = append(d.tags, "binlogNum") + return d +} + +// job metrics +type jobMetrics struct { + metricsTag + name string +} + +func JobMetrics(jobName string) *jobMetrics { + return &jobMetrics{ + metricsTag: metricsTag{[]string{"job"}}, + name: jobName, + } +} + +func (j *jobMetrics) Tag() []string { + j.tags = append(j.tags, j.name) + return j.tags +} + +func (j *jobMetrics) PrevCommitSeq() IMetricsTag { + j.tags = append(j.tags, "prevCommitSeq") + return j +} + +func (j *jobMetrics) HandlingCommitSeq() IMetricsTag { + j.tags = append(j.tags, "handlingCommitSeq") + return j +} + +func (j *jobMetrics) HandledBinlogNum() IMetricsTag { + j.tags = append(j.tags, "handledBinlogNum") + return j +} + +// error metrics +type errorMetrics struct { + metricsTag +} + +func ErrorMetrics(err *xerror.XError) IMetricsTag { + errMetrics := &errorMetrics{ + metricsTag: metricsTag{[]string{"error", err.Category().Name()}}, + } + + // use switch instead of ifelse maybe + if err.IsRecoverable() { + errMetrics.tags = append(errMetrics.tags, "recoverable") + } else if err.IsPanic() { + errMetrics.tags = append(errMetrics.tags, "panic") + } else { + errMetrics.tags = append(errMetrics.tags, "unknown") + } + + return errMetrics +} + +func (e *errorMetrics) Tag() []string { + return e.tags +} diff --git a/pkg/xmetrics/xmetrics.go b/pkg/xmetrics/xmetrics.go new file mode 100644 index 00000000..87c5247e --- /dev/null +++ b/pkg/xmetrics/xmetrics.go @@ -0,0 +1,46 @@ +package xmetrics + +import ( + "github.com/hashicorp/go-metrics" + "github.com/hashicorp/go-metrics/prometheus" + "github.com/selectdb/ccr_syncer/pkg/xerror" +) + +func InitGlobal(serviceName string) error { + sink, err := prometheus.NewPrometheusSink() + if err != nil { + return xerror.Wrap(err, xerror.Normal, "init prometheus sink falied") + } + + if 
_, err := metrics.NewGlobal(metrics.DefaultConfig(serviceName), sink); err != nil { + return xerror.Wrap(err, xerror.Normal, "new global metrics falied") + } + + return nil +} + +func AddError(err *xerror.XError) { + metrics.IncrCounter(ErrorMetrics(err).Tag(), 1) +} + +func AddNewJob(jobName string) { + metrics.SetGauge(JobMetrics(jobName).HandlingCommitSeq().Tag(), -1) + + metrics.IncrCounter(DashboardMetrics().JobNum().Tag(), 1) +} + +func HandlingBinlog(jobName string, commitSeq int64) { + metrics.SetGauge(JobMetrics(jobName).HandlingCommitSeq().Tag(), float32(commitSeq)) +} + +func Rollback(jobName string, commitSeq int64) { + metrics.SetGauge(JobMetrics(jobName).HandlingCommitSeq().Tag(), float32(commitSeq)) + metrics.SetGauge(JobMetrics(jobName).PrevCommitSeq().Tag(), float32(commitSeq)) +} + +func ConsumeBinlog(jobName string, commitSeq int64) { + metrics.SetGauge(JobMetrics(jobName).PrevCommitSeq().Tag(), float32(commitSeq)) + metrics.IncrCounter(JobMetrics(jobName).HandledBinlogNum().Tag(), 1) + + metrics.IncrCounter(DashboardMetrics().BinlogNum().Tag(), 1) +} diff --git a/regression-test/common/helper.groovy b/regression-test/common/helper.groovy new file mode 100644 index 00000000..c8deb392 --- /dev/null +++ b/regression-test/common/helper.groovy @@ -0,0 +1,500 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import com.google.common.collect.Maps + +import java.util.Map +import java.util.List + +class Describe { + String index + String field + String type + Boolean is_key + + Describe(String index, String field, String type, Boolean is_key) { + this.index = index + this.field = field + this.type = type + this.is_key = is_key + } + + String toString() { + return "index: ${index}, field: ${field}, type: ${type}, is_key: ${is_key}" + } +} + +class Helper { + def suite + def context + def logger + String alias = null + + // the configurations about ccr syncer. 
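+    // sync_gap_time is the wait interval (in milliseconds, an assumption based on the
+    // sleep() calls below) between retries of the check*TimesOf helpers, and
+    // syncerAddress is the HTTP endpoint of the ccr-syncer service used by the REST
+    // helpers (/create_ccr, /pause, /resume, /delete, /force_fullsync, ...).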
+ def sync_gap_time = 5000 + def syncerAddress = "127.0.0.1:9190" + + Helper(suite) { + this.suite = suite + this.context = suite.context + this.logger = suite.logger + } + + void set_alias(String alias) { + this.alias = alias + } + + String randomSuffix() { + def hashCode = UUID.randomUUID().toString().replace("-", "").hashCode() + if (hashCode < 0) { + hashCode *= -1; + } + return Integer.toString(hashCode) + } + + def get_backup_label_prefix(String table = "") { + return "ccrs_" + get_ccr_job_name(table) + } + + def get_ccr_job_name(String table = "") { + def name = context.suiteName + if (!table.equals("")) { + name = name + "_" + table + } + return name + } + + def get_ccr_body(String table, String db = null) { + if (db == null) { + db = context.dbName + } + + def gson = new com.google.gson.Gson() + + Map srcSpec = context.getSrcSpec(db) + srcSpec.put("table", table) + + Map destSpec = context.getDestSpec(db) + if (alias != null) { + destSpec.put("table", alias) + } else { + destSpec.put("table", table) + } + + Map body = Maps.newHashMap() + String name = context.suiteName + if (!table.equals("")) { + name = name + "_" + table + } + body.put("name", name) + body.put("src", srcSpec) + body.put("dest", destSpec) + + return gson.toJson(body) + } + + void ccrJobDelete(table = "") { + def bodyJson = get_ccr_body "${table}" + suite.httpTest { + uri "/delete" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + void ccrJobCreate(table = "") { + def bodyJson = get_ccr_body "${table}" + suite.httpTest { + uri "/create_ccr" + endpoint syncerAddress + body "${bodyJson}" + op "post" + check { code, body -> + if (!"${code}".toString().equals("200")) { + throw new Exception("request failed, code: ${code}, body: ${body}") + } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${body}" + if (!object.success) { + throw new Exception("request failed, error msg: ${object.error_msg}") + } + } + } + } + + void ccrJobCreateAllowTableExists(table = "") { + def bodyJson = get_ccr_body "${table}" + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${bodyJson}" + object['allow_table_exists'] = true + logger.info("json object ${object}") + + bodyJson = new groovy.json.JsonBuilder(object).toString() + suite.httpTest { + uri "/create_ccr" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + void ccrJobPause(table = "") { + def bodyJson = get_ccr_body "${table}" + suite.httpTest { + uri "/pause" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + void ccrJobResume(table = "") { + def bodyJson = get_ccr_body "${table}" + suite.httpTest { + uri "/resume" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + void ccrJobDesync(table = "") { + def bodyJson = get_ccr_body "${table}" + suite.httpTest { + uri "/desync" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + void enableDbBinlog() { + suite.sql """ + ALTER DATABASE ${context.dbName} SET properties ("binlog.enable" = "true") + """ + } + + void disableDbBinlog() { + suite.sql """ + ALTER DATABASE ${context.dbName} SET properties ("binlog.enable" = "false") + """ + } + + Boolean checkShowTimesOf(sqlString, myClosure, times, func = "sql") { + Boolean ret = false + List> res + while (times > 0) { + try { + if (func == "sql") { + res = suite.sql "${sqlString}" + } else { + res = suite.target_sql "${sqlString}" + } + if (myClosure.call(res)) { + ret = true + } + } catch (Exception e) {} + + if (ret) { + break + 
} else if (--times > 0) { + sleep(sync_gap_time) + } + } + + if (!ret) { + logger.info("last select result: ${res}") + } + + return ret + } + + // wait until all restore tasks of the dest cluster are finished. + Boolean checkRestoreFinishTimesOf(checkTable, times) { + Boolean ret = false + while (times > 0) { + def sqlInfo = suite.target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + for (List row : sqlInfo) { + if ((row[10] as String).contains(checkTable)) { + logger.info("SHOW RESTORE result: ${row}") + ret = (row[4] as String) == "FINISHED" + } + } + + if (ret) { + break + } else if (--times > 0) { + sleep(sync_gap_time) + } + } + + return ret + } + + // Check N times whether the num of rows of the downstream data is expected. + Boolean checkSelectTimesOf(sqlString, rowSize, times) { + def tmpRes = suite.target_sql "${sqlString}" + while (tmpRes.size() != rowSize) { + sleep(sync_gap_time) + if (--times > 0) { + tmpRes = suite.target_sql "${sqlString}" + } else { + logger.info("last select result: ${tmpRes}") + logger.info("expected row size: ${rowSize}, actual row size: ${tmpRes.size()}") + break + } + } + return tmpRes.size() == rowSize + } + + Boolean checkSelectColTimesOf(sqlString, colSize, times) { + def tmpRes = suite.target_sql "${sqlString}" + while (tmpRes.size() == 0 || tmpRes[0].size() != colSize) { + sleep(sync_gap_time) + if (--times > 0) { + tmpRes = suite.target_sql "${sqlString}" + } else { + break + } + } + return tmpRes.size() > 0 && tmpRes[0].size() == colSize + } + + Boolean checkData(data, beginCol, value) { + if (data.size() < beginCol + value.size()) { + return false + } + + for (int i = 0; i < value.size(); ++i) { + if ((data[beginCol + i]) as int != value[i]) { + return false + } + } + + return true + } + + Integer getRestoreRowSize(checkTable) { + def result = suite.target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + def size = 0 + for (List row : result) { + if ((row[10] as String).contains(checkTable)) { + size += 1 + } + } + + return size + } + + Boolean checkRestoreNumAndFinishedTimesOf(checkTable, expectedRestoreRows, times) { + while (times > 0) { + def restore_size = getRestoreRowSize(checkTable) + if (restore_size >= expectedRestoreRows) { + return checkRestoreFinishTimesOf(checkTable, times) + } + if (--times > 0) { + sleep(sync_gap_time) + } + } + + return false + } + + void force_fullsync(tableName = "") { + def bodyJson = get_ccr_body "${tableName}" + suite.httpTest { + uri "/force_fullsync" + endpoint syncerAddress + body "${bodyJson}" + op "post" + } + } + + Object get_job_progress(tableName = "") { + def request_body = get_ccr_body(tableName) + def get_job_progress_uri = { check_func -> + suite.httpTest { + uri "/job_progress" + endpoint syncerAddress + body request_body + op "post" + check check_func + } + } + + def result = null + get_job_progress_uri.call() { code, body -> + if (!"${code}".toString().equals("200")) { + throw "request failed, code: ${code}, body: ${body}" + } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${body}" + if (!object.success) { + throw "request failed, error msg: ${object.error_msg}" + } + logger.info("job progress: ${object.job_progress}") + result = object.job_progress + } + return result + } + + // test whether the ccr syncer has set a feature flag? 
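+    // It queries the syncer's GET /features endpoint and returns true only when the
+    // named flag is present and enabled.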
+ Boolean has_feature(name) { + def features_uri = { check_func -> + suite.httpTest { + uri "/features" + endpoint syncerAddress + body "" + op "get" + check check_func + } + } + + def result = null + features_uri.call() { code, body -> + if (!"${code}".toString().equals("200")) { + throw "request failed, code: ${code}, body: ${body}" + } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${body}" + if (!object.success) { + throw "request failed, error msg: ${object.error_msg}" + } + logger.info("features: ${object.flags}") + result = object.flags + } + + for (def flag in result) { + if (flag.feature == name && flag.value) { + return true + } + } + return false + } + + String upstream_version() { + def version_variables = suite.sql_return_maparray "show variables like 'version_comment'" + return version_variables[0].Value + } + + Boolean is_version_supported(versions) { + def version_variables = suite.sql_return_maparray "show variables like 'version_comment'" + def matcher = version_variables[0].Value =~ /doris-(\d+\.\d+\.\d+)/ + if (matcher.find()) { + def parts = matcher.group(1).tokenize('.') + def major = parts[0].toLong() + def minor = parts[1].toLong() + def patch = parts[2].toLong() + def version = String.format("%d%02d%02d", major, minor, patch).toLong() + for (long expect : versions) { + logger.info("current version ${version}, expect version ${expect}") + def expect_version_set = expect / 100 + def got_version_set = version / 100 + if (expect_version_set == got_version_set && version < expect) { + return false + } + } + } + return true + } + + Map> get_table_describe(String table, String source = "sql") { + def res + if (source == "sql") { + res = suite.sql_return_maparray "DESC ${table} ALL" + } else { + res = suite.target_sql_return_maparray "DESC ${table} ALL" + } + + def map = Maps.newHashMap() + def index = "" + for (def row : res) { + if (row.IndexName != "") { + index = row.IndexName + } + if (row.Field == "") { + continue + } + + if (!map.containsKey(index)) { + map.put(index, []) + } + def is_key = false + if (row.Key == "true" || row.Key == "YES") { + is_key = true + } + map.get(index).add(new Describe(index, row.Field, row.Type, is_key)) + } + return map + } + + Boolean check_describes(Map> expect, Map> actual) { + if (actual.size() != expect.size()) { + return false + } + + for (def key : expect.keySet()) { + if (!actual.containsKey(key)) { + return false + } + def expect_list = expect.get(key) + def actual_list = actual.get(key) + if (expect_list.size() != actual_list.size()) { + return false + } + for (int i = 0; i < expect_list.size(); ++i) { + if (expect_list[i].toString() != actual_list[i].toString()) { + return false + } + } + } + return true + } + + Boolean check_table_describe_times(String table, times = 30) { + while (times > 0) { + def upstream_describe = get_table_describe(table) + def downstream_describe = get_table_describe(table, "target") + if (check_describes(upstream_describe, downstream_describe)) { + return true + } + sleep(sync_gap_time) + times-- + } + + def upstream_describe = get_table_describe(table) + def downstream_describe = get_table_describe(table, "target") + logger.info("upstream describe: ${upstream_describe}") + logger.info("downstream describe: ${downstream_describe}") + return false + } + + Boolean check_table_exists(String table, times = 30) { + while (times > 0) { + def res = suite.target_sql "SHOW TABLES LIKE '${table}'" + if (res.size() > 0) { + return true + } + sleep(sync_gap_time) + 
times-- + } + return false + } +} + +new Helper(suite) diff --git a/regression-test/data/ccr_user_sync/test_common_sync.out b/regression-test/data/ccr_user_sync/test_common_sync.out deleted file mode 100644 index c33d3b5c..00000000 --- a/regression-test/data/ccr_user_sync/test_common_sync.out +++ /dev/null @@ -1 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this diff --git a/regression-test/data/db_sync/partition/drop_1/test_ds_part_drop_1.out b/regression-test/data/db_sync/partition/drop_1/test_ds_part_drop_1.out new file mode 100644 index 00000000..f9ba0eef --- /dev/null +++ b/regression-test/data/db_sync/partition/drop_1/test_ds_part_drop_1.out @@ -0,0 +1,28 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +2020-01-10 3 103 15 225.0 +2020-01-20 4 104 30 450.0 + +-- !target_sql -- +2020-01-10 3 103 15 225.0 +2020-01-20 4 104 30 450.0 + +-- !sql -- + +-- !target_sql -- +2020-01-10 3 103 15 225.0 +2020-01-20 4 104 30 450.0 + +-- !sql -- + +-- !target_sql -- +2020-01-10 3 103 15 225.0 +2020-01-20 4 104 30 450.0 + +-- !sql -- +2020-02-20 5 105 50 550.0 + +-- !target_sql -- +2020-01-10 3 103 15 225.0 +2020-01-20 4 104 30 450.0 + diff --git a/regression-test/data/db_sync/partition/recover/test_ds_part_recover.out b/regression-test/data/db_sync/partition/recover/test_ds_part_recover.out new file mode 100644 index 00000000..44d17364 --- /dev/null +++ b/regression-test/data/db_sync/partition/recover/test_ds_part_recover.out @@ -0,0 +1,17 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + +-- !sql_source_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + diff --git a/regression-test/data/db_sync/partition/recover1/test_ds_part_recover_new.out b/regression-test/data/db_sync/partition/recover1/test_ds_part_recover_new.out new file mode 100644 index 00000000..44d17364 --- /dev/null +++ b/regression-test/data/db_sync/partition/recover1/test_ds_part_recover_new.out @@ -0,0 +1,17 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + +-- !sql_source_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + diff --git a/regression-test/data/db_sync/restore/test_db_sync_table_restore.out b/regression-test/data/db_sync/restore/test_db_sync_table_restore.out new file mode 100644 index 00000000..96968907 --- /dev/null +++ b/regression-test/data/db_sync/restore/test_db_sync_table_restore.out @@ -0,0 +1,25 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + diff --git a/regression-test/data/db_sync/restore_1/test_db_sync_table_restore1.out b/regression-test/data/db_sync/restore_1/test_db_sync_table_restore1.out new file mode 100644 index 00000000..d8a9524d --- /dev/null +++ b/regression-test/data/db_sync/restore_1/test_db_sync_table_restore1.out @@ -0,0 +1,37 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !sql_source_content_backup -- +0 0 +0 1 +0 2 + +-- !target_sql_content_backup -- +0 0 +0 1 +0 2 + +-- !sql_source_content_new -- +0 0 +0 1 +0 2 +9 0 +9 1 +9 2 + +-- !target_sql_content_new -- +0 0 +0 1 +0 2 +9 0 +9 1 +9 2 + +-- !sql_source_content_restore -- +0 0 +0 1 +0 2 + +-- !target_sql_content_restore -- +0 0 +0 1 +0 2 + diff --git a/regression-test/data/db_sync/restore_multi/test_db_sync_table_restore_multi.out b/regression-test/data/db_sync/restore_multi/test_db_sync_table_restore_multi.out new file mode 100644 index 00000000..52201d4f --- /dev/null +++ b/regression-test/data/db_sync/restore_multi/test_db_sync_table_restore_multi.out @@ -0,0 +1,49 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + diff --git a/regression-test/data/db_sync/table/recover/test_ds_tbl_drop_recover.out b/regression-test/data/db_sync/table/recover/test_ds_tbl_drop_recover.out new file mode 100644 index 00000000..28abc0ba --- /dev/null +++ b/regression-test/data/db_sync/table/recover/test_ds_tbl_drop_recover.out @@ -0,0 +1,39 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 + +-- !target_sql_content_2 -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 +3 0 +3 1 +3 2 + +-- !sql_source_content_2 -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 +3 0 +3 1 +3 2 + diff --git a/regression-test/data/db_sync/table/recover1/test_ds_tbl_drop_recover_new.out b/regression-test/data/db_sync/table/recover1/test_ds_tbl_drop_recover_new.out new file mode 100644 index 00000000..28abc0ba --- /dev/null +++ b/regression-test/data/db_sync/table/recover1/test_ds_tbl_drop_recover_new.out @@ -0,0 +1,39 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 + +-- !target_sql_content_2 -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 +3 0 +3 1 +3 2 + +-- !sql_source_content_2 -- +0 0 +0 1 +0 2 +2 0 +2 1 +2 2 +3 0 +3 1 +3 2 + diff --git a/regression-test/data/db_sync/table/recover2/test_ds_tbl_drop_recover2.out b/regression-test/data/db_sync/table/recover2/test_ds_tbl_drop_recover2.out new file mode 100644 index 00000000..75dd0ac9 --- /dev/null +++ b/regression-test/data/db_sync/table/recover2/test_ds_tbl_drop_recover2.out @@ -0,0 +1,77 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !target_sql_content_2 -- +0 0 +0 1 +0 2 +10 0 +10 1 +10 2 +11 0 +11 1 +11 2 +12 0 +12 1 +12 2 +13 0 +13 1 +13 2 +14 0 +14 1 +14 2 +15 0 +15 1 +15 2 +16 0 +16 1 +16 2 +17 0 +17 1 +17 2 +18 0 +18 1 +18 2 +19 0 +19 1 +19 2 +20 0 +20 1 +20 2 + +-- !sql_source_content_2 -- +0 0 +0 1 +0 2 +10 0 +10 1 +10 2 +11 0 +11 1 +11 2 +12 0 +12 1 +12 2 +13 0 +13 1 +13 2 +14 0 +14 1 +14 2 +15 0 +15 1 +15 2 +16 0 +16 1 +16 2 +17 0 +17 1 +17 2 +18 0 +18 1 +18 2 +19 0 +19 1 +19 2 +20 0 +20 1 +20 2 + diff --git a/regression-test/data/db_sync/table/recover3/test_ds_tbl_drop_recover3.out b/regression-test/data/db_sync/table/recover3/test_ds_tbl_drop_recover3.out new file mode 100644 index 00000000..75dd0ac9 --- /dev/null +++ b/regression-test/data/db_sync/table/recover3/test_ds_tbl_drop_recover3.out @@ -0,0 +1,77 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content_2 -- +0 0 +0 1 +0 2 +10 0 +10 1 +10 2 +11 0 +11 1 +11 2 +12 0 +12 1 +12 2 +13 0 +13 1 +13 2 +14 0 +14 1 +14 2 +15 0 +15 1 +15 2 +16 0 +16 1 +16 2 +17 0 +17 1 +17 2 +18 0 +18 1 +18 2 +19 0 +19 1 +19 2 +20 0 +20 1 +20 2 + +-- !sql_source_content_2 -- +0 0 +0 1 +0 2 +10 0 +10 1 +10 2 +11 0 +11 1 +11 2 +12 0 +12 1 +12 2 +13 0 +13 1 +13 2 +14 0 +14 1 +14 2 +15 0 +15 1 +15 2 +16 0 +16 1 +16 2 +17 0 +17 1 +17 2 +18 0 +18 1 +18 2 +19 0 +19 1 +19 2 +20 0 +20 1 +20 2 + diff --git a/regression-test/data/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.out b/regression-test/data/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.out new file mode 100644 index 00000000..4750a7fe --- /dev/null +++ b/regression-test/data/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.out @@ -0,0 +1,43 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +1 0 +1 1 +1 2 +1 3 +1 4 + +-- !target_sql -- +1 0 +1 1 +1 2 +1 3 +1 4 + +-- !sql -- +2 0 +2 1 +2 2 +2 3 +2 4 + +-- !target_sql -- +2 0 +2 1 +2 2 +2 3 +2 4 + +-- !sql -- +3 0 +3 1 +3 2 +3 3 +3 4 + +-- !target_sql -- +3 0 +3 1 +3 2 +3 3 +3 4 + diff --git a/regression-test/data/table_sync/partition/recover/test_tbl_part_recover.out b/regression-test/data/table_sync/partition/recover/test_tbl_part_recover.out new file mode 100644 index 00000000..44d17364 --- /dev/null +++ b/regression-test/data/table_sync/partition/recover/test_tbl_part_recover.out @@ -0,0 +1,17 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !target_sql_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + +-- !sql_source_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + diff --git a/regression-test/data/table_sync/partition/recover1/test_tbl_part_recover_new.out b/regression-test/data/table_sync/partition/recover1/test_tbl_part_recover_new.out new file mode 100644 index 00000000..44d17364 --- /dev/null +++ b/regression-test/data/table_sync/partition/recover1/test_tbl_part_recover_new.out @@ -0,0 +1,17 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !target_sql_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + +-- !sql_source_content -- +3 0 +3 1 +3 2 +5 0 +5 1 +5 2 + diff --git a/regression-test/data/table_sync/restore/test_tbl_restore.out b/regression-test/data/table_sync/restore/test_tbl_restore.out new file mode 100644 index 00000000..ae5e1722 --- /dev/null +++ b/regression-test/data/table_sync/restore/test_tbl_restore.out @@ -0,0 +1,69 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + diff --git a/regression-test/data/table_sync/restore_multi/test_tbl_restore_multi.out b/regression-test/data/table_sync/restore_multi/test_tbl_restore_multi.out new file mode 100644 index 00000000..6bba4fb5 --- /dev/null +++ b/regression-test/data/table_sync/restore_multi/test_tbl_restore_multi.out @@ -0,0 +1,113 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 +1 0 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 +1 7 +1 8 +1 9 + +-- !sql_source_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + +-- !target_sql_content -- +0 0 +0 1 +0 2 +0 3 +0 4 +0 5 +0 6 +0 7 +0 8 +0 9 + diff --git a/regression-test/data/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.out b/regression-test/data/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.out new file mode 100644 index 00000000..7f3f2b8f --- /dev/null +++ b/regression-test/data/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.out @@ -0,0 +1,275 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !sql -- +1 andy andy love apple 100 +1 bason bason hate pear 100 +2 andy andy love apple 100 +2 bason bason hate pear 98 +3 andy andy love apple 100 +3 bason bason hate pear 99 +4 andy andy love apple 100 +4 bason bason hate pear 99 + +-- !sql -- +1 andy andy love apple 100 +2 andy andy love apple 100 +3 andy andy love apple 100 +4 andy andy love apple 100 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 + +-- !target_sql -- +1 andy andy love apple 100 +1 bason bason hate pear 100 +2 andy andy love apple 100 +2 bason bason hate pear 98 +3 andy andy love apple 100 +3 bason bason hate pear 99 +4 andy andy love apple 100 +4 bason bason hate pear 99 + +-- !target_sql -- +1 andy andy love apple 100 +2 andy andy love apple 100 +3 andy andy love apple 100 +4 andy andy love apple 100 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 + +-- !sql -- +1 andy andy love apple 100 +1 bason bason hate pear 100 +2 andy andy love apple 100 +2 bason bason hate pear 98 +3 andy andy love apple 100 +3 bason bason hate pear 99 +4 andy andy love apple 100 +4 bason bason hate pear 99 +5 andy andy love apple 100 +5 bason bason hate pear 99 +6 andy andy love apple 98 +6 bason bason hate pear 99 + +-- !sql -- +1 andy andy love apple 100 +2 andy andy love apple 100 +3 andy andy love apple 100 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 bason bason hate pear 99 +5 bason bason hate pear 99 +6 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + +-- !target_sql -- +1 andy andy love apple 100 +1 bason bason hate pear 100 +2 andy andy love apple 100 +2 bason bason hate pear 98 +3 andy andy love apple 100 +3 bason bason hate pear 99 +4 andy andy love apple 100 +4 bason bason hate pear 99 +5 andy andy love apple 100 +5 bason bason hate pear 99 +6 andy andy love apple 98 +6 bason bason hate pear 99 + +-- !target_sql -- +1 andy andy love apple 100 +2 andy andy love apple 100 +3 andy andy love apple 100 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 bason bason hate pear 99 +5 bason bason hate pear 99 +6 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 + +-- !sql -- +4 andy andy love apple 100 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 + +-- !target_sql -- +4 andy andy love apple 100 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy 
andy love apple 98 + +-- !sql -- +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !target_sql -- +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 + +-- !sql -- +4 andy andy love apple 100 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 + +-- !target_sql -- +4 andy andy love apple 100 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !sql -- +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !target_sql -- +4 andy andy love apple 100 +5 andy andy love apple 100 +6 andy andy love apple 98 + +-- !target_sql -- +1 bason bason hate pear 100 +2 bason bason hate pear 98 +3 bason bason hate pear 99 + +-- !target_sql -- +2 bason bason hate pear 98 +6 andy andy love apple 98 + diff --git a/regression-test/suites/cross_ds/fullsync/tbl_drop_create/test_cds_fullsync_tbl_drop_create.groovy b/regression-test/suites/cross_ds/fullsync/tbl_drop_create/test_cds_fullsync_tbl_drop_create.groovy new file mode 100644 index 00000000..41feddcb --- /dev/null +++ b/regression-test/suites/cross_ds/fullsync/tbl_drop_create/test_cds_fullsync_tbl_drop_create.groovy @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
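+// This case only runs when the syncer exposes the `feature_replace_not_matched_with_alias`
+// feature flag; otherwise it returns early (see the has_feature check below).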
+ +suite("test_cds_fullsync_tbl_drop_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_replace_not_matched_with_alias")) { + logger.info("this case only works with feature_replace_not_matched_with_alias") + return + } + + // Case description + // 1. Create two tables + // 2. Pause ccr job, drop table1, then trigger fullsync + // 3. Resume ccr job, insert data into table1 + // 4. Create table1 again, insert data into table1 + // 5. Check data in table1 and table2 + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("pause ccr job, drop table1, then trigger fullsync") + helper.ccrJobPause() + + sql "DROP TABLE ${tableName}_1" + helper.force_fullsync() + + values.clear(); + for (int index = insert_num; index < insert_num * 2; index++) { + values.add("(${test_num}, ${index})") + } + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql "sync" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num * 2, 60)) + + logger.info("create table ${tableName}_1 again") + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + 
PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + values.clear(); + for (int index = 0; index < insert_num * 2; index++) { + values.add("(${test_num}, ${index})") + } + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql "sync" + + def has_expect_rows = { res -> + return res.size() == insert_num * 2 + } + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}_1", has_expect_rows, 60)) +} + + + + diff --git a/regression-test/suites/cross_ds/fullsync/with_alias/test_cds_fullsync_with_alias.groovy b/regression-test/suites/cross_ds/fullsync/with_alias/test_cds_fullsync_with_alias.groovy new file mode 100644 index 00000000..212c3448 --- /dev/null +++ b/regression-test/suites/cross_ds/fullsync/with_alias/test_cds_fullsync_with_alias.groovy @@ -0,0 +1,158 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_fullsync_with_alias") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_replace_not_matched_with_alias")) { + logger.info("this case only works with feature_replace_not_matched_with_alias") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + + logger.info("create two tables") + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN 
("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("pause ccr job, change table1 schema and trigger fullsync, then the upsert of table2 should be synced") + helper.ccrJobPause() + helper.force_fullsync() + + values.clear(); + for (int index = insert_num; index < insert_num * 2; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + + sql """ + CREATE VIEW ${tableName}_view (k1, k2) + AS + SELECT test as k1, sum(id) as k2 FROM ${tableName} + GROUP BY test; + """ + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "sync" + + logger.info("resume job, then the upserts both table1 and table2 will be synced to downstream") + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num * 2, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1", insert_num * 2, 60)) + def view_size = target_sql "SHOW VIEW FROM ${tableName}" + assertTrue(view_size.size() == 1); +} + + + diff --git a/regression-test/suites/cross_ds/part/drop/add/test_cds_part_add_drop.groovy b/regression-test/suites/cross_ds/part/drop/add/test_cds_part_add_drop.groovy new file mode 100644 index 00000000..9a01bc1f --- /dev/null +++ b/regression-test/suites/cross_ds/part/drop/add/test_cds_part_add_drop.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cds_part_add_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "test_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Test 1: Add range partition ===") + def tableName = "${baseTableName}_range" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + logger.info("Add partition and drop") + + def first_job_progress = helper.get_job_progress() + + sql """ + ALTER TABLE ${tableName} ADD PARTITION p3 VALUES LESS THAN ("200") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p3" + """, + exist, 60, "sql")) + + sql "INSERT INTO ${tableName} VALUES (1, 150)" + + sql """ + ALTER TABLE ${tableName} DROP PARTITION p3 + """ + + sql "INSERT INTO ${tableName} VALUES (2, 10)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf(""" + SELECT * FROM ${tableName} + WHERE id = 150 + """, + 0, 60)) + assertTrue(helper.checkSelectTimesOf(""" + SELECT * FROM ${tableName} + WHERE id = 10 + """, + 1, 60)) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/part/drop/replace/test_cds_part_replace_drop.groovy b/regression-test/suites/cross_ds/part/drop/replace/test_cds_part_replace_drop.groovy new file mode 100644 index 00000000..0f29d957 --- /dev/null +++ b/regression-test/suites/cross_ds/part/drop/replace/test_cds_part_replace_drop.groovy @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cds_part_replace_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "tbl_replace_partition_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create table ===") + tableName = "${baseTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + def first_job_progress = helper.get_job_progress() + helper.ccrJobPause() + + logger.info("=== Add temp partition p5 ===") + + sql """ + ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p5 VALUES [("0"), ("100")) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TEMPORARY PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p5" + """, + exist, 60, "sql")) + + sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p5) VALUES (1, 50)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + TEMPORARY PARTITION (p5) + WHERE id = 50 + """, + exist, 60, "sql")) + + logger.info("=== Replace partition p2 by p5 ===") + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + notExist, 60, "sql")) + + sql "ALTER TABLE ${tableName} REPLACE PARTITION (p2) WITH TEMPORARY PARTITION (p5)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + exist, 60, "sql")) + + sql "ALTER TABLE ${tableName} DROP PARTITION p2" + + sql "INSERT INTO ${tableName} VALUES (250, 250)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id = 250", 1, 60)) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/cross_ds/signature_not_matched/test_cds_signature_not_matched.groovy b/regression-test/suites/cross_ds/signature_not_matched/test_cds_signature_not_matched.groovy new file mode 100644 index 00000000..6ee38190 --- /dev/null +++ b/regression-test/suites/cross_ds/signature_not_matched/test_cds_signature_not_matched.groovy @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_signature_not_matched") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + logger.info("create table with different schema") + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql "CREATE DATABASE IF NOT EXISTS TEST_${context.dbName}" + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` VARCHAR(12), + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql "sync" + + def v = sql "SELECT * FROM ${tableName}" + assertEquals(v.size(), insert_num); + + helper.ccrJobDelete() + helper.ccrJobCreate() + + logger.info("dest cluster drop unmatched tables") + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + if (helper.has_feature("feature_replace_not_matched_with_alias")) { + def restore_finished = false; + for (int j = 0; j < 10; j++) { + def progress = helper.get_job_progress() + + // sync_state == DBIncrementalSync or DBTablesIncrementalSync + if (progress.sync_state == 3 || progress.sync_state == 1) { + restore_finished = true + break + } + sleep(3000) + } + assertTrue(restore_finished) + } + + v = target_sql "SELECT * FROM ${tableName}" + assertTrue(v.size() == insert_num); + + logger.info("Insert new records, need to be synced") + + values.clear(); + for (int index = insert_num; index < insert_num * 2; 
index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num * 2, 60)) +} + + diff --git a/regression-test/suites/cross_ds/sync_view_twice/test_cds_sync_view_twice.groovy b/regression-test/suites/cross_ds/sync_view_twice/test_cds_sync_view_twice.groovy new file mode 100644 index 00000000..d9fb13f7 --- /dev/null +++ b/regression-test/suites/cross_ds/sync_view_twice/test_cds_sync_view_twice.groovy @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_sync_view_twice") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.') || versions[0].Value.contains('doris-2.1.')) { + logger.info("2.0/2.1 not support this case, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17); + """ + + helper.enableDbBinlog() + + logger.info("=== Test1: create view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 5, 30)) + + // the view will be restored again. 
+ logger.info("=== Test 2: delete job and create it again ===") + test_num = 5 + helper.ccrJobDelete() + + sql """ + INSERT INTO ${tableDuplicate0} VALUES (6, "Zhangsan", 31) + """ + sql "sync" + + num_restore = helper.getRestoreRowSize(tableDuplicate0) + helper.ccrJobCreate() + assertTrue(helper.checkRestoreNumAndFinishedTimesOf("${tableDuplicate0}", num_restore + 1, 30)) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 6, 50)) + def view_size = target_sql "SHOW VIEW FROM ${tableDuplicate0}" + assertTrue(view_size.size() == 1); +} + diff --git a/regression-test/suites/cross_ds/table/backup/create_drop/test_cds_tbl_backup_create_drop.groovy b/regression-test/suites/cross_ds/table/backup/create_drop/test_cds_tbl_backup_create_drop.groovy new file mode 100644 index 00000000..09601146 --- /dev/null +++ b/regression-test/suites/cross_ds/table/backup/create_drop/test_cds_tbl_backup_create_drop.groovy @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_cds_tbl_backup_create_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 10 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def prefix = helper.get_backup_label_prefix() + GetDebugPoint().enableDebugPointForAllFEs("FE.PAUSE_PENDING_BACKUP_JOB", [value: prefix]) + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + // Wait for backup job to running + def is_backup_running = { res -> + for (int i = 0; i < res.size(); i++) { + logger.info("backup job status: ${res[i]}") + if (res[i][3] != "CANCELLED" && res[i][3] != "FINISHED") { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf( + """ SHOW BACKUP WHERE SnapshotName LIKE "${prefix}%" """, + is_backup_running, 60)) + + logger.info("create new table and drop it 
immediatelly") + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE ${tableName}_2 FORCE" + sql "INSERT INTO ${tableName}_1 VALUES (1, 1)" + sql "sync" + + GetDebugPoint().disableDebugPointForAllFEs("FE.PAUSE_PENDING_BACKUP_JOB") + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + sql "INSERT INTO ${tableName}_1 VALUES (2, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1", 2, 60)) + + logger.info("drop table and create it again, during full sync") + // 1. pause fullsync backup job + // 2. drop table A + // 3. create table A again + // 4. resume fullsync backup job + // The drop table A should be skipped. + + GetDebugPoint().enableDebugPointForAllFEs("FE.PAUSE_PENDING_BACKUP_JOB", [value: prefix]) + helper.ccrJobDelete() + + target_sql "DROP DATABASE TEST_${context.DbName}" + helper.ccrJobCreate() + + assertTrue(helper.checkShowTimesOf( + """ SHOW BACKUP WHERE SnapshotName LIKE "${prefix}%" """, + is_backup_running, 60)) + sql "DROP TABLE IF EXISTS ${tableName}_1 FORCE" + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "INSERT INTO ${tableName}_1 VALUES (1, 1)" + sql "sync" + + GetDebugPoint().disableDebugPointForAllFEs("FE.PAUSE_PENDING_BACKUP_JOB") + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + sql "INSERT INTO ${tableName}_1 VALUES (2, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1", 2, 60)) +} + diff --git a/regression-test/suites/cross_ds/table/drop/alter/test_cds_tbl_alter_drop.groovy b/regression-test/suites/cross_ds/table/drop/alter/test_cds_tbl_alter_drop.groovy new file mode 100644 index 00000000..32d6509e --- /dev/null +++ b/regression-test/suites/cross_ds/table/drop/alter/test_cds_tbl_alter_drop.groovy @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cds_tbl_alter_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create a fake table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName}_fake + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}_fake", 60)) + + logger.info(" ==== create table and drop ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${oldTableName} VALUES (5, 500, 1)" + sql "DROP TABLE ${oldTableName} FORCE" + sql "INSERT INTO ${oldTableName}_fake VALUES (5, 500)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}_fake", 1, 60)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${oldTableName}\"", notExist, 60, "target")) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/table/drop/alter_create/test_cds_tbl_alter_drop_create.groovy b/regression-test/suites/cross_ds/table/drop/alter_create/test_cds_tbl_alter_drop_create.groovy new file mode 100644 index 00000000..04866308 --- /dev/null +++ b/regression-test/suites/cross_ds/table/drop/alter_create/test_cds_tbl_alter_drop_create.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_alter_drop_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create a fake table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName}_fake + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}_fake", 60)) + + logger.info(" ==== create table and drop ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${oldTableName} VALUES (5, 500, 1)" + sql "DROP TABLE ${oldTableName} FORCE" + sql "INSERT INTO ${oldTableName}_fake VALUES (5, 500)" + + logger.info("create table ${oldTableName} again ") + + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` 
VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + sql "INSERT INTO ${oldTableName}_fake VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}_fake", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${oldTableName}\"", exist, 60, "target")) + + // no fullsync are triggered + // def last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/table/drop/create/test_cds_tbl_create_drop.groovy b/regression-test/suites/cross_ds/table/drop/create/test_cds_tbl_create_drop.groovy new file mode 100644 index 00000000..6896de03 --- /dev/null +++ b/regression-test/suites/cross_ds/table/drop/create/test_cds_tbl_create_drop.groovy @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cds_tbl_create_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create a fake table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName}_fake + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}_fake", 60)) + + logger.info(" ==== create table and drop ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + sql "INSERT INTO ${oldTableName} VALUES (5, 500)" + sql "DROP TABLE ${oldTableName} FORCE" + + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${oldTableName}\"", notExist, 60, "target")) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + + + diff --git a/regression-test/suites/cross_ds/table/rename/alter/test_cds_tbl_rename_alter.groovy b/regression-test/suites/cross_ds/table/rename/alter/test_cds_tbl_rename_alter.groovy new file mode 100644 index 00000000..e4f9a473 --- /dev/null +++ b/regression-test/suites/cross_ds/table/rename/alter/test_cds_tbl_rename_alter.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_rename_alter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== alter table and rename ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${oldTableName} VALUES (5, 500, 1)" + sql "ALTER TABLE ${oldTableName} RENAME ${newTableName}" + sql "INSERT INTO ${newTableName} VALUES (6, 600, 2)" + + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${newTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 6, 60)) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + diff --git a/regression-test/suites/cross_ds/table/rename/alter_create/test_cds_tbl_rename_alter_create.groovy b/regression-test/suites/cross_ds/table/rename/alter_create/test_cds_tbl_rename_alter_create.groovy new file mode 100644 index 00000000..a77ae6cd --- /dev/null +++ b/regression-test/suites/cross_ds/table/rename/alter_create/test_cds_tbl_rename_alter_create.groovy @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_rename_alter_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== alter table and rename ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${oldTableName} VALUES (5, 500, 1)" + sql "ALTER TABLE ${oldTableName} RENAME ${newTableName}" + sql "INSERT INTO ${newTableName} VALUES (6, 600, 2)" + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${newTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 6, 60)) + + logger.info("create table ${oldTableName} again") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" + 
assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${oldTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 2, 60)) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/cross_ds/table/rename/alter_create_1/test_cds_tbl_rename_alter_create_1.groovy b/regression-test/suites/cross_ds/table/rename/alter_create_1/test_cds_tbl_rename_alter_create_1.groovy new file mode 100644 index 00000000..51bf11e6 --- /dev/null +++ b/regression-test/suites/cross_ds/table/rename/alter_create_1/test_cds_tbl_rename_alter_create_1.groovy @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_rename_alter_create_1") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + // Like test_cds_tbl_rename_alter_create, but create table when job is paused + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== alter table and rename ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE 
TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${oldTableName} VALUES (5, 500, 1)" + sql "ALTER TABLE ${oldTableName} RENAME ${newTableName}" + sql "INSERT INTO ${newTableName} VALUES (6, 600, 2)" + + logger.info("create table ${oldTableName} again") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${newTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 6, 60)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${oldTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 2, 60)) + + // FIXME(walter) full sync is triggered + // // no fullsync are triggered + // def last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/table/rename/create/test_cds_tbl_rename_create.groovy b/regression-test/suites/cross_ds/table/rename/create/test_cds_tbl_rename_create.groovy new file mode 100644 index 00000000..599e6968 --- /dev/null +++ b/regression-test/suites/cross_ds/table/rename/create/test_cds_tbl_rename_create.groovy @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cds_tbl_rename_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create a fake table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName}_fake + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}_fake", 60)) + + logger.info(" ==== create table and rename ==== ") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + sql "INSERT INTO ${oldTableName} VALUES (5, 500)" + sql "ALTER TABLE ${oldTableName} RENAME ${newTableName}" + sql "INSERT INTO ${newTableName} VALUES (6, 600)" + + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${newTableName}\"", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 6, 60)) + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/cross_ds/table/replace/alter/test_cds_tbl_alter_replace.groovy b/regression-test/suites/cross_ds/table/replace/alter/test_cds_tbl_alter_replace.groovy new file mode 100644 index 00000000..bc7d7f3b --- /dev/null +++ b/regression-test/suites/cross_ds/table/replace/alter/test_cds_tbl_alter_replace.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_alter_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace part and replace table without swap") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== add key column and replace without swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + + // FIXME(walter) ALTER TABLE COLUMN + REPLACE will trigger full sync, which the dropped tables are not dropped + // new table are dropped + // assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newTableName}" """, notExist, 60, "target")) + + // // no fullsync are triggered + // 
def last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + + diff --git a/regression-test/suites/cross_ds/table/replace/alter_create/test_cds_tbl_alter_replace_create.groovy b/regression-test/suites/cross_ds/table/replace/alter_create/test_cds_tbl_alter_replace_create.groovy new file mode 100644 index 00000000..2938f62f --- /dev/null +++ b/regression-test/suites/cross_ds/table/replace/alter_create/test_cds_tbl_alter_replace_create.groovy @@ -0,0 +1,140 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_alter_replace_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace part and replace table without swap") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== add key column and replace without swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD 
COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + + logger.info("create new table again") + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 2, 60)) + + // no fullsync are triggered + // def last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/table/replace/alter_create_1/test_cds_tbl_alter_replace_create_1.groovy b/regression-test/suites/cross_ds/table/replace/alter_create_1/test_cds_tbl_alter_replace_create_1.groovy new file mode 100644 index 00000000..7f8dcbba --- /dev/null +++ b/regression-test/suites/cross_ds/table/replace/alter_create_1/test_cds_tbl_alter_replace_create_1.groovy @@ -0,0 +1,143 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
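+
+// Doc comment (added for clarity, derived from the test body below): variant of
+// test_cds_tbl_alter_replace_create where the ccr job is resumed before the table
+// consumed by REPLACE WITH TABLE is re-created under its original name; the
+// re-created table is then verified downstream.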
+ +suite("test_cds_tbl_alter_replace_create_1") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace part and replace table without swap") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== add key column and replace without swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + + logger.info("create new table again") + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" + + def expect_res = { res -> + return res.size() == 2 + } + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${newTableName}", expect_res, 60, "target")) + + // no fullsync are triggered + // def last_job_progress = helper.get_job_progress() + // 
assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/cross_ds/table/replace/alter_swap/test_cds_tbl_alter_replace_swap.groovy b/regression-test/suites/cross_ds/table/replace/alter_swap/test_cds_tbl_alter_replace_swap.groovy new file mode 100644 index 00000000..f02d177f --- /dev/null +++ b/regression-test/suites/cross_ds/table/replace/alter_swap/test_cds_tbl_alter_replace_swap.groovy @@ -0,0 +1,206 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cds_tbl_alter_replace_swap") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace part and replace table without swap") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== add key column and replace with swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" 
+ SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"true\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + sql "INSERT INTO ${newTableName} VALUES (4, 400, 4)" // o:n, 3:7 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 7, 60)) + + // no fullsync are triggered + // def last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) + + logger.info("alter new table and swap again") + + helper.ccrJobDelete() + target_sql "DROP DATABASE TEST_${context.DbName}" + + sql "DROP TABLE ${oldTableName}" + sql "DROP TABLE ${newTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== add key column and replace with swap ==== ") + first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "ALTER TABLE ${newTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${newTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"true\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400, 4)" // o:n, 3:6 + sql "INSERT INTO ${newTableName} VALUES (4, 400)" // o:n, 3:7 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 7, 60)) + + // // no fullsync are triggered + // last_job_progress = helper.get_job_progress() + // assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git 
a/regression-test/suites/cross_ts/fullsync/replace/test_cts_fullsync_replace.groovy b/regression-test/suites/cross_ts/fullsync/replace/test_cts_fullsync_replace.groovy new file mode 100644 index 00000000..f4411b80 --- /dev/null +++ b/regression-test/suites/cross_ts/fullsync/replace/test_cts_fullsync_replace.groovy @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cts_fullsync_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace part and replace table without swap") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(oldTableName) + helper.ccrJobCreate(oldTableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== replace without swap and trigger fullsync ==== ") + helper.ccrJobPause(oldTableName) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" 
// o:n, 3:6 + + helper.force_fullsync(oldTableName) + helper.ccrJobResume(oldTableName) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + +} diff --git a/regression-test/suites/cross_ts/keyword_name/test_cts_keyword.groovy b/regression-test/suites/cross_ts/keyword_name/test_cts_keyword.groovy new file mode 100644 index 00000000..ea9a0779 --- /dev/null +++ b/regression-test/suites/cross_ts/keyword_name/test_cts_keyword.groovy @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cts_keyword_name") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "roles" + def newTableName = "test-hyphen" + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS `${tableName}` FORCE" + target_sql "DROP TABLE IF EXISTS `${tableName}` FORCE" + sql """ + CREATE TABLE `${tableName}` ( + `role` INT, + occupation VARCHAR(32), + camp VARCHAR(32), + register_time DATE + ) + UNIQUE KEY(`role`) + PARTITION BY RANGE (`role`) + ( + PARTITION p1 VALUES LESS THAN ("10") + ) + DISTRIBUTED BY HASH(`role`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ); + """ + + sql "DROP TABLE IF EXISTS `${newTableName}` FORCE" + target_sql "DROP TABLE IF EXISTS `${newTableName}` FORCE" + sql """ + CREATE TABLE `${newTableName}` ( + id INT, + name VARCHAR(10) + ) + UNIQUE KEY(id) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ); + """ + + sql """ + INSERT INTO `${tableName}` VALUES + (0, 'who am I', NULL, NULL), + (1, 'mage', 'alliance', '2018-12-03 16:11:28'), + (2, 'paladin', 'alliance', '2018-11-30 16:11:28'), + (3, 'rogue', 'horde', '2018-12-01 16:11:28'), + (4, 'priest', 'alliance', '2018-12-02 16:11:28'), + (5, 'shaman', 'horde', NULL), + (6, 'warrior', 'alliance', NULL), + (7, 'warlock', 'horde', '2018-12-04 16:11:28'), + (8, 'hunter', 'horde', NULL); + """ + sql """ + INSERT INTO `${newTableName}` VALUES + (1, 'a'), + (2, 'b'), + (3, 'c'); + """ + + // delete the exists ccr job first. 
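+    // One table-level CCR job is created per table: `roles` covers a keyword-style
+    // table name and `test-hyphen` covers a name containing a hyphen.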
+ helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + helper.ccrJobDelete(newTableName) + helper.ccrJobCreate(newTableName) + assertTrue(helper.checkRestoreFinishTimesOf("${newTableName}", 30)) + + logger.info("=== Test 1: Check keyword name table ===") + // def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE `TEST_${context.dbName}`.`${tableName}` + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE `TEST_${context.dbName}`.`${newTableName}` + """, + exist, 30, "target")) + + logger.info("=== Test 2: Add new partition ===") + sql """ + ALTER TABLE `${tableName}` ADD PARTITION p2 + VALUES LESS THAN ("20") + """ + + sql """ + INSERT INTO `${tableName}` VALUES + (11, 'who am I', NULL, NULL), + (12, 'mage', 'alliance', '2018-12-03 16:11:28'); + """ + + def checkNewPartition = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("PARTITION p2")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE `TEST_${context.dbName}`.`${tableName}` + """, + checkNewPartition, 30, "target")) + + logger.info("=== Test 3: Truncate table ===") + sql "TRUNCATE TABLE `${tableName}`" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * FROM `TEST_${context.dbName}`.`${tableName}` + """, + notExist, 30, "target")) + + logger.info("=== Test 4: Add column with keyword name ===") + // index is a keyword + sql "ALTER TABLE `${tableName}` ADD COLUMN `index` INT DEFAULT \"0\"" + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def has_column_index = { res -> Boolean + // Field == 'index' && 'Key' == 'NO' + return res[4][0] == 'index' && (res[4][3] == 'NO' || res[4][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_index, 60, "target_sql")) +} diff --git a/regression-test/suites/cross_ts/table/alter_replace/test_cts_tbl_alter_replace.groovy b/regression-test/suites/cross_ts/table/alter_replace/test_cts_tbl_alter_replace.groovy new file mode 100644 index 00000000..8432c188 --- /dev/null +++ b/regression-test/suites/cross_ts/table/alter_replace/test_cts_tbl_alter_replace.groovy @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
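+// This suite pauses the CCR job, adds a key column to the old table, then runs
+// `ALTER TABLE ... REPLACE WITH TABLE ... ("swap"="false")` before resuming.
+// The trailing `// o:n, X:Y` comments record the expected row counts of the old
+// and new tables (old:new) after each statement.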
+ +suite("test_cts_tbl_alter_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(oldTableName) + helper.ccrJobCreate(oldTableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== replace without swap and trigger fullsync ==== ") + helper.ccrJobPause(oldTableName) + + sql "ALTER TABLE ${oldTableName} ADD COLUMN `new_col` INT KEY DEFAULT \"0\"" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${oldTableName}" AND State = "FINISHED" + """, + exist, 30)) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300, 3), (300, 3, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + + helper.ccrJobResume(oldTableName) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + +} + diff --git a/regression-test/suites/db-sync/test_db_sync.groovy b/regression-test/suites/db-sync/test_db_sync.groovy deleted file mode 100644 index d7702cc1..00000000 --- a/regression-test/suites/db-sync/test_db_sync.groovy +++ /dev/null @@ -1,353 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_db_sync") { - - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - - def createUniqueTable = { tableName -> - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `id` INT - ) - ENGINE=OLAP - UNIQUE KEY(`test`, `id`) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "binlog.enable" = "true" - ) - """ - } - def createAggergateTable = { tableName -> - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `last` INT REPLACE DEFAULT "0", - `cost` INT SUM DEFAULT "0", - `max` INT MAX DEFAULT "0", - `min` INT MIN DEFAULT "0" - ) - ENGINE=OLAP - AGGREGATE KEY(`test`) - DISTRIBUTED BY HASH(`test`) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "binlog.enable" = "true" - ) - """ - } - - def createDuplicateTable = { tableName -> - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `id` INT - ) - ENGINE=OLAP - DUPLICATE KEY(`test`, `id`) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "binlog.enable" = "true" - ) - """ - } - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def exist = { res -> Boolean - return res.size() != 0 - } - def notExist = { res -> Boolean - return res.size() == 0 - } - - def tableUnique0 = "tbl_common_0_" + UUID.randomUUID().toString().replace("-", "") - def tableAggregate0 = "tbl_aggregate_0_" + UUID.randomUUID().toString().replace("-", "") - def tableDuplicate0 = "tbl_duplicate_0_" + UUID.randomUUID().toString().replace("-", "") - - createUniqueTable(tableUnique0) - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}) - """ - } - - createAggergateTable(tableAggregate0) - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableAggregate0} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index}) - """ - } - - 
createDuplicateTable(tableDuplicate0) - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableDuplicate0} VALUES (0, 99) - """ - } - - sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" - - String respone - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "" - body "${bodyJson}" - op "post" - result respone - } - - assertTrue(checkRestoreFinishTimesOf("${tableUnique0}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", - insert_num, 30)) - - assertTrue(checkRestoreFinishTimesOf("${tableAggregate0}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableAggregate0} WHERE test=${test_num}", - 1, 30)) - - assertTrue(checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableDuplicate0} WHERE test=${test_num}", - insert_num, 30)) - - logger.info("=== Test 1: dest cluster follow source cluster case ===") - test_num = 1 - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}) - """ - } - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableAggregate0} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index}) - """ - } - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableDuplicate0} VALUES (0, 99) - """ - } - - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", - insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableAggregate0} WHERE test=${test_num}", - 1, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableDuplicate0} WHERE test=0", - insert_num * (test_num + 1), 30)) - - - - logger.info("=== Test 2: create table case ===") - test_num = 2 - def tableUnique1 = "tbl_common_1_" + UUID.randomUUID().toString().replace("-", "") - def tableAggregate1 = "tbl_aggregate_1_" + UUID.randomUUID().toString().replace("-", "") - def tableDuplicate1 = "tbl_duplicate_1_" + UUID.randomUUID().toString().replace("-", "") - - createUniqueTable(tableUnique1) - createAggergateTable(tableAggregate1) - createDuplicateTable(tableDuplicate1) - - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableUnique1} VALUES (${test_num}, ${index}) - """ - } - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableAggregate1} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index}) - """ - } - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableDuplicate1} VALUES (0, 99) - """ - } - - assertTrue(checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableUnique1}", - exist, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique1} WHERE test=${test_num}", - insert_num, 30)) - - assertTrue(checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableAggregate1}", - exist, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableAggregate1} WHERE test=${test_num}", - 1, 30)) - - assertTrue(checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableDuplicate1}", - exist, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableDuplicate1} WHERE test=0", - insert_num, 30)) - - logger.info("=== Test 3: drop table case ===") - sql "DROP TABLE ${tableUnique1}" - sql "DROP TABLE ${tableAggregate1}" - sql "DROP TABLE ${tableDuplicate1}" - - assertTrue(checkShowTimesOf("SHOW TABLES LIKE '${tableUnique1}'", - notExist, 30, "target")) - 
assertTrue(checkShowTimesOf("SHOW TABLES LIKE '${tableAggregate1}'", - notExist, 30, "target")) - assertTrue(checkShowTimesOf("SHOW TABLES LIKE '${tableDuplicate1}'", - notExist, 30, "target")) - - logger.info("=== Test 4: pause and resume ===") - httpTest { - uri "/pause" - endpoint syncerAddress - def bodyJson = get_ccr_body "" - body "${bodyJson}" - op "post" - result respone - } - - test_num = 4 - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}) - """ - } - - assertTrue(!checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", - insert_num, 3)) - - httpTest { - uri "/resume" - endpoint syncerAddress - def bodyJson = get_ccr_body "" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", - insert_num, 30)) - - - logger.info("=== Test 5: desync job ===") - test_num = 5 - httpTest { - uri "/desync" - endpoint syncerAddress - def bodyJson = get_ccr_body "" - body "${bodyJson}" - op "post" - result respone - } - - sleep(sync_gap_time) - - def checkDesynced = {tableName -> - def res = target_sql "SHOW CREATE TABLE TEST_${context.dbName}.${tableName}" - def desynced = false - for (List row : res) { - if ((row[0] as String) == "${tableName}") { - desynced = (row[1] as String).contains("\"is_being_synced\" = \"false\"") - break - } - } - assertTrue(desynced) - } - - checkDesynced(tableUnique0) - checkDesynced(tableAggregate0) - checkDesynced(tableDuplicate0) - - - logger.info("=== Test 5: delete job ===") - test_num = 5 - httpTest { - uri "/delete" - endpoint syncerAddress - def bodyJson = get_ccr_body "" - body "${bodyJson}" - op "post" - result respone - } - - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}) - """ - } - - assertTrue(!checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", - insert_num, 5)) -} \ No newline at end of file diff --git a/regression-test/suites/db_ps_inc/add_partition/test_db_partial_sync_inc_add_partition.groovy b/regression-test/suites/db_ps_inc/add_partition/test_db_partial_sync_inc_add_partition.groovy new file mode 100644 index 00000000..f98e1493 --- /dev/null +++ b/regression-test/suites/db_ps_inc/add_partition/test_db_partial_sync_inc_add_partition.groovy @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_db_partial_sync_inc_add_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + + sql "DROP TABLE IF EXISTS ${tableName}" + target_sql "DROP TABLE IF EXISTS ${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "DROP TABLE IF EXISTS ${tableName1}" + target_sql "DROP TABLE IF EXISTS ${tableName1}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target_sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and add new partition") + helper.ccrJobPause() + + def column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName}\" " + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql """ + ALTER TABLE ${tableName} ADD PARTITION p4 VALUES LESS THAN("4000") + """ + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + 
+ assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + assertTrue(helper.checkShowTimesOf("SHOW PARTITIONS FROM ${tableName} WHERE PartitionName = \"p4\"", exist, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126, 3)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/db_ps_inc/alter/test_db_partial_sync_inc_alter.groovy b/regression-test/suites/db_ps_inc/alter/test_db_partial_sync_inc_alter.groovy new file mode 100644 index 00000000..7c5da37b --- /dev/null +++ b/regression-test/suites/db_ps_inc/alter/test_db_partial_sync_inc_alter.groovy @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_db_partial_sync_inc_alter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def notExist = { res -> Boolean + return res.size() == 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and drop again") + helper.ccrJobPause() + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql """ ALTER TABLE ${tableName} DROP COLUMN `first` """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + // no full sync triggered. 
+ def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/db_ps_inc/cache/test_db_partial_sync_cache.groovy b/regression-test/suites/db_ps_inc/cache/test_db_partial_sync_cache.groovy new file mode 100644 index 00000000..a64afe09 --- /dev/null +++ b/regression-test/suites/db_ps_inc/cache/test_db_partial_sync_cache.groovy @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_db_partial_sync_cache") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def get_ccr_name = { ccr_body_json -> + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${ccr_body_json}" + return object.name + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + + first_job_progress = helper.get_job_progress() + + logger.info("=== Test 1: add first column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && 
(res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 123)" + + // cache must be clear and reload. + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 1, 60)) + + // no full sync triggered. + last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/db_ps_inc/drop_partition/test_db_partial_sync_inc_drop_partition.groovy b/regression-test/suites/db_ps_inc/drop_partition/test_db_partial_sync_inc_drop_partition.groovy new file mode 100644 index 00000000..c62f61fb --- /dev/null +++ b/regression-test/suites/db_ps_inc/drop_partition/test_db_partial_sync_inc_drop_partition.groovy @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_db_partial_sync_inc_drop_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def notExist = { res -> Boolean + return res.size() == 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql "DROP TABLE IF EXISTS ${tableName1}" + target_sql "DROP TABLE IF EXISTS ${tableName}" + target_sql "DROP TABLE IF EXISTS ${tableName1}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + 
} + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target_sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and drop a partition") + helper.ccrJobPause() + + def column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName}\" " + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql """ + ALTER TABLE ${tableName} DROP PARTITION p3 + """ + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + assertTrue(helper.checkShowTimesOf("SHOW PARTITIONS FROM ${tableName} WHERE PartitionName = \"p3\"", notExist, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126, 4)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/db_ps_inc/lightning_sc/test_db_partial_sync_inc_lightning_sc.groovy b/regression-test/suites/db_ps_inc/lightning_sc/test_db_partial_sync_inc_lightning_sc.groovy new file mode 100644 index 00000000..a9e7151a --- /dev/null +++ b/regression-test/suites/db_ps_inc/lightning_sc/test_db_partial_sync_inc_lightning_sc.groovy @@ -0,0 +1,161 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_db_partial_sync_inc_lightning_sc") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and add value column") + helper.ccrJobPause() + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last` INT DEFAULT "0" + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + helper.ccrJobResume() + 
+ def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + def has_column_last = { res -> Boolean + // Field == 'last' && 'Key' == 'NO' + return res[4][0] == 'last' && (res[4][3] == 'NO' || res[4][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126, 4, 5)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/db_ps_inc/merge/test_db_partial_sync_merge.groovy b/regression-test/suites/db_ps_inc/merge/test_db_partial_sync_merge.groovy new file mode 100644 index 00000000..c3190c24 --- /dev/null +++ b/regression-test/suites/db_ps_inc/merge/test_db_partial_sync_merge.groovy @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
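+// This suite chains two partial syncs while the job is paused: a key column is
+// added to both tables, and the comment inside the suite sketches the sync-state
+// transitions. With AGGREGATE KEY and a SUM value column, the three inserts that
+// share key (123, 123, 123) with values 1, 2 and 3 must land downstream as a
+// single row whose value is 6.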
+suite("test_db_partial_sync_inc_merge") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql "DROP TABLE IF EXISTS ${tableName1}" + target_sql "DROP TABLE IF EXISTS ${tableName}" + target_sql "DROP TABLE IF EXISTS ${tableName1}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName1} VALUES ${values.join(",")} """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target_sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + // the change flow of the sync states: + // + // db incremental sync pause CCR job + // -> db partial sync table A add column to A + // -> db tables incremental with table A insert some data into A + // -> db partial sync table B add column to B + // -> db tables incremental with table A/B insert some data into A/B, resume CCR job + // -> db incremental sync + helper.ccrJobPause() + + def column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName}\" " + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + + column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName1}\" " + + sql """ + ALTER TABLE ${tableName1} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + 
assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName1}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 2)" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 123, 2)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 123, 3)" + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName1}`", has_column_first, 60, "target_sql")) + + logger.info("the aggregate keys inserted should be synced accurately") + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 1, 60)) + def last_record = target_sql "SELECT value FROM ${tableName} WHERE id = 123 AND test = 123" + logger.info("last record is ${last_record}") + assertTrue(last_record.size() == 1 && last_record[0][0] == 6) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 1, 60)) + last_record = target_sql "SELECT value FROM ${tableName1} WHERE id = 123 AND test = 123" + logger.info("last record of table ${tableName1} is ${last_record}") + assertTrue(last_record.size() == 1 && last_record[0][0] == 6) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/db_ps_inc/replace_partition/test_db_partial_sync_inc_replace_partition.groovy b/regression-test/suites/db_ps_inc/replace_partition/test_db_partial_sync_inc_replace_partition.groovy new file mode 100644 index 00000000..6ae48fec --- /dev/null +++ b/regression-test/suites/db_ps_inc/replace_partition/test_db_partial_sync_inc_replace_partition.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_db_partial_sync_inc_replace_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and replace partition") + helper.ccrJobPause() + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql "ALTER TABLE ${tableName} ADD TEMPORARY PARTITION tp3 VALUES [(\"2000\"), (\"3000\"))" + sql "INSERT INTO ${tableName} TEMPORARY PARTITION (tp3) VALUES (2500, 2500, 2500, 1)" + sql "ALTER TABLE ${tableName} REPLACE PARTITION (p3) WITH TEMPORARY PARTITION (tp3)" + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + 
assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126, 4)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 5, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/db_ps_inc/truncate_table/test_db_partial_sync_inc_trunc_table.groovy b/regression-test/suites/db_ps_inc/truncate_table/test_db_partial_sync_inc_trunc_table.groovy new file mode 100644 index 00000000..de5819aa --- /dev/null +++ b/regression-test/suites/db_ps_inc/truncate_table/test_db_partial_sync_inc_trunc_table.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_db_partial_sync_inc_trunc_table") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("1000"), + PARTITION p2 VALUES LESS THAN ("2000"), + PARTITION p3 VALUES LESS THAN ("3000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * 
FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and truncate table") + helper.ccrJobPause() + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (124, 124, 2)" + sql "INSERT INTO ${tableName1} VALUES (125, 125, 3)" + + sql "TRUNCATE TABLE ${tableName}" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (124, 124, 124, 2)" + sql "INSERT INTO ${tableName} VALUES (125, 125, 125, 3)" + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 3, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 3, 60)) + + sql "INSERT INTO ${tableName} VALUES (126, 126, 126, 4)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 4, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + diff --git a/regression-test/suites/db_ps_inc/upsert/test_db_partial_sync_inc_upsert.groovy b/regression-test/suites/db_ps_inc/upsert/test_db_partial_sync_inc_upsert.groovy new file mode 100644 index 00000000..975bc909 --- /dev/null +++ b/regression-test/suites/db_ps_inc/upsert/test_db_partial_sync_inc_upsert.groovy @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_db_partial_sync_inc_upsert") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def tableName1 = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql "DROP TABLE IF EXISTS ${tableName1}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql """ + INSERT INTO ${tableName1} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress() + + logger.info("=== pause job, add column and insert data") + helper.ccrJobPause() + + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 2)" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 3)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 1)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 2)" + sql "INSERT INTO ${tableName1} VALUES (123, 123, 3)" + + helper.ccrJobResume() + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + logger.info("the aggregate keys inserted should be synced accurately") + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 
1, 60)) + def last_record = target_sql "SELECT value FROM ${tableName} WHERE id = 123 AND test = 123" + logger.info("last record is ${last_record}") + assertTrue(last_record.size() == 1 && last_record[0][0] == 6) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", insert_num + 1, 60)) + last_record = target_sql "SELECT value FROM ${tableName1} WHERE id = 123 AND test = 123" + logger.info("last record of table ${tableName1} is ${last_record}") + assertTrue(last_record.size() == 1 && last_record[0][0] == 6) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + + diff --git a/regression-test/suites/db_sync/alt_prop/bloom_filter/test_ds_alt_prop_bloom_filter.groovy b/regression-test/suites/db_sync/alt_prop/bloom_filter/test_ds_alt_prop_bloom_filter.groovy new file mode 100644 index 00000000..66cef2fa --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/bloom_filter/test_ds_alt_prop_bloom_filter.groovy @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_bloom_filter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existBF = { res -> Boolean + return checkShowResult(res, "\"bloom_filter_columns\" = \"test, id\"") + } + + def notExistBF = { res -> Boolean + return !checkShowResult(res, "\"bloom_filter_columns\" = \"test, id\"") + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBF, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBF, 60, "target")) + + logger.info("=== Test 2: alter table set property bloom filter columns ===") + + def state = sql """ SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = "${tableName}" AND State = "FINISHED" """ + + sql """ + ALTER TABLE ${tableName} SET ("bloom_filter_columns" = "test, id"); + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 1), 30)) + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBF, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBF, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/bucket/test_ds_alt_prop_bucket.groovy b/regression-test/suites/db_sync/alt_prop/bucket/test_ds_alt_prop_bucket.groovy new file mode 100644 index 00000000..fe5d248f --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/bucket/test_ds_alt_prop_bucket.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_alt_prop_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def existOldBucket = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 1") + } + + def existNewBucket = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "target")) + + logger.info("=== Test 2: alter table set property bucket num ===") + + sql """ + ALTER TABLE ${tableName} MODIFY DISTRIBUTION DISTRIBUTED BY HASH(`id`) BUCKETS 20 + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewBucket, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/colocate/test_ds_alt_prop_colocate_with.groovy b/regression-test/suites/db_sync/alt_prop/colocate/test_ds_alt_prop_colocate_with.groovy new file mode 100644 index 00000000..ccdceefd --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/colocate/test_ds_alt_prop_colocate_with.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_alt_prop_colocate_with") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existGrooup1 = { res -> Boolean + return res[0][1].contains("\"colocate_with\" = \"test_group_1\"") + } + + def notExistGrooup1 = { res -> Boolean + return !res[0][1].contains("\"colocate_with\" = \"test_group_1\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} SET ("colocate_with" = "test_group_1") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existGrooup1, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/comment/test_ds_alt_prop_comment.groovy b/regression-test/suites/db_sync/alt_prop/comment/test_ds_alt_prop_comment.groovy new file mode 100644 index 00000000..ad6db591 --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/comment/test_ds_alt_prop_comment.groovy @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_comment") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existComment = { res -> Boolean + return res[0][1].contains("COMMENT 'test_comment'") + } + + def notExistComment = { res -> Boolean + return !res[0][1].contains("COMMENT 'test_comment'") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistComment, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistComment, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} MODIFY COMMENT "test_comment" + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existComment, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existComment, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/compaction/test_ds_alt_prop_compaction.groovy b/regression-test/suites/db_sync/alt_prop/compaction/test_ds_alt_prop_compaction.groovy new file mode 100644 index 00000000..3faee5d7 --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/compaction/test_ds_alt_prop_compaction.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_compaction") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { res, property -> Boolean + if(!res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existNewCompaction = { res -> Boolean + Boolean result = checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"2048\"") && + checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"3000\"") && + checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"4000\"") && + checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"6\"") && + checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"2\"") + return result + } + + def existOldCompaction = { res -> Boolean + Boolean result = checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"1024\"") && + checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"2000\"") && + checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"3600\"") && + checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"5\"") && + checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"1\"") + return result + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldCompaction, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldCompaction, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + alter table ${tableName} set ("compaction_policy" = "time_series") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_goal_size_mbytes" = "2048") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_file_count_threshold" = "3000") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_time_threshold_seconds" = "4000") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_empty_rowsets_threshold" = "6") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_level_threshold" = "2") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewCompaction, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldCompaction, 
60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/distribution_type/test_ds_alt_prop_distr_type.groovy b/regression-test/suites/db_sync/alt_prop/distribution_type/test_ds_alt_prop_distr_type.groovy new file mode 100644 index 00000000..cca1bb1a --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/distribution_type/test_ds_alt_prop_distr_type.groovy @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_alt_prop_distr_type") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existBucketNew = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + def notExistBucketNew = { res -> Boolean + return !res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "target")) + + logger.info("=== Test 2: alter table set property distribution ===") + + sql """ + ALTER TABLE ${tableName} MODIFY DISTRIBUTION DISTRIBUTED BY HASH(id) BUCKETS 20; + """ + + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBucketNew, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/dy_part/test_ds_alt_prop_dy_pary.groovy b/regression-test/suites/db_sync/alt_prop/dy_part/test_ds_alt_prop_dy_pary.groovy new file mode 100644 index 00000000..f5225206 --- /dev/null 
+++ b/regression-test/suites/db_sync/alt_prop/dy_part/test_ds_alt_prop_dy_pary.groovy @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_alt_prop_dy_pary") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existNewPartitionProperty = { target_res -> Boolean + Boolean result = checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"") && + checkShowResult(target_res, "\"dynamic_partition.start\" = \"-3\"") && + checkShowResult(target_res, "\"dynamic_partition.end\" = \"3\"") && + checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"pp\"") && + checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"64\"") && + checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"1\"") && + checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"false\"") && + checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2023-01-01,2023-12-31],[2024-01-01,2024-12-31]\"") + return result + } + + def existOldPartitionProperty = { res -> Boolean + Boolean result = checkShowResult(res, "\"dynamic_partition.time_unit\" = \"DAY\"") && + checkShowResult(res, "\"dynamic_partition.start\" = \"-2\"") && + checkShowResult(res, "\"dynamic_partition.end\" = \"2\"") && + checkShowResult(res, "\"dynamic_partition.prefix\" = \"p\"") && + checkShowResult(res, "\"dynamic_partition.buckets\" = \"32\"") && + checkShowResult(res, "\"dynamic_partition.history_partition_num\" = \"2\"") && + checkShowResult(res, "\"dynamic_partition.create_history_partition\" = \"true\"") && + checkShowResult(res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"") + return result + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + 
"dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldPartitionProperty, 60, "sql")) + + logger.info("=== Test 2: alter table set property dynamic partition ===") + + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.time_unit" = "WEEK") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.start" = "-3") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.end" = "3") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.prefix" = "pp") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.create_history_partition" = "false") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.buckets" = "64") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.history_partition_num" = "1") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.reserved_history_periods" = "[2023-01-01,2023-12-31],[2024-01-01,2024-12-31]") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewPartitionProperty, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldPartitionProperty, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/light_schema_change/test_ds_alt_prop_light_schema_change.groovy b/regression-test/suites/db_sync/alt_prop/light_schema_change/test_ds_alt_prop_light_schema_change.groovy new file mode 100644 index 00000000..1c211662 --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/light_schema_change/test_ds_alt_prop_light_schema_change.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_light_schema_change") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def lightSchemaChange = { res -> Boolean + return res[0][1].contains("\"light_schema_change\" = \"true\"") + } + + def notLightSchemaChange = { res -> Boolean + return !res[0][1].contains("\"light_schema_change\" = \"false\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "false" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "target")) + + logger.info("=== Test 2: alter table set property light_schema_change ===") + + sql """ + ALTER TABLE ${tableName} SET ("light_schema_change" = "true"); + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", lightSchemaChange, 60, "sql")) + + // todo + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/row_store/test_ds_alt_prop_row_store.groovy b/regression-test/suites/db_sync/alt_prop/row_store/test_ds_alt_prop_row_store.groovy new file mode 100644 index 00000000..960d8d4d --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/row_store/test_ds_alt_prop_row_store.groovy @@ -0,0 +1,138 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_row_store") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existRowStore = { res -> Boolean + if(!checkShowResult(res, "\"row_store_columns\" = \"test,id\"")) { + return false + } + if(!checkShowResult(res, "\"row_store_page_size\" = \"16384\"")) { + return false + } + return true + } + + def notExistRowStore = { res -> Boolean + if(!checkShowResult(res, "\"row_store_columns\" = \"test,id\"")) { + return true; + } + if(!checkShowResult(res, "\"row_store_page_size\" = \"16384\"")) { + return true; + } + return false + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistRowStore, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistRowStore, 60, "target")) + + logger.info("=== Test 2: alter table set property row store ===") + + def state = sql """ SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = "${tableName}" AND State = "FINISHED" """ + + sql """ + ALTER TABLE ${tableName} SET ("store_row_column" = "true") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 1), 30)) + + sql """ + ALTER TABLE ${tableName} SET ("row_store_columns" = "test,id") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 2), 30)) + // mysql> ALTER TABLE t SET ("row_store_page_size" = "32768"); + // ERROR 1105 (HY000): errCode = 2, detailMessage = Unknown table property: [row_store_page_size] + // sql """ + // ALTER TABLE ${tableName} SET ("row_store_page_size" = "16348") + // """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existRowStore, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existRowStore, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/storage_policy/test_ds_alt_prop_stor_policy.groovy 
b/regression-test/suites/db_sync/alt_prop/storage_policy/test_ds_alt_prop_stor_policy.groovy new file mode 100644 index 00000000..a8d5e8c7 --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/storage_policy/test_ds_alt_prop_stor_policy.groovy @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_alt_prop_stor_policy") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def existPolicy = { res -> Boolean + return res[0][1].contains("\"storage_policy\" = \"test_policy\"") + } + + def notexistPolicy = { res -> Boolean + return !res[0][1].contains("\"storage_policy\" = \"test_policy\"") + } + + def resource_name = "test_ts_tbl_storage_policy_resource" + def policy_name= "test_policy" + + def check_storage_policy_exist = { name-> + def polices = sql""" + show storage policy; + """ + for (p in polices) { + if (name == p[0]) { + return true; + } + } + return false; + } + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + def has_resouce = sql """ + SHOW RESOURCES WHERE NAME = "${resource_name}"; + """ + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW 
TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "target")) + + logger.info("=== Test 2: alter table set property storage_policy ===") + + sql """ + ALTER TABLE ${tableName} set ("storage_policy" = "${policy_name}"); + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existPolicy, 60, "sql")) + + // don't synced + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/alt_prop/synced/test_ds_alt_prop_synced.groovy b/regression-test/suites/db_sync/alt_prop/synced/test_ds_alt_prop_synced.groovy new file mode 100644 index 00000000..7a1929a0 --- /dev/null +++ b/regression-test/suites/db_sync/alt_prop/synced/test_ds_alt_prop_synced.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_alt_prop_synced") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existSynced = { res -> Boolean + return res[0][1].contains("\"is_being_synced\" = \"true\"") + } + + def notExistSynced = { res -> Boolean + return res[0][1].contains("\"is_being_synced\" = \"false\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistSynced, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existSynced, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} SET ("is_being_synced" = "false") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistSynced, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existSynced, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/column/alter_type/test_ds_col_alter_type.groovy b/regression-test/suites/db_sync/column/alter_type/test_ds_col_alter_type.groovy new file mode 100644 index 00000000..dc6f108e --- /dev/null +++ b/regression-test/suites/db_sync/column/alter_type/test_ds_col_alter_type.groovy @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_ds_col_alter_type") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def value_is_big_int = { res -> Boolean + // Field == 'value' && 'Type' == 'bigint' + return res[2][0] == 'value' && res[2][1] == 'bigint' + } + + def id_is_big_int = { res -> Boolean + // Field == 'id' && 'Type' == 'bigint' + return res[1][0] == 'id' && res[1][1] == 'bigint' + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: add key column type ===") + + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `id` BIGINT KEY + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", id_is_big_int, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", id_is_big_int, 60, "target_sql")) + + logger.info("=== Test 2: alter value column type ===") + + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `value` BIGINT + """ + sql "sync" + + logger.info("=== Test 2: Check column type ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_is_big_int, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_is_big_int, 60, "target_sql")) +} + diff --git a/regression-test/suites/db_sync/column/basic/test_ds_col_basic.groovy b/regression-test/suites/db_sync/column/basic/test_ds_col_basic.groovy new file mode 100644 index 00000000..7c7039b5 --- /dev/null +++ b/regression-test/suites/db_sync/column/basic/test_ds_col_basic.groovy @@ -0,0 +1,204 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_col_basic") { + // 1. add first key column + // 2. add last key column + // 3. add value column + // 4. add last value column + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + first_job_progress = helper.get_job_progress() + + logger.info("=== Test 1: add first column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + logger.info("=== Test 2: add column after last key ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11049, + // "tableId": 11058, + // "tableName": "tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId": 11100, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `last` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `id`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last` INT KEY DEFAULT "0" AFTER `id` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + def has_column_last = { res -> Boolean + // Field == 'last' && 
'Key' == 'YES' + return res[3][0] == 'last' && (res[3][3] == 'YES' || res[3][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last, 60, "target_sql")) + + logger.info("=== Test 3: add value column after last key ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11058, + // "indexSchemaMap": { + // "11101": [...] + // }, + // "indexes": [], + // "jobId": 11117, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first_value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `last`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first_value` INT DEFAULT "0" AFTER `last` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(3), 30)) + + def has_column_first_value = { res -> Boolean + // Field == 'first_value' && 'Key' == 'NO' + return res[4][0] == 'first_value' && (res[4][3] == 'NO' || res[4][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first_value, 60, "target_sql")) + + logger.info("=== Test 4: add value column last ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11150, + // "indexSchemaMap": { + // "11180": [] + // }, + // "indexes": [], + // "jobId": 11197, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column5f9a63de97fc4b5fb7a001f778dd180d` ADD COLUMN `last_value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `value`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last_value` INT DEFAULT "0" AFTER `value` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(4), 30)) + + def has_column_last_value = { res -> Boolean + // Field == 'last_value' && 'Key' == 'NO' + return res[6][0] == 'last_value' && (res[6][3] == 'NO' || res[6][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last_value, 60, "target_sql")) + + // no full sync triggered. + last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/db_sync/column/drop_key/test_ds_col_drop_key_col.groovy b/regression-test/suites/db_sync/column/drop_key/test_ds_col_drop_key_col.groovy new file mode 100644 index 00000000..a02c0f5a --- /dev/null +++ b/regression-test/suites/db_sync/column/drop_key/test_ds_col_drop_key_col.groovy @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_col_drop_key") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def id_column_not_exists = { res -> Boolean + def not_exists = true + for (int i = 0; i < res.size(); i++) { + if (res[i][0] == 'id') { + not_exists = false + } + } + return not_exists + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate() + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: drop key column ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` DROP COLUMN `id`" + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `id` + """ + sql "sync" + + logger.info("=== Test 2: Check key column ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", id_column_not_exists, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", id_column_not_exists, 60, "target_sql")) +} + diff --git a/regression-test/suites/db_sync/column/drop_val/test_ds_col_drop_val_col.groovy b/regression-test/suites/db_sync/column/drop_val/test_ds_col_drop_val_col.groovy new file mode 100644 index 00000000..d893edda --- /dev/null +++ b/regression-test/suites/db_sync/column/drop_val/test_ds_col_drop_val_col.groovy @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_col_drop_val") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def value_column_not_exists = { res -> Boolean + def not_exists = true + for (int i = 0; i < res.size(); i++) { + if (res[i][0] == 'value') { + not_exists = false + } + } + return not_exists + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate() + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: drop value column ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11415, + // "indexSchemaMap": { + // "11433": [ + // { + // "name": "test", + // "type": { + // "clazz": "ScalarType", + // "type": "INT", + // "len": -1, + // "precision": 0, + // "scale": 0 + // }, + // "isAggregationTypeImplicit": false, + // "isKey": true, + // "isAllowNull": true, + // "isAutoInc": false, + // "autoIncInitValue": -1, + // "comment": "", + // "stats": { + // "avgSerializedSize": -1.0, + // "maxSize": -1, + // "numDistinctValues": -1, + // "numNulls": -1 + // }, + // "children": [], + // "visible": true, + // "uniqueId": 0, + // "clusterKeyId": -1, + // "hasOnUpdateDefaultValue": false, + // "gctt": [] + // } + // ] + // }, + // "indexes": [], + // "jobId": 11444, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_drop_columnc84979beb0484120a5057fb2a3eeee6b` DROP COLUMN `value`" + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `value` + """ + sql "sync" + + logger.info("=== Test 2: Check value column ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_column_not_exists, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_column_not_exists, 60, "target_sql")) +} + diff --git a/regression-test/suites/db_sync/column/order_by/test_ds_col_order_by.groovy 
b/regression-test/suites/db_sync/column/order_by/test_ds_col_order_by.groovy new file mode 100644 index 00000000..9ce181ec --- /dev/null +++ b/regression-test/suites/db_sync/column/order_by/test_ds_col_order_by.groovy @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_col_order_by") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def key_columns_order = { res -> Boolean + return res[0][0] == 'id' && (res[0][3] == 'YES' || res[0][3] == 'true') && + res[1][0] == 'test' && (res[1][3] == 'YES' || res[1][3] == 'true') && + res[2][0] == 'value1' && (res[2][3] == 'NO' || res[2][3] == 'false') && + res[3][0] == 'value' && (res[3][3] == 'NO' || res[3][3] == 'false') + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT, + `value1` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate() + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: order by column case ===") + + sql """ + ALTER TABLE ${tableName} + ORDER BY (`id`, `test`, `value1`, `value`) + """ + sql "sync" + + logger.info("=== Test 3: Check ordered column ===") + + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + exist, 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", key_columns_order, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", key_columns_order, 60, "target_sql")) +} + diff --git a/regression-test/suites/db_sync/column/rename/test_ds_col_rename.groovy b/regression-test/suites/db_sync/column/rename/test_ds_col_rename.groovy new file mode 100644 index 00000000..590a1c6d --- /dev/null +++ b/regression-test/suites/db_sync/column/rename/test_ds_col_rename.groovy @@ -0,0 +1,127 @@ +// Licensed to the 
Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_col_rename") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "test_ds_col_rename_tbl" + def newColName = 'test_ds_col_rename_new_col' + def oldColName = 'test_ds_col_rename_old_col' + def test_num = 0 + def insert_num = 5 + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def has_column = { column -> + return { res -> Boolean + res[0][0] == column + } + } + + def not_has_column = { column -> + return { res -> Boolean + res[0][0] != column + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + ${oldColName} INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(${oldColName}, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + + result = sql "select * from ${tableName}" + + assertEquals(result.size(), insert_num) + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: Check old column exist and new column not exist ===") + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(oldColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(newColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(oldColName), 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(newColName), 60, "target_sql")) + + logger.info("=== Test 2: Alter table rename column and insert data ===") + + sql "ALTER TABLE ${dbName}.${tableName} RENAME COLUMN ${oldColName} ${newColName} " + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "sql")) + + values = []; + for (int index = insert_num; index < insert_num * 2; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + logger.info("=== Test 3: Check old column not exist and new column exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", 
not_has_column(oldColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(oldColName), 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "target_sql")) + + logger.info("=== Test 4: Check inserted data ===") + + result = sql " select * from ${tableName} " + + result_target = target_sql " select * from ${tableName} " + + assertEquals(result, result_target) + +} + diff --git a/regression-test/suites/db_sync/common/test_ds_common.groovy b/regression-test/suites/db_sync/common/test_ds_common.groovy new file mode 100644 index 00000000..bcddb664 --- /dev/null +++ b/regression-test/suites/db_sync/common/test_ds_common.groovy @@ -0,0 +1,297 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_common") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("2.0 not support AUTO PARTITION, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def test_num = 0 + def insert_num = 5 + def date_num = "2021-01-02" + + def createUniqueTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `date_time` date NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`, `date_time`) + AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day')) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "estimate_partition_size" = "10G", + "binlog.enable" = "true" + ) + """ + } + def createAggergateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `date_time` date NOT NULL, + `last` INT REPLACE DEFAULT "0", + `cost` INT SUM DEFAULT "0", + `max` INT MAX DEFAULT "0", + `min` INT MIN DEFAULT "0" + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `date_time`) + AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day')) + ( + ) + DISTRIBUTED BY HASH(`test`) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "estimate_partition_size" = "10G", + "binlog.enable" = "true" + ) + """ + } + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `date_time` date NOT NULL + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`, `date_time`) + AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day')) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + 
"replication_allocation" = "tag.location.default: 1", + "estimate_partition_size" = "10G", + "binlog.enable" = "true" + ) + """ + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableUnique0 = "tbl_common_0_${suffix}" + def tableAggregate0 = "tbl_aggregate_0_${suffix}" + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + + createUniqueTable(tableUnique0) + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}') + """ + } + + createAggergateTable(tableAggregate0) + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableAggregate0} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index}) + """ + } + + createDuplicateTable(tableDuplicate0) + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableDuplicate0} VALUES (0, 99, '${date_num}') + """ + } + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableUnique0}", 130)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", + insert_num, 50)) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableAggregate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableAggregate0} WHERE test=${test_num}", + 1, 30)) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0} WHERE test=${test_num}", + insert_num, 30)) + + logger.info("=== Test 1: dest cluster follow source cluster case ===") + test_num = 1 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}') + """ + } + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableAggregate0} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index}) + """ + } + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableDuplicate0} VALUES (0, 99, '${date_num}') + """ + } + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", + insert_num, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableAggregate0} WHERE test=${test_num}", + 1, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0} WHERE test=0", + insert_num * (test_num + 1), 30)) + + + + logger.info("=== Test 2: create table case ===") + test_num = 2 + def tableUnique1 = "tbl_common_1_${suffix}" + def tableAggregate1 = "tbl_aggregate_1_${suffix}" + def tableDuplicate1 = "tbl_duplicate_1_${suffix}" + def keywordTableName = "`roles`" + + createUniqueTable(tableUnique1) + createAggergateTable(tableAggregate1) + createDuplicateTable(tableDuplicate1) + createUniqueTable(keywordTableName) + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableUnique1} VALUES (${test_num}, ${index}, '${date_num}') + """ + } + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableAggregate1} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index}) + """ + } + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableDuplicate1} VALUES (0, 99, '${date_num}') + """ + } + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${keywordTableName} 
VALUES (${test_num}, ${index}, '${date_num}') + """ + } + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableUnique1}", + exist, 30, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableUnique1} WHERE test=${test_num}", + insert_num, 30)) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableAggregate1}", + exist, 30, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableAggregate1} WHERE test=${test_num}", + 1, 30)) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableDuplicate1}", + exist, 30, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate1} WHERE test=0", + insert_num, 30)) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${keywordTableName}", + exist, 30, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${keywordTableName} WHERE test=${test_num}", + insert_num, 30)) + + logger.info("=== Test 3: drop table case ===") + sql "DROP TABLE ${tableUnique1}" + sql "DROP TABLE ${tableAggregate1}" + sql "DROP TABLE ${tableDuplicate1}" + sql "DROP TABLE ${keywordTableName}" + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE '${tableUnique1}'", + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE '${tableAggregate1}'", + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE '${tableDuplicate1}'", + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE '${keywordTableName}'", + notExist, 30, "target")) + + logger.info("=== Test 4: pause and resume ===") + helper.ccrJobPause() + + test_num = 4 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}') + """ + } + + assertTrue(!helper.checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", + insert_num, 3)) + + helper.ccrJobResume() + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", + insert_num, 30)) + + + logger.info("=== Test 5: desync job ===") + test_num = 5 + helper.ccrJobDesync() + + sleep(helper.sync_gap_time) + + def checkDesynced = {tableName -> + def res = target_sql "SHOW CREATE TABLE TEST_${context.dbName}.${tableName}" + def desynced = false + for (List row : res) { + if ((row[0] as String) == "${tableName}") { + desynced = (row[1] as String).contains("\"is_being_synced\" = \"false\"") + break + } + } + assertTrue(desynced) + } + + checkDesynced(tableUnique0) + checkDesynced(tableAggregate0) + checkDesynced(tableDuplicate0) + + + logger.info("=== Test 5: delete job ===") + test_num = 5 + helper.ccrJobDelete() + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}') + """ + } + + assertTrue(!helper.checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}", + insert_num, 5)) +} diff --git a/regression-test/suites/db_sync/dml/insert_overwrite/test_ds_dml_insert_overwrite.groovy b/regression-test/suites/db_sync/dml/insert_overwrite/test_ds_dml_insert_overwrite.groovy new file mode 100644 index 00000000..15ff805e --- /dev/null +++ b/regression-test/suites/db_sync/dml/insert_overwrite/test_ds_dml_insert_overwrite.groovy @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_dml_insert_overwrite") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("2.0 not support INSERT OVERWRITE yet, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + // The doris has two kind of insert overwrite handle logic: leagcy and nereids. + // The first will + // 1. create temp table + // 2. insert into temp table + // 3. replace table + // The second will + // 1. create temp partitions + // 2. insert into temp partitions + // 3. replace overlap partitions + def tableName = "tbl_" + helper.randomSuffix() + def uniqueTable = "${tableName}_unique" + def test_num = 0 + def insert_num = 5 + String response + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${uniqueTable} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(id) + ( + PARTITION `p1` VALUES LESS THAN ("100"), + PARTITION `p2` VALUES LESS THAN ("200") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "180" + ) + """ + + sql """ + INSERT INTO ${uniqueTable} VALUES + (1, 0), + (1, 1), + (1, 2), + (1, 3), + (1, 4) + """ + sql "sync" + + // test 1: target cluster follow source cluster + logger.info("=== Test 1: backup/restore case ===") + + helper.ccrJobDelete() + helper.ccrJobCreate() + assertTrue(helper.checkRestoreFinishTimesOf("${uniqueTable}", 60)) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test = 1", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test = 1", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test = 1 ORDER BY id", 5, 60)) + + logger.info("=== Test 2: dest cluster follow source cluster case ===") + + sql """ + INSERT INTO ${uniqueTable} VALUES + (2, 0), + (2, 1), + (2, 2), + (2, 3), + (2, 4) + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", 5, 60)) + + logger.info("=== Test 3: insert overwrite source table ===") + + sql """ + INSERT OVERWRITE TABLE ${uniqueTable} VALUES + (3, 0), + (3, 1), + (3, 2), + (3, 3), + (3, 4) + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf("SELECT * FROM 
${uniqueTable} WHERE test=3", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", notExist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test=3", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", notExist, 60, "target")) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=3", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable}", 5, 60)) +} + + diff --git a/regression-test/suites/db_sync/mv/basic/test_ds_mv_basic.groovy b/regression-test/suites/db_sync/mv/basic/test_ds_mv_basic.groovy new file mode 100644 index 00000000..cba173cd --- /dev/null +++ b/regression-test/suites/db_sync/mv/basic/test_ds_mv_basic.groovy @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_mv_basic") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def checkRestoreRowsTimesOf = {rowSize, times -> Boolean + Boolean ret = false + while (times > 0) { + def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + if (sqlInfo.size() == rowSize) { + ret = true + break + } else if (--times > 0 && sqlInfo.size < rowSize) { + sleep(sync_gap_time) + } + } + + return ret + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17); + """ + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 5, 30)) + + logger.info("=== Test1: create view and materialized view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + sql """ + create materialized view 
user_id_name_${suffix} as + select user_id, name from ${tableDuplicate0}; + """ + + assertTrue(helper.checkShowTimesOf("SHOW VIEW FROM ${tableDuplicate0}", exist, 30, "target")) + assertTrue(helper.checkRestoreFinishTimesOf("view_test_${suffix}", 30)) + + explain { + sql("select user_id, name from ${tableDuplicate0}") + contains "user_id_name" + } + + logger.info("=== Test 2: delete job ===") + test_num = 5 + helper.ccrJobDelete() + + sql """ + INSERT INTO ${tableDuplicate0} VALUES (6, "Zhangsan", 31) + """ + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 5, 5)) +} + diff --git a/regression-test/suites/db_sync/partition/drop/test_ds_part_drop.groovy b/regression-test/suites/db_sync/partition/drop/test_ds_part_drop.groovy new file mode 100644 index 00000000..573d3841 --- /dev/null +++ b/regression-test/suites/db_sync/partition/drop/test_ds_part_drop.groovy @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_part_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 90 // insert into last partition + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20"), + PARTITION `${opPartitonName}_3` VALUES LESS THAN ("30"), + PARTITION `${opPartitonName}_4` VALUES LESS THAN ("40"), + PARTITION `${opPartitonName}_5` VALUES LESS THAN ("50"), + PARTITION `${opPartitonName}_6` VALUES LESS THAN ("60"), + PARTITION `${opPartitonName}_7` VALUES LESS THAN ("70"), + PARTITION `${opPartitonName}_8` VALUES LESS THAN ("80"), + PARTITION `${opPartitonName}_9` VALUES LESS THAN ("90") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: Check partitions in src before sync case ===") + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_9\" + """, + exist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM 
TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_8\" + """, + exist, 30, "target")) + + // save the backup num of source cluster + def show_backup_result = sql "SHOW BACKUP" + def backup_num = show_backup_result.size() + logger.info("backups before drop partition: ${show_backup_result}") + + logger.info("=== Test 2: Insert data in valid partitions case ===") + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + + logger.info("=== Test 3: pause ===") + + helper.ccrJobPause() + + test_num = 4 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + + sql "sync" + + logger.info("=== Test 4: Drop partitions case ===") + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_9 + """ + + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_8 + """ + sql "sync" + + logger.info("=== Test 5: pause and verify ===") + + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_9\" + """, + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_8\" + """, + notExist, 30, "target")) + + show_backup_result = sql "SHOW BACKUP" + logger.info("backups after drop partition: ${show_backup_result}") + assertTrue(show_backup_result.size() == backup_num) +} + + diff --git a/regression-test/suites/db_sync/partition/drop_1/test_ds_part_drop_1.groovy b/regression-test/suites/db_sync/partition/drop_1/test_ds_part_drop_1.groovy new file mode 100644 index 00000000..76761d18 --- /dev/null +++ b/regression-test/suites/db_sync/partition/drop_1/test_ds_part_drop_1.groovy @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_part_drop_1") { + // Case description + // Insert data and drop a partition, then the ccr syncer wouldn't get the partition ids from the source cluster. 
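+    // Timeline exercised below: the ccr job is paused, new rows are inserted into partition p202001 on the
+    // source cluster, and p202001 is then dropped before the job resumes. When the syncer catches up it must
+    // handle insert binlogs that reference a partition which no longer exists upstream, so it can no longer
+    // resolve the partition ids from the source cluster; the qt_sql / qt_target_sql probes record the visible
+    // rows on each side at every step.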
+ + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def caseName = "usercases_cir_8537" + def tableName = "tbl_" + helper.randomSuffix() + def syncerAddress = "127.0.0.1:9190" + def test_num = 0 + def insert_num = 5 + def sync_gap_time = 5000 + String response + + sql """ + CREATE TABLE ${tableName} ( + sale_date DATE, + id INT, + product_id INT, + quantity INT, + revenue FLOAT + ) + DUPLICATE KEY(sale_date, id) + PARTITION BY RANGE(sale_date) ( + PARTITION p202001 VALUES [('2020-01-01'), ('2020-02-01')), + PARTITION p202002 VALUES [('2020-02-01'), ('2020-03-01')) + ) + DISTRIBUTED BY HASH(id) BUCKETS auto + PROPERTIES ( + "replication_num" = "1", + "binlog.enable" = "true" + ) + """ + + sql """ + INSERT INTO ${tableName} (id, product_id, sale_date, quantity, revenue) + VALUES + (3, 103, '2020-01-10', 15, 225.0), + (4, 104, '2020-01-20', 30, 450.0); + """ + + sql "sync" + + logger.info("=== 1. create ccr ===") + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + qt_sql "SELECT * FROM ${tableName} ORDER BY id" + qt_target_sql "SELECT * FROM ${tableName} ORDER BY id" + + logger.info("=== 2. pause ccr ===") + helper.ccrJobPause(tableName) + + sql """ + INSERT INTO ${tableName} (id, product_id, sale_date, quantity, revenue) + VALUES + (3, 103, '2020-01-10', 15, 225.0), + (4, 104, '2020-01-20', 30, 450.0); + """ + + sql """ + ALTER TABLE ${tableName} DROP PARTITION p202001; + """ + sql "sync" + + qt_sql "SELECT * FROM ${tableName} ORDER BY id" + qt_target_sql "SELECT * FROM ${tableName} ORDER BY id" + + logger.info("=== 3. resume ccr ===") + helper.ccrJobResume(tableName) + + qt_sql "SELECT * FROM ${tableName} ORDER BY id" + qt_target_sql "SELECT * FROM ${tableName} ORDER BY id" + + logger.info("=== 4. insert and query again ===") + sql """ + INSERT INTO ${tableName} (id, product_id, sale_date, quantity, revenue) + VALUES + (5, 105, '2020-02-20', 50, 550.0); + """ + + // FIXME(walter) sync drop partition via backup/restore + // assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 1, 30)) + + qt_sql "SELECT * FROM ${tableName} ORDER BY id" + qt_target_sql "SELECT * FROM ${tableName} ORDER BY id" +} diff --git a/regression-test/suites/db_sync/partition/recover/test_ds_part_recover.groovy b/regression-test/suites/db_sync/partition/recover/test_ds_part_recover.groovy new file mode 100644 index 00000000..44350930 --- /dev/null +++ b/regression-test/suites/db_sync/partition/recover/test_ds_part_recover.groovy @@ -0,0 +1,147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
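+// NOTE: the two recover suites rely on dropped partitions remaining in the catalog recycle bin. As used
+// below, RECOVER PARTITION <name> FROM <table> brings a dropped partition back under its original name,
+// while the *_new variant uses RECOVER PARTITION <name> AS <new_name> FROM <table> to restore it under a
+// new name (assuming the AS form is supported by the cluster under test). Both statements are expected to
+// be replicated by the ccr job, which is what the SHOW PARTITIONS checks on the "target" side verify:
+//     sql "RECOVER PARTITION part_1 FROM ${tableName}"               // restored under its original name
+//     sql "RECOVER PARTITION part_1 AS part_11 FROM ${tableName}"    // restored under a new name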
+ +suite("test_ds_part_recover") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "part" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: Check partitions in src before sync case ===") + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + + + logger.info("=== Test 3: Insert data in valid partitions case ===") + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + + + logger.info("=== Test 4: Drop partitions case ===") + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_1 + """ + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_2 + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + notExist, 30, "target")) + + logger.info("=== Test 4: recover partitions case ===") + sql """ + RECOVER PARTITION ${opPartitonName}_1 from ${tableName} + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + sql """ + RECOVER PARTITION ${opPartitonName}_2 from ${tableName} + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + test_num = 5 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + order_qt_target_sql_content("SELECT * FROM ${tableName}") + order_qt_sql_source_content("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/db_sync/partition/recover1/test_ds_part_recover_new.groovy b/regression-test/suites/db_sync/partition/recover1/test_ds_part_recover_new.groovy new file mode 
100644 index 00000000..dca2b8ca --- /dev/null +++ b/regression-test/suites/db_sync/partition/recover1/test_ds_part_recover_new.groovy @@ -0,0 +1,147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_part_recover_new") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "part" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: Check partitions in src before sync case ===") + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + + + logger.info("=== Test 3: Insert data in valid partitions case ===") + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + + + logger.info("=== Test 4: Drop partitions case ===") + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_1 + """ + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_2 + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + notExist, 30, "target")) + + logger.info("=== Test 4: recover partitions case ===") + sql """ + RECOVER PARTITION ${opPartitonName}_1 as ${opPartitonName}_11 from ${tableName} + """ + + 
assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_11\" + """, + exist, 30, "target")) + sql """ + RECOVER PARTITION ${opPartitonName}_2 as ${opPartitonName}_21 from ${tableName} + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_21\" + """, + exist, 30, "target")) + + test_num = 5 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + order_qt_target_sql_content("SELECT * FROM ${tableName}") + order_qt_sql_source_content("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/db_sync/partition/rename/test_ds_part_rename.groovy b/regression-test/suites/db_sync/partition/rename/test_ds_part_rename.groovy new file mode 100644 index 00000000..ee10d478 --- /dev/null +++ b/regression-test/suites/db_sync/partition/rename/test_ds_part_rename.groovy @@ -0,0 +1,176 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
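+// Verifies that ALTER TABLE ... RENAME PARTITION is replayed by db sync: the new partition name must
+// appear (and the old name disappear) on both clusters, and the renamed partition must keep its range.
+// The suite skips itself when is_version_supported reports a release that cannot replay the rename.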
+ +suite("test_ds_part_rename") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + // only works on 3.0.4/2.1.8/2.0.16 + if (!helper.is_version_supported([30004, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def tableName = "test_ds_rename_partition_tbl" + def test_num = 0 + def insert_num = 5 + def opPartitonNameOrigin = "partitionName_1" + def opPartitonNameNew = "partitionName_2" + + + def exist = { res -> Boolean + return res.size() != 0 + } + + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql "DROP TABLE IF EXISTS ${context.dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${context.dbName}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: Add partitions case ===") + + sql """ + ALTER TABLE ${tableName} + ADD PARTITION ${opPartitonNameOrigin} + VALUES [('0'), ('5')) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + exist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "target")) + + logger.info("=== Test 2: Check new partitions not exist ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "target")) + + logger.info("=== Test 3: Rename partitions name ===") + + sql """ + ALTER TABLE ${tableName} RENAME PARTITION ${opPartitonNameOrigin} ${opPartitonNameNew} + """ + + logger.info("=== Test 4: Check new partitions exist and origin partition not exist ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + notExist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + exist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + notExist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + exist, 30, "target")) + + logger.info("=== Test 5: Check new partitions key and range ===") + + show_result = target_sql_return_maparray """SHOW PARTITIONS FROM TEST_${context.dbName}.${tableName} WHERE PartitionName = \"${opPartitonNameNew}\" """ + /* + *************************** 1. 
row *************************** + PartitionId: 13021 + PartitionName: partitionName_2 + VisibleVersion: 1 + VisibleVersionTime: 2024-11-11 11:40:54 + State: NORMAL + PartitionKey: id + Range: [types: [INT]; keys: [0]; ..types: [INT]; keys: [5]; ) + DistributionKey: id + Buckets: 1 + ReplicationNum: 1 + StorageMedium: HDD + CooldownTime: 9999-12-31 23:59:59 + RemoteStoragePolicy: + LastConsistencyCheckTime: NULL + DataSize: 0.000 + IsInMemory: false + ReplicaAllocation: tag.location.default: 1 + IsMutable: true + SyncWithBaseTables: true + UnsyncTables: NULL + CommittedVersion: 1 + RowCount: 0 + */ + assertEquals(show_result[0].Range, "[types: [INT]; keys: [0]; ..types: [INT]; keys: [5]; )") +} diff --git a/regression-test/suites/db_sync/partition/replace/test_ds_part_replace.groovy b/regression-test/suites/db_sync/partition/replace/test_ds_part_replace.groovy new file mode 100644 index 00000000..d7333a33 --- /dev/null +++ b/regression-test/suites/db_sync/partition/replace/test_ds_part_replace.groovy @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
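+// Exercises REPLACE PARTITION with a temporary partition: p5 is loaded upstream and swapped in for p2,
+// and the restore replayed on the target is expected to cover only the replaced partition p2.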
+ +suite("test_ds_part_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "tbl_replace_partition_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create table ===") + tableName = "${baseTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + logger.info("=== Add temp partition p5 ===") + + sql """ + ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p5 VALUES [("0"), ("100")) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TEMPORARY PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p5" + """, + exist, 60, "sql")) + + sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p5) VALUES (1, 50)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + TEMPORARY PARTITION (p5) + WHERE id = 50 + """, + exist, 60, "sql")) + + logger.info("=== Replace partition p2 by p5 ===") + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + notExist, 60, "target")) + + sql "ALTER TABLE ${tableName} REPLACE PARTITION (p2) WITH TEMPORARY PARTITION (p5)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + exist, 60, "target")) + + // The last restore should contains only partition p2 + def show_restore_result = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + def restore_num = show_restore_result.size() + def last_restore_result = show_restore_result[restore_num-1] + def restore_objects = last_restore_result[10] // RestoreObjs + logger.info("The restore result: ${last_restore_result}") + logger.info("The restore objects: ${restore_objects}") + + // { + // "name": "ccrp_regression_test_table_sync_test_replace_partial_p_02f747eda70e4f768afd613e074e790d_1722983645", + // "database": "regression_test_table_sync", + // "backup_time": 1722983645667, + // "content": "ALL", + // "olap_table_list": [ + // { + // "name": "test_replace_partial_p_02f747eda70e4f768afd613e074e790d", + // "partition_names": [ + // "p2" + // ] + // } + // ], + // "view_list": [], + // "odbc_table_list": [], + // "odbc_resource_list": [] + // } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${restore_objects}" + 
assertTrue(object.olap_table_list[0].partition_names.size() == 1) + assertTrue(object.olap_table_list[0].partition_names[0] == "p2"); +} + diff --git a/regression-test/suites/db_sync/prop/auto_bucket/test_ds_prop_auto_bucket.groovy b/regression-test/suites/db_sync/prop/auto_bucket/test_ds_prop_auto_bucket.groovy new file mode 100644 index 00000000..49ae085f --- /dev/null +++ b/regression-test/suites/db_sync/prop/auto_bucket/test_ds_prop_auto_bucket.groovy @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_auto_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/auto_compaction/test_ds_prop_auto_compaction.groovy b/regression-test/suites/db_sync/prop/auto_compaction/test_ds_prop_auto_compaction.groovy new file mode 100644 index 00000000..5fc9f33d --- /dev/null +++ b/regression-test/suites/db_sync/prop/auto_compaction/test_ds_prop_auto_compaction.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_auto_compaction") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "false" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"false\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/auto_increment/test_ds_prop_auto_increment.groovy b/regression-test/suites/db_sync/prop/auto_increment/test_ds_prop_auto_increment.groovy new file mode 100644 index 00000000..a4598719 --- /dev/null +++ b/regression-test/suites/db_sync/prop/auto_increment/test_ds_prop_auto_increment.groovy @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
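+// Verifies that an AUTO_INCREMENT column definition survives db sync and that the generated id values
+// read back identically from both clusters.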
+ +suite("test_ds_prop_auto_increment") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE ${tableName} ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql "INSERT INTO ${tableName} (value) VALUES (${insert_num})" + } + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(1)")) + + res = sql "select * from ${tableName} order by id" + + target_res = target_sql "select * from ${tableName} order by id" + + assertEquals(target_res, res) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/binlog/test_ds_prop_binlog.groovy b/regression-test/suites/db_sync/prop/binlog/test_ds_prop_binlog.groovy new file mode 100644 index 00000000..1491f92c --- /dev/null +++ b/regression-test/suites/db_sync/prop/binlog/test_ds_prop_binlog.groovy @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_binlog") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "86401", + "binlog.max_bytes" = "9223372036854775806", + "binlog.max_history_nums" = "9223372036854775806" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"binlog.enable\" = \"true\"")) + assertTrue(target_res[0][1].contains("\"binlog.ttl_seconds\" = \"86401\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_bytes\" = \"9223372036854775806\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_history_nums\" = \"9223372036854775806\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/bloom_filter/test_ds_prop_bloom_filter.groovy b/regression-test/suites/db_sync/prop/bloom_filter/test_ds_prop_bloom_filter.groovy new file mode 100644 index 00000000..41501b13 --- /dev/null +++ b/regression-test/suites/db_sync/prop/bloom_filter/test_ds_prop_bloom_filter.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_bloom_filter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index' + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "bloom_filter_columns" = "test" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"bloom_filter_columns\" = \"test\"")) + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index'")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/colocate_with/test_ds_prop_colocate_with.groovy b/regression-test/suites/db_sync/prop/colocate_with/test_ds_prop_colocate_with.groovy new file mode 100644 index 00000000..29a2ded4 --- /dev/null +++ b/regression-test/suites/db_sync/prop/colocate_with/test_ds_prop_colocate_with.groovy @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_colocate_with") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "colocate_with" = "group1" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(res[0][1].contains("\"colocate_with\" = \"group1\"")) + + assertTrue(!target_res[0][1].contains("\"colocate_with\" = \"group1\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/compaction_policy/test_ds_prop_compaction_policy.groovy b/regression-test/suites/db_sync/prop/compaction_policy/test_ds_prop_compaction_policy.groovy new file mode 100644 index 00000000..e1bc9887 --- /dev/null +++ b/regression-test/suites/db_sync/prop/compaction_policy/test_ds_prop_compaction_policy.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_compaction_policy") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { res, property -> Boolean + if(!res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existCompaction = { res -> Boolean + assertTrue(checkShowResult(res, "\"compaction_policy\" = \"time_series\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"2048\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"3000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"4000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"6\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"2\"")) + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "2048", + "time_series_compaction_file_count_threshold" = "3000", + "time_series_compaction_time_threshold_seconds" = "4000", + "time_series_compaction_empty_rowsets_threshold" = "6", + "time_series_compaction_level_threshold" = "2" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existCompaction, 60, "sql")) + +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/compression/test_ds_prop_compression.groovy b/regression-test/suites/db_sync/prop/compression/test_ds_prop_compression.groovy new file mode 100644 index 00000000..e943ac15 --- /dev/null +++ b/regression-test/suites/db_sync/prop/compression/test_ds_prop_compression.groovy @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_compression") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compression"="zstd" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/dynamic_partition/test_ds_prop_dynamic_partition.groovy b/regression-test/suites/db_sync/prop/dynamic_partition/test_ds_prop_dynamic_partition.groovy new file mode 100644 index 00000000..1966ad23 --- /dev/null +++ b/regression-test/suites/db_sync/prop/dynamic_partition/test_ds_prop_dynamic_partition.groovy @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_dynamic_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_day" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_week" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_month" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_day" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_week" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_month" + + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_day + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_week + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "WEEK", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.start_day_of_week" = "2", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_month + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + 
"dynamic_partition.start_day_of_month" = "1", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_day", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_week", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_month", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_day\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_week\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_month\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_day\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_week\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_month\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_day" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"DAY\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_week" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_week\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 
1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_month" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"MONTH\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_month\" = \"1\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/generated_column/test_ds_prop_generated_column.groovy b/regression-test/suites/db_sync/prop/generated_column/test_ds_prop_generated_column.groovy new file mode 100644 index 00000000..973839be --- /dev/null +++ b/regression-test/suites/db_sync/prop/generated_column/test_ds_prop_generated_column.groovy @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_generated_column") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE ${tableName} ( + product_id INT, + price DECIMAL(10,2), + quantity INT, + total_value DECIMAL(10,2) GENERATED ALWAYS AS (price * quantity) + ) DUPLICATE KEY(product_id) + DISTRIBUTED BY HASH(product_id) PROPERTIES ("replication_num" = "1") + """ + + sql """ + INSERT INTO ${tableName} VALUES(1, 10.00, 10, default); + """ + + sql """ + INSERT INTO ${tableName} (product_id, price, quantity) VALUES(1, 20.00, 10); + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("`total_value` decimal(10,2) AS ((`price` * CAST(`quantity` AS decimalv3(10,0)))) NULL")) + + target_res = target_sql_return_maparray "select * from ${tableName} order by total_value" + + assertEquals(target_res[0].total_value,100.00) + assertEquals(target_res[1].total_value,200.00) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/group_commit/test_ds_prop_group_commit.groovy b/regression-test/suites/db_sync/prop/group_commit/test_ds_prop_group_commit.groovy new file mode 100644 index 00000000..26998ba7 --- /dev/null +++ b/regression-test/suites/db_sync/prop/group_commit/test_ds_prop_group_commit.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_group_commit") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "group_commit_interval_ms" = "10000", + "group_commit_data_bytes" = "134217728" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"group_commit_interval_ms\" = \"10000\"")) + assertTrue(target_res[0][1].contains("\"group_commit_data_bytes\" = \"134217728\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/index/test_ds_prop_index.groovy b/regression-test/suites/db_sync/prop/index/test_ds_prop_index.groovy new file mode 100644 index 00000000..93547bbd --- /dev/null +++ b/regression-test/suites/db_sync/prop/index/test_ds_prop_index.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_index") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + INDEX id_idx (id) USING INVERTED COMMENT 'test_id_idx' + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_id_idx'")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/repi_alloc/test_ds_prop_repli_alloc.groovy b/regression-test/suites/db_sync/prop/repi_alloc/test_ds_prop_repli_alloc.groovy new file mode 100644 index 00000000..ac99eb00 --- /dev/null +++ b/regression-test/suites/db_sync/prop/repi_alloc/test_ds_prop_repli_alloc.groovy @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_repli_alloc") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def extractReplicationAllocation = { createTableStatement -> String + def matcher = createTableStatement[0][1] =~ /"replication_allocation" = "([^"]+)"/ + if (matcher) { + return matcher[0][1] + } + return null + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + def res_replication_allocation = extractReplicationAllocation(res) + + def target_res_replication_allocation = extractReplicationAllocation(target_res) + + assertTrue(res_replication_allocation == target_res_replication_allocation) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/row_store/test_ds_prop_row_store.groovy b/regression-test/suites/db_sync/prop/row_store/test_ds_prop_row_store.groovy new file mode 100644 index 00000000..2d507b87 --- /dev/null +++ b/regression-test/suites/db_sync/prop/row_store/test_ds_prop_row_store.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_row_store") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "row_store_columns" = "test,id", + "row_store_page_size" = "4096" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"row_store_columns\" = \"test,id\"")) + assertTrue(target_res[0][1].contains("\"row_store_page_size\" = \"4096\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/schema_change/test_ds_prop_schema_change.groovy b/regression-test/suites/db_sync/prop/schema_change/test_ds_prop_schema_change.groovy new file mode 100644 index 00000000..368a103a --- /dev/null +++ b/regression-test/suites/db_sync/prop/schema_change/test_ds_prop_schema_change.groovy @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_schema_change") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"light_schema_change\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/seq_col/test_ds_prop_seq_col.groovy b/regression-test/suites/db_sync/prop/seq_col/test_ds_prop_seq_col.groovy new file mode 100644 index 00000000..3c9ea3cb --- /dev/null +++ b/regression-test/suites/db_sync/prop/seq_col/test_ds_prop_seq_col.groovy @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_seq_col") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName1 = "tbl_" + helper.randomSuffix() + def tableName2 = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName1}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName2}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName1}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName2}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_col" = "test" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName2} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_type" = "int" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName1}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName2}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName2}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName2}\"", exist, 60, "target")) + + def target_res_1 = target_sql "SHOW CREATE TABLE ${tableName1}" + def target_res_2 = target_sql "SHOW CREATE TABLE ${tableName2}" + + assertTrue(target_res_1[0][1].contains("\"function_column.sequence_col\" = \"test\"")) + assertTrue(target_res_2[0][1].contains("\"function_column.sequence_type\" = \"int\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/single_compact/test_ds_prop_single_compact.groovy b/regression-test/suites/db_sync/prop/single_compact/test_ds_prop_single_compact.groovy new file mode 100644 index 00000000..c72a8dc0 --- /dev/null +++ b/regression-test/suites/db_sync/prop/single_compact/test_ds_prop_single_compact.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_single_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_single_replica_compaction" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"enable_single_replica_compaction\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/storage_medium/test_ds_prop_storage_medium.groovy b/regression-test/suites/db_sync/prop/storage_medium/test_ds_prop_storage_medium.groovy new file mode 100644 index 00000000..392f7322 --- /dev/null +++ b/regression-test/suites/db_sync/prop/storage_medium/test_ds_prop_storage_medium.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_storage_medium") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_medium" = "SSD" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/storage_policy/test_ds_prop_storage_policy.groovy b/regression-test/suites/db_sync/prop/storage_policy/test_ds_prop_storage_policy.groovy new file mode 100644 index 00000000..4f4308b1 --- /dev/null +++ b/regression-test/suites/db_sync/prop/storage_policy/test_ds_prop_storage_policy.groovy @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_storage_policy") { + + logger.info("don't support this case, storage_policy can't be synchronized") + return + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + def resource_name = "test_ds_tbl_storage_policy_resource" + def policy_name= "test_ds_tbl_storage_policy" + + def check_storage_policy_exist = { name-> + def polices = sql""" + show storage policy; + """ + for (p in polices) { + if (name == p[0]) { + return true; + } + } + return false; + } + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + def has_resouce = sql """ + SHOW RESOURCES WHERE NAME = "${resource_name}"; + """ + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_policy" = "${policy_name}" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + // storage_policy should't be synchronized + // def res = sql "SHOW CREATE TABLE ${tableName}" + + // def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + // assertTrue(res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) + + // assertTrue(!target_res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/tm_compact/test_ds_prop_tm_compact.groovy b/regression-test/suites/db_sync/prop/tm_compact/test_ds_prop_tm_compact.groovy new file mode 100644 index 00000000..ecbda416 --- /dev/null +++ b/regression-test/suites/db_sync/prop/tm_compact/test_ds_prop_tm_compact.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_tm_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "1024", + "time_series_compaction_file_count_threshold" = "2000", + "time_series_compaction_time_threshold_seconds" = "3600", + "time_series_compaction_level_threshold" = "2" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"compaction_policy\" = \"time_series\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_goal_size_mbytes\" = \"1024\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_file_count_threshold\" = \"2000\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_time_threshold_seconds\" = \"3600\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_level_threshold\" = \"2\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/unique_key_mow/test_ds_prop_unique_key_mow.groovy b/regression-test/suites/db_sync/prop/unique_key_mow/test_ds_prop_unique_key_mow.groovy new file mode 100644 index 00000000..c92706bf --- /dev/null +++ b/regression-test/suites/db_sync/prop/unique_key_mow/test_ds_prop_unique_key_mow.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_unique_key_mow") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"enable_unique_key_merge_on_write\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop/variant_nested/test_ds_prop_variant_nested.groovy b/regression-test/suites/db_sync/prop/variant_nested/test_ds_prop_variant_nested.groovy new file mode 100644 index 00000000..2972395a --- /dev/null +++ b/regression-test/suites/db_sync/prop/variant_nested/test_ds_prop_variant_nested.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_variant_nested") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "variant_enable_flatten_nested" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) + + res = sql "desc ${tableName}" + + // target_res = target_sql "desc ${tableName}" + + // assertEquals(res,target_res) + + // target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + // assertTrue(!target_res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/auto_bucket/test_ds_prop_incrsync_auto_bucket.groovy b/regression-test/suites/db_sync/prop_incrsync/auto_bucket/test_ds_prop_incrsync_auto_bucket.groovy new file mode 100644 index 00000000..758289ac --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/auto_bucket/test_ds_prop_incrsync_auto_bucket.groovy @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_auto_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/auto_compaction/test_ds_prop_incrsync_auto_compaction.groovy b/regression-test/suites/db_sync/prop_incrsync/auto_compaction/test_ds_prop_incrsync_auto_compaction.groovy new file mode 100644 index 00000000..959417b0 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/auto_compaction/test_ds_prop_incrsync_auto_compaction.groovy @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_auto_compaction") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFullTrue = "tbl_full_true" + def tableNameFullFalse = "tbl_full_false" + def tableNameIncrementTrue = "tbl_incr_true" + def tableNameIncrementFalse = "tbl_incr_false" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFullTrue}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFullFalse}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFullTrue}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFullFalse}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrementTrue}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrementFalse}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrementTrue}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrementFalse}" + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFullTrue} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "true" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFullFalse} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "false" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFullTrue}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullTrue}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullTrue}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullFalse}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullFalse}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFullTrue}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"true\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameFullFalse}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"false\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrementTrue} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "true" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrementFalse} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "false" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementTrue}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW 
TABLES LIKE \"${tableNameIncrementTrue}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementFalse}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementFalse}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrementFalse}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"false\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrementTrue}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/auto_increment/test_ds_prop_incrsync_auto_increment.groovy b/regression-test/suites/db_sync/prop_incrsync/auto_increment/test_ds_prop_incrsync_auto_increment.groovy new file mode 100644 index 00000000..f63583d8 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/auto_increment/test_ds_prop_incrsync_auto_increment.groovy @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_auto_increment") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFullDefault = "tbl_full_default" + def tableNameFull = "tbl_full" + def tableNameIncrementDefault = "tbl_incr_fault" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFullDefault}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFullDefault}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrementDefault}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrementDefault}" + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE ${tableNameFull} ( + `id` BIGINT NOT NULL AUTO_INCREMENT(100), + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE ${tableNameFullDefault} ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql "INSERT INTO ${tableNameFull} (value) VALUES (${insert_num})" + } + sql "sync" + + for (int index = 0; index < insert_num; index++) { + sql "INSERT INTO ${tableNameFullDefault} (value) VALUES (${insert_num})" + } + sql "sync" + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullDefault}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullDefault}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(100)")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameFullDefault}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(1)")) + + res = sql "select * from ${tableNameFull} order by id" + + target_res = target_sql "select * from ${tableNameFull} order by id" + + assertEquals(target_res, res) + + res = sql "select * from ${tableNameFullDefault} order by id" + + target_res = target_sql "select * from ${tableNameFullDefault} order by id" + + assertEquals(target_res, res) + + sql """ + CREATE TABLE ${tableNameIncrement} ( + `id` BIGINT NOT NULL AUTO_INCREMENT(100), + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE ${tableNameIncrementDefault} ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + for (int index = 0; index < 
insert_num; index++) { + sql "INSERT INTO ${tableNameIncrement} (value) VALUES (${insert_num})" + } + sql "sync" + + for (int index = 0; index < insert_num; index++) { + sql "INSERT INTO ${tableNameIncrementDefault} (value) VALUES (${insert_num})" + } + sql "sync" + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementDefault}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementDefault}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(100)")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrementDefault}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(1)")) + + res = sql "select * from ${tableNameIncrement} order by id" + + target_res = target_sql "select * from ${tableNameIncrement} order by id" + + assertEquals(target_res, res) + + res = sql "select * from ${tableNameIncrementDefault} order by id" + + target_res = target_sql "select * from ${tableNameIncrementDefault} order by id" + + assertEquals(target_res, res) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/binlog/test_ds_prop_incrsync_binlog.groovy b/regression-test/suites/db_sync/prop_incrsync/binlog/test_ds_prop_incrsync_binlog.groovy new file mode 100644 index 00000000..64fc43d0 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/binlog/test_ds_prop_incrsync_binlog.groovy @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_binlog") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "86401", + "binlog.max_bytes" = "9223372036854775806", + "binlog.max_history_nums" = "9223372036854775806" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"binlog.enable\" = \"true\"")) + assertTrue(target_res[0][1].contains("\"binlog.ttl_seconds\" = \"86401\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_bytes\" = \"9223372036854775806\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_history_nums\" = \"9223372036854775806\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "86401", + "binlog.max_bytes" = "9223372036854775806", + "binlog.max_history_nums" = "9223372036854775806" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"binlog.enable\" = \"true\"")) + assertTrue(target_res[0][1].contains("\"binlog.ttl_seconds\" = \"86401\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_bytes\" = \"9223372036854775806\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_history_nums\" = \"9223372036854775806\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/bloom_filter/test_ds_prop_incrsync_bloom_filter.groovy b/regression-test/suites/db_sync/prop_incrsync/bloom_filter/test_ds_prop_incrsync_bloom_filter.groovy new file mode 100644 index 00000000..b245c384 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/bloom_filter/test_ds_prop_incrsync_bloom_filter.groovy @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_bloom_filter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT, + INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index' + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "bloom_filter_columns" = "test" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"bloom_filter_columns\" = \"test\"")) + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index'")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT, + INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index' + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "bloom_filter_columns" = "test" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"bloom_filter_columns\" = \"test\"")) + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index'")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/colocate_with/test_ds_prop_incrsync_colocate_with.groovy b/regression-test/suites/db_sync/prop_incrsync/colocate_with/test_ds_prop_incrsync_colocate_with.groovy new file mode 100644 index 00000000..a87a8570 --- /dev/null +++ 
b/regression-test/suites/db_sync/prop_incrsync/colocate_with/test_ds_prop_incrsync_colocate_with.groovy
@@ -0,0 +1,105 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_ds_prop_incrsync_incsync_colocate_with") {
+    def helper = new GroovyShell(new Binding(['suite': delegate]))
+            .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
+
+    def dbName = context.dbName
+    def tableNameFull = "tbl_full"
+    def tableNameIncrement = "tbl_incr"
+    def test_num = 0
+    def insert_num = 5
+
+    def exist = { res -> Boolean
+        return res.size() != 0
+    }
+
+    sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}"
+    target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}"
+    sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}"
+    target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}"
+
+
+    helper.enableDbBinlog()
+    helper.ccrJobDelete()
+    helper.ccrJobCreate()
+
+    sql """
+        CREATE TABLE if NOT EXISTS ${tableNameFull}
+        (
+            `test` INT,
+            `id` INT
+        )
+        ENGINE=OLAP
+        AGGREGATE KEY(`test`, `id`)
+        PARTITION BY RANGE(`id`)
+        (
+        )
+        DISTRIBUTED BY HASH(id) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "binlog.enable" = "true",
+            "colocate_with" = "group1"
+        )
+    """
+
+    assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30))
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql"))
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target"))
+
+    def res = sql "SHOW CREATE TABLE ${tableNameFull}"
+
+    def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}"
+
+    assertTrue(res[0][1].contains("\"colocate_with\" = \"group1\""))
+
+    assertTrue(!target_res[0][1].contains("\"colocate_with\" = \"group1\""))
+
+    sql """
+        CREATE TABLE if NOT EXISTS ${tableNameIncrement}
+        (
+            `test` INT,
+            `id` INT
+        )
+        ENGINE=OLAP
+        AGGREGATE KEY(`test`, `id`)
+        PARTITION BY RANGE(`id`)
+        (
+        )
+        DISTRIBUTED BY HASH(id) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "binlog.enable" = "true",
+            "colocate_with" = "group1"
+        )
+    """
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql"))
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target"))
+
+    res = sql "SHOW CREATE TABLE ${tableNameIncrement}"
+
+    target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}"
+
+    assertTrue(res[0][1].contains("\"colocate_with\" = \"group1\""))
+
+    assertTrue(!target_res[0][1].contains("\"colocate_with\" = \"group1\""))
+}
\ No newline at end of file
diff --git a/regression-test/suites/db_sync/prop_incrsync/compaction_policy/test_ds_prop_incrsync_compaction_policy.groovy
b/regression-test/suites/db_sync/prop_incrsync/compaction_policy/test_ds_prop_incrsync_compaction_policy.groovy new file mode 100644 index 00000000..8c34e4e5 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/compaction_policy/test_ds_prop_incrsync_compaction_policy.groovy @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_compaction_policy") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { res, property -> Boolean + if(!res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existCompaction = { res -> Boolean + assertTrue(checkShowResult(res, "\"compaction_policy\" = \"time_series\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"2048\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"3000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"4000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"6\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"2\"")) + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "2048", + "time_series_compaction_file_count_threshold" = "3000", + "time_series_compaction_time_threshold_seconds" = "4000", + "time_series_compaction_empty_rowsets_threshold" = "6", + "time_series_compaction_level_threshold" = "2" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, 
"target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableNameFull}", existCompaction, 60, "sql")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "2048", + "time_series_compaction_file_count_threshold" = "3000", + "time_series_compaction_time_threshold_seconds" = "4000", + "time_series_compaction_empty_rowsets_threshold" = "6", + "time_series_compaction_level_threshold" = "2" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableNameIncrement}", existCompaction, 60, "sql")) + +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/compression/test_ds_prop_incrsync_compression.groovy b/regression-test/suites/db_sync/prop_incrsync/compression/test_ds_prop_incrsync_compression.groovy new file mode 100644 index 00000000..10e48dec --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/compression/test_ds_prop_incrsync_compression.groovy @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_compression") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compression"="zstd" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compression"="zstd" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/dynamic_partition/test_ds_prop_incrsync_dynamic_partition.groovy b/regression-test/suites/db_sync/prop_incrsync/dynamic_partition/test_ds_prop_incrsync_dynamic_partition.groovy new file mode 100644 index 00000000..e5f1944f --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/dynamic_partition/test_ds_prop_incrsync_dynamic_partition.groovy @@ -0,0 +1,343 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_dynamic_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}_range_by_day" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}_range_by_week" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}_range_by_month" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}_range_by_day" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}_range_by_week" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}_range_by_month" + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}_range_by_day" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}_range_by_week" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}_range_by_month" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}_range_by_day" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}_range_by_week" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}_range_by_month" + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull}_range_by_day + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull}_range_by_week + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "WEEK", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.start_day_of_week" = "2", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull}_range_by_month + ( + `test` INT, + `id` INT + ) + 
ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.start_day_of_month" = "1", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}_range_by_day", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}_range_by_week", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}_range_by_month", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_day\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_week\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_month\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_day\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_week\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}_range_by_month\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}_range_by_day" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"DAY\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}_range_by_week" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, 
"\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_week\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}_range_by_month" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"MONTH\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_month\" = \"1\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement}_range_by_day + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement}_range_by_week + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "WEEK", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.start_day_of_week" = "2", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = 
"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement}_range_by_month + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.start_day_of_month" = "1", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_day\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_week\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_month\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_day\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_week\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}_range_by_month\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}_range_by_day" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"DAY\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}_range_by_week" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + 
assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_week\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}_range_by_month" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"MONTH\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_month\" = \"1\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/generated_column/test_ds_prop_incrsync_generated_column.groovy b/regression-test/suites/db_sync/prop_incrsync/generated_column/test_ds_prop_incrsync_generated_column.groovy new file mode 100644 index 00000000..cb645a36 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/generated_column/test_ds_prop_incrsync_generated_column.groovy @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_generated_column") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE ${tableNameFull} ( + product_id INT, + price DECIMAL(10,2), + quantity INT, + total_value DECIMAL(10,2) GENERATED ALWAYS AS (price * quantity) + ) DUPLICATE KEY(product_id) + DISTRIBUTED BY HASH(product_id) PROPERTIES ("replication_num" = "1") + """ + + sql """ + INSERT INTO ${tableNameFull} VALUES(1, 10.00, 10, default); + """ + + sql """ + INSERT INTO ${tableNameFull} (product_id, price, quantity) VALUES(1, 20.00, 10); + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableNameFull}", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableNameFull}", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("`total_value` decimal(10,2) AS ((`price` * CAST(`quantity` AS decimalv3(10,0)))) NULL")) + + target_res = target_sql_return_maparray "select * from ${tableNameFull} order by total_value" + + assertEquals(target_res[0].total_value,100.00) + assertEquals(target_res[1].total_value,200.00) + + sql """ + CREATE TABLE ${tableNameIncrement} ( + product_id INT, + price DECIMAL(10,2), + quantity INT, + total_value DECIMAL(10,2) GENERATED ALWAYS AS (price * quantity) + ) DUPLICATE KEY(product_id) + DISTRIBUTED BY HASH(product_id) PROPERTIES ("replication_num" = "1") + """ + + sql """ + INSERT INTO ${tableNameIncrement} VALUES(1, 10.00, 10, default); + """ + + sql """ + INSERT INTO ${tableNameIncrement} (product_id, price, quantity) VALUES(1, 20.00, 10); + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableNameIncrement}", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableNameIncrement}", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("`total_value` decimal(10,2) AS ((`price` * CAST(`quantity` AS decimalv3(10,0)))) NULL")) + + target_res = target_sql_return_maparray "select * from ${tableNameIncrement} order by total_value" + + assertEquals(target_res[0].total_value,100.00) + assertEquals(target_res[1].total_value,200.00) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/group_commit/test_ds_prop_incrsync_group_commit.groovy 
b/regression-test/suites/db_sync/prop_incrsync/group_commit/test_ds_prop_incrsync_group_commit.groovy new file mode 100644 index 00000000..30064fb6 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/group_commit/test_ds_prop_incrsync_group_commit.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_group_commit") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "group_commit_interval_ms" = "11000", + "group_commit_data_bytes" = "134217729" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"group_commit_interval_ms\" = \"11000\"")) + assertTrue(target_res[0][1].contains("\"group_commit_data_bytes\" = \"134217729\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "group_commit_interval_ms" = "11000", + "group_commit_data_bytes" = "134217729" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"group_commit_interval_ms\" = \"11000\"")) + 
assertTrue(target_res[0][1].contains("\"group_commit_data_bytes\" = \"134217729\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/index/test_ds_prop_incrsync_index.groovy b/regression-test/suites/db_sync/prop_incrsync/index/test_ds_prop_incrsync_index.groovy new file mode 100644 index 00000000..7fa9d79c --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/index/test_ds_prop_incrsync_index.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_index") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT, + INDEX id_idx (id) USING INVERTED COMMENT 'test_id_idx' + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_id_idx'")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT, + INDEX id_idx (id) USING INVERTED COMMENT 'test_id_idx' + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("INDEX id_idx 
(`id`) USING INVERTED COMMENT 'test_id_idx'")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/repi_alloc/test_ds_prop_incrsync_repli_alloc.groovy b/regression-test/suites/db_sync/prop_incrsync/repi_alloc/test_ds_prop_incrsync_repli_alloc.groovy new file mode 100644 index 00000000..520dc5ff --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/repi_alloc/test_ds_prop_incrsync_repli_alloc.groovy @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_repli_alloc") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def exist = { res -> Boolean + return res.size() != 0 + } + + def extractReplicationAllocation = { createTableStatement -> String + def matcher = createTableStatement[0][1] =~ /"replication_allocation" = "([^"]+)"/ + if (matcher) { + return matcher[0][1] + } + return null + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableNameFull}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + def res_replication_allocation = extractReplicationAllocation(res) + + def target_res_replication_allocation = extractReplicationAllocation(target_res) + + assertTrue(res_replication_allocation == target_res_replication_allocation) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", 
exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + sql "SHOW CREATE TABLE ${tableNameIncrement}" + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + res_replication_allocation = extractReplicationAllocation(res) + + target_res_replication_allocation = extractReplicationAllocation(target_res) + + assertTrue(res_replication_allocation == target_res_replication_allocation) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/row_store/test_ds_prop_incrsync_row_store.groovy b/regression-test/suites/db_sync/prop_incrsync/row_store/test_ds_prop_incrsync_row_store.groovy new file mode 100644 index 00000000..160abe46 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/row_store/test_ds_prop_incrsync_row_store.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_row_store") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "row_store_columns" = "test,id", + "row_store_page_size" = "4096" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"row_store_columns\" = \"test,id\"")) + assertTrue(target_res[0][1].contains("\"row_store_page_size\" = \"4096\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + 
"replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "row_store_columns" = "test,id", + "row_store_page_size" = "4096" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"row_store_columns\" = \"test,id\"")) + assertTrue(target_res[0][1].contains("\"row_store_page_size\" = \"4096\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/schema_change/test_ds_prop_incrsync_schema_change.groovy b/regression-test/suites/db_sync/prop_incrsync/schema_change/test_ds_prop_incrsync_schema_change.groovy new file mode 100644 index 00000000..51fdd1a3 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/schema_change/test_ds_prop_incrsync_schema_change.groovy @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_schema_change") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"light_schema_change\" = \"true\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"light_schema_change\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/seq_col/test_ds_prop_incrsync_seq_col.groovy b/regression-test/suites/db_sync/prop_incrsync/seq_col/test_ds_prop_incrsync_seq_col.groovy new file mode 100644 index 00000000..042286a7 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/seq_col/test_ds_prop_incrsync_seq_col.groovy @@ -0,0 +1,147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_seq_col") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFullF = "tbl_full_1" + def tableNameFullS = "tbl_full_2" + def tableNameIncrementF = "tbl_incr_1" + def tableNameIncrementS = "tbl_incr_2" + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFullF}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFullS}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFullF}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFullS}" + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrementF}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrementS}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrementF}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrementS}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFullF} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_col" = "test" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFullS} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_type" = "int" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFullF}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFullS}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullF}\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullS}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullF}\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFullS}\"", exist, 60, "target")) + + def target_res_1 = target_sql "SHOW CREATE TABLE ${tableNameFullF}" + def target_res_2 = target_sql "SHOW CREATE TABLE ${tableNameFullS}" + + assertTrue(target_res_1[0][1].contains("\"function_column.sequence_col\" = \"test\"")) + assertTrue(target_res_2[0][1].contains("\"function_column.sequence_type\" = \"int\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrementF} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_col" = "test" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrementS} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_type" = "int" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementF}\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementS}\"", exist, 60, "sql")) + + 
assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementF}\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrementS}\"", exist, 60, "target")) + + target_res_1 = target_sql "SHOW CREATE TABLE ${tableNameIncrementF}" + target_res_2 = target_sql "SHOW CREATE TABLE ${tableNameIncrementS}" + + assertTrue(target_res_1[0][1].contains("\"function_column.sequence_col\" = \"test\"")) + assertTrue(target_res_2[0][1].contains("\"function_column.sequence_type\" = \"int\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/single_compact/test_ds_prop_incrsync_single_compact.groovy b/regression-test/suites/db_sync/prop_incrsync/single_compact/test_ds_prop_incrsync_single_compact.groovy new file mode 100644 index 00000000..c4043692 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/single_compact/test_ds_prop_incrsync_single_compact.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_single_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_single_replica_compaction" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"enable_single_replica_compaction\" = \"true\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_single_replica_compaction" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"enable_single_replica_compaction\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/storage_medium/test_ds_prop_incrsync_storage_medium.groovy b/regression-test/suites/db_sync/prop_incrsync/storage_medium/test_ds_prop_incrsync_storage_medium.groovy new file mode 100644 index 00000000..a5766220 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/storage_medium/test_ds_prop_incrsync_storage_medium.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_storage_medium") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_medium" = "SSD" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_medium" = "SSD" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/storage_policy/test_ds_prop_incrsync_storage_policy.groovy b/regression-test/suites/db_sync/prop_incrsync/storage_policy/test_ds_prop_incrsync_storage_policy.groovy new file mode 100644 index 00000000..7df3243a --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/storage_policy/test_ds_prop_incrsync_storage_policy.groovy @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_storage_policy") { + + logger.info("don't support this case, storage_policy can't be synchronized") + return + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + def resource_name = "test_ds_tbl_storage_policy_resource" + def policy_name= "test_ds_tbl_storage_policy" + + def check_storage_policy_exist = { name-> + def polices = sql""" + show storage policy; + """ + for (p in polices) { + if (name == p[0]) { + return true; + } + } + return false; + } + + def has_resouce = sql """ + SHOW RESOURCES WHERE NAME = "${resource_name}"; + """ + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_policy" = "${policy_name}" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + // storage_policy should't be synchronized + // def res = sql "SHOW CREATE TABLE ${tableNameFull}" + + // def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + // assertTrue(res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) + + // assertTrue(!target_res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + 
"AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_policy" = "${policy_name}" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + // storage_policy should't be synchronized + // res = sql "SHOW CREATE TABLE ${tableNameIncrement}" + + // target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + // assertTrue(res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) + + // assertTrue(!target_res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/tm_compact/test_ds_prop_incrsync_tm_compact.groovy b/regression-test/suites/db_sync/prop_incrsync/tm_compact/test_ds_prop_incrsync_tm_compact.groovy new file mode 100644 index 00000000..8faa4fd0 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/tm_compact/test_ds_prop_incrsync_tm_compact.groovy @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_incrsync_incsync_tm_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "1024", + "time_series_compaction_file_count_threshold" = "2000", + "time_series_compaction_time_threshold_seconds" = "3600", + "time_series_compaction_level_threshold" = "2" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableNameFull}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"compaction_policy\" = \"time_series\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_goal_size_mbytes\" = \"1024\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_file_count_threshold\" = \"2000\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_time_threshold_seconds\" = \"3600\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_level_threshold\" = \"2\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "1024", + "time_series_compaction_file_count_threshold" = "2000", + "time_series_compaction_time_threshold_seconds" = "3600", + "time_series_compaction_level_threshold" = "2" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + res = sql "SHOW CREATE TABLE ${tableNameIncrement}" + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(target_res[0][1].contains("\"compaction_policy\" = \"time_series\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_goal_size_mbytes\" = \"1024\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_file_count_threshold\" = \"2000\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_time_threshold_seconds\" = \"3600\"")) + 
assertTrue(target_res[0][1].contains("\"time_series_compaction_level_threshold\" = \"2\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/unique_key_mow/test_ds_prop_incrsync_unique_key_mow.groovy b/regression-test/suites/db_sync/prop_incrsync/unique_key_mow/test_ds_prop_incrsync_unique_key_mow.groovy new file mode 100644 index 00000000..4e05bffc --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/unique_key_mow/test_ds_prop_incrsync_unique_key_mow.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_unique_key_mow") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(target_res[0][1].contains("\"enable_unique_key_merge_on_write\" = \"true\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "true" + ) + """ + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + 
assertTrue(target_res[0][1].contains("\"enable_unique_key_merge_on_write\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/prop_incrsync/variant_nested/test_ds_prop_incrsync_variant_nested.groovy b/regression-test/suites/db_sync/prop_incrsync/variant_nested/test_ds_prop_incrsync_variant_nested.groovy new file mode 100644 index 00000000..d7a3f5b6 --- /dev/null +++ b/regression-test/suites/db_sync/prop_incrsync/variant_nested/test_ds_prop_incrsync_variant_nested.groovy @@ -0,0 +1,119 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_prop_incrsync_incsync_variant_nested") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableNameFull = "tbl_full" + def tableNameIncrement = "tbl_incr" + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameFull}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameFull}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableNameIncrement}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableNameIncrement}" + + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameFull} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "variant_enable_flatten_nested" = "true" + ) + """ + + assertTrue(helper.checkRestoreFinishTimesOf("${tableNameFull}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameFull}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableNameFull}" + + assertTrue(res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) + + res = sql "desc ${tableNameFull}" + + // target_res = target_sql "desc ${tableNameFull}" + + // assertEquals(res,target_res) + + // target_res = target_sql "SHOW CREATE TABLE ${tableNameFull}" + + // assertTrue(!target_res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) + + sql """ + CREATE TABLE if NOT EXISTS ${tableNameIncrement} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "variant_enable_flatten_nested" = "true" + ) + """ + + + + 
assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableNameIncrement}\"", exist, 60, "target")) + + res = sql "SHOW CREATE TABLE ${tableNameIncrement}" + + assertTrue(res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) + + res = sql "desc ${tableNameIncrement}" + + // target_res = target_sql "desc ${tableNameIncrement}" + + // assertEquals(res,target_res) + + // target_res = target_sql "SHOW CREATE TABLE ${tableNameIncrement}" + + // assertTrue(!target_res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/restore/test_db_sync_table_restore.groovy b/regression-test/suites/db_sync/restore/test_db_sync_table_restore.groovy new file mode 100644 index 00000000..e686e13b --- /dev/null +++ b/regression-test/suites/db_sync/restore/test_db_sync_table_restore.groovy @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
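+
+// With a db sync job running, back up a table to an S3 repository on the source,
+// restore the snapshot under a new table name, and verify that both the original
+// and the restored table show up on the target cluster.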
+ +suite("test_db_sync_table_restore") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_db_sync_backup_restore_table_1" + def newtableName = "test_db_sync_backup_restore_table_2" + def snapshotName = "test_db_sync_backup_restore_table_snapshot" + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 10 + def syncer = getSyncer() + def dbNameOrigin = context.dbName + def dbNameTarget = "TEST_" + context.dbName + syncer.createS3Repository(repoName) + + target_sql("DROP DATABASE IF EXISTS ${dbNameTarget}") + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName}" + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${newtableName}" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + logger.info("=== Test 1: Check table not exist ===") + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, notExist, 60, "sql")) + + + logger.info("=== Test 2: Backup table===") + + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName} """, exist, 60, "sql")) + + sql """ + BACKUP SNAPSHOT ${snapshotName} + TO `${repoName}` + ON ( ${tableName} ) + PROPERTIES ("type" = "full") + """ + + syncer.waitSnapshotFinish() + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + syncer.waitTargetRestoreFinish() + + logger.info("=== Test 3: Restore new table ===") + + sql """ + RESTORE SNAPSHOT ${snapshotName} + FROM `${repoName}` + ON (${tableName} as ${newtableName}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "replication_num" = "1" + ) + """ + + syncer.waitAllRestoreFinish() + + logger.info("=== Test 4: Check table ===") + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}" """, exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}" """, exist, 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, exist, 60, "target_sql")) + + order_qt_sql_source_content("SELECT * FROM ${tableName}") + order_qt_target_sql_content("SELECT * FROM ${newtableName}") +} diff --git a/regression-test/suites/db_sync/restore_1/test_db_sync_table_restore1.groovy b/regression-test/suites/db_sync/restore_1/test_db_sync_table_restore1.groovy new file mode 100644 index 00000000..1dca7fc1 --- /dev/null +++ b/regression-test/suites/db_sync/restore_1/test_db_sync_table_restore1.groovy @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_db_sync_table_restore1") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_db_sync_backup_restore_table_1" + def snapshotName = "test_db_sync_backup_restore_table_snapshot" + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 3 + def syncer = getSyncer() + def dbNameOrigin = context.dbName + def dbNameTarget = "TEST_" + context.dbName + syncer.createS3Repository(repoName) + + target_sql("DROP DATABASE IF EXISTS ${dbNameTarget}") + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName}" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName} """, exist, 60, "sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + order_qt_sql_source_content_backup("SELECT * FROM ${tableName}") + order_qt_target_sql_content_backup("SELECT * FROM ${tableName}") + + logger.info("=== Test 1: Backup table===") + + sql """ + BACKUP SNAPSHOT ${snapshotName} + TO `${repoName}` + ON ( ${tableName} ) + PROPERTIES ("type" = "full") + """ + + syncer.waitSnapshotFinish() + test_num = 9 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + order_qt_sql_source_content_new("SELECT * FROM ${tableName}") + order_qt_target_sql_content_new("SELECT * FROM ${tableName}") + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + syncer.waitTargetRestoreFinish() + + logger.info("=== Test 3: Restore new table ===") + + sql """ + RESTORE SNAPSHOT ${snapshotName} + FROM `${repoName}` + ON (${tableName}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "replication_num" = "1" + ) + """ + + syncer.waitAllRestoreFinish() + + logger.info("=== Test 4: Check table ===") + // this value should be only from backup only 3 + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", + insert_num, 30)) + 
order_qt_sql_source_content_restore("SELECT * FROM ${tableName}") + order_qt_target_sql_content_restore("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/db_sync/restore_multi/test_db_sync_table_restore_multi.groovy b/regression-test/suites/db_sync/restore_multi/test_db_sync_table_restore_multi.groovy new file mode 100644 index 00000000..50750790 --- /dev/null +++ b/regression-test/suites/db_sync/restore_multi/test_db_sync_table_restore_multi.groovy @@ -0,0 +1,147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_db_sync_table_restore_multi") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_db_sync_backup_restore_table_1" + def tableName2 = "test_db_sync_backup_restore_table_2" + def newtableName = "test_db_sync_backup_restore_table_new_1" + def newtableName2 = "test_db_sync_backup_restore_table_new_2" + def snapshotName = "test_db_sync_backup_restore_table_snapshot" + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 10 + def syncer = getSyncer() + def dbNameOrigin = context.dbName + def dbNameTarget = "TEST_" + context.dbName + syncer.createS3Repository(repoName) + + target_sql("DROP DATABASE IF EXISTS ${dbNameTarget}") + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName}" + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${newtableName}" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName2} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName2} VALUES (${test_num}, ${index}) + """ + } + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + logger.info("=== Test 1: Check table not exist ===") + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, notExist, 60, "sql")) + + + logger.info("=== 
Test 2: Backup table===") + + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName} """, exist, 60, "sql")) + + sql """ + BACKUP SNAPSHOT ${snapshotName} + TO `${repoName}` + ON ( ${tableName}, ${tableName2}) + PROPERTIES ("type" = "full") + """ + + syncer.waitSnapshotFinish() + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + syncer.waitTargetRestoreFinish() + + logger.info("=== Test 3: Restore new table ===") + + sql """ + RESTORE SNAPSHOT ${snapshotName} + FROM `${repoName}` + ON (${tableName} as ${newtableName}, + ${tableName2} as ${newtableName2}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "replication_num" = "1" + ) + """ + + syncer.waitAllRestoreFinish() + + logger.info("=== Test 4: Check table ===") + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}" """, exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}" """, exist, 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${newtableName}" """, exist, 60, "target_sql")) + + order_qt_sql_source_content("SELECT * FROM ${tableName}") + order_qt_target_sql_content("SELECT * FROM ${newtableName}") + order_qt_sql_source_content("SELECT * FROM ${tableName2}") + order_qt_target_sql_content("SELECT * FROM ${newtableName2}") +} diff --git a/regression-test/suites/db_sync/table/aggregate/test_ds_tbl_aggregate.groovy b/regression-test/suites/db_sync/table/aggregate/test_ds_tbl_aggregate.groovy new file mode 100644 index 00000000..6fe3c92d --- /dev/null +++ b/regression-test/suites/db_sync/table/aggregate/test_ds_tbl_aggregate.groovy @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
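+
+// Verifies that a table using the AGGREGATE KEY model is synced by the db sync job
+// and that the key model is preserved in the downstream SHOW CREATE TABLE output.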
+ +suite("test_ds_tbl_aggregate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("AGGREGATE KEY(`test`, `id`)")) +} diff --git a/regression-test/suites/db_sync/table/clean_restore/test_ds_clean_restore.groovy b/regression-test/suites/db_sync/table/clean_restore/test_ds_clean_restore.groovy new file mode 100644 index 00000000..3804944e --- /dev/null +++ b/regression-test/suites/db_sync/table/clean_restore/test_ds_clean_restore.groovy @@ -0,0 +1,199 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_clean_restore") { + // FIXME(walter) fix clean tables. 
+ return + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql "CREATE DATABASE IF NOT EXISTS TEST_${context.dbName}" + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_3 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_3 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql """ INSERT INTO 
${tableName}_2 VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_3 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_2 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_3 VALUES ${values.join(",")} """ + + def v = target_sql "SELECT * FROM ${tableName}_1" + assertEquals(v.size(), insert_num); + v = target_sql "SELECT * FROM ${tableName}_2" + assertEquals(v.size(), insert_num); + v = target_sql "SELECT * FROM ${tableName}_3" + assertEquals(v.size(), insert_num); + + sql "DROP TABLE ${tableName}_1 FORCE" + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_0 FORCE" + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_1 FORCE" + sql "sync" + + helper.ccrJobDelete() + helper.ccrJobCreate() + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_3", 60)) + + v = target_sql "SELECT * FROM ${tableName}_3" + assertTrue(v.size() == insert_num); + v = target_sql "SELECT * FROM ${tableName}_2" + assertTrue(v.size() == (insert_num-10)); + v = target_sql """ SHOW TABLES LIKE "${tableName}_1" """ + assertTrue(v.size() == 0); +} + + diff --git a/regression-test/suites/db_sync/table/create_drop/test_ds_tbl_create_drop.groovy b/regression-test/suites/db_sync/table/create_drop/test_ds_tbl_create_drop.groovy new file mode 100644 index 00000000..3349de28 --- /dev/null +++ b/regression-test/suites/db_sync/table/create_drop/test_ds_tbl_create_drop.groovy @@ -0,0 +1,135 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
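+
+// Creates a new table and force-drops an existing one while the db sync job is
+// paused, then resumes the job and verifies the target catches up without any
+// additional backup being triggered on the source.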
+suite("test_ds_tbl_create_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 10 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("=== Test 1: Check table and backup size ===") + sql "sync" + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) + + // save the backup num of source cluster + def show_backup_result = sql "SHOW BACKUP" + def backup_num = show_backup_result.size() + logger.info("backups before drop partition: ${show_backup_result}") + + logger.info("=== Test 2: Pause and create new table ===") + + helper.ccrJobPause() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + sql "sync" + + logger.info("=== Test 3: Resume and check new table ===") + + helper.ccrJobResume() + + sql "sync" + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_2" + """, + exist, 60, "target")) + + logger.info("=== Test 4: Pause and drop old table ===") + + helper.ccrJobPause() + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + + sql """ + DROP TABLE ${tableName}_1 FORCE + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "sql")) + + logger.info("=== Test 5: Resume and verify no new backups are triggered ===") + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "target")) + + show_backup_result = sql "SHOW BACKUP" + logger.info("backups after drop old table: ${show_backup_result}") + assertTrue(show_backup_result.size() == backup_num) +} diff --git a/regression-test/suites/db_sync/table/drop_create/test_ds_tbl_drop_create.groovy b/regression-test/suites/db_sync/table/drop_create/test_ds_tbl_drop_create.groovy new file mode 100644 index 00000000..76dd570c --- /dev/null +++ b/regression-test/suites/db_sync/table/drop_create/test_ds_tbl_drop_create.groovy @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_tbl_drop_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + // TBD +} diff --git a/regression-test/suites/db_sync/table/duplicate/test_ds_tbl_duplicate.groovy b/regression-test/suites/db_sync/table/duplicate/test_ds_tbl_duplicate.groovy new file mode 100644 index 00000000..14b2511b --- /dev/null +++ b/regression-test/suites/db_sync/table/duplicate/test_ds_tbl_duplicate.groovy @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
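+
+// Verifies that a table using the DUPLICATE KEY model is synced by the db sync job
+// and that the key model is preserved in the downstream SHOW CREATE TABLE output.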
+ +suite("test_ds_tbl_duplicate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("DUPLICATE KEY(`test`, `id`)")) +} diff --git a/regression-test/suites/db_sync/table/part_bucket/test_ds_tbl_part_bucket.groovy b/regression-test/suites/db_sync/table/part_bucket/test_ds_tbl_part_bucket.groovy new file mode 100644 index 00000000..d0680a47 --- /dev/null +++ b/regression-test/suites/db_sync/table/part_bucket/test_ds_tbl_part_bucket.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
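+
+// Verifies that the range partition and hash bucket definitions of the source table
+// are preserved in the SHOW CREATE TABLE output of the downstream table.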
+ +suite("test_ds_tbl_part_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("PARTITION BY RANGE(`id`)")) + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 1")) +} \ No newline at end of file diff --git a/regression-test/suites/db_sync/table/recover/test_ds_tbl_drop_recover.groovy b/regression-test/suites/db_sync/table/recover/test_ds_tbl_drop_recover.groovy new file mode 100644 index 00000000..e91da172 --- /dev/null +++ b/regression-test/suites/db_sync/table/recover/test_ds_tbl_drop_recover.groovy @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
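+
+// Drops a table (without FORCE) while the db sync job is paused, recovers it from
+// the recycle bin with RECOVER TABLE, and verifies the target follows both the drop
+// and the recover; then repeats the drop/recover once more while the job is running.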
+suite("test_ds_tbl_drop_recover") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_recover" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("=== Test 1: Check table and backup size ===") + sql "sync" + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) + + helper.ccrJobPause() + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + + sql """ + DROP TABLE ${tableName}_1 + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "sql")) + + logger.info("=== Test 5: Resume and verify ===") + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "target")) + + // not both source and target dont have this table. it should be in recycle bin. + // lets try recover. + helper.ccrJobPause() + sql """ + RECOVER TABLE ${tableName}_1 + """ + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "sql")) // check recovered in local + helper.ccrJobResume() + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) // check recovered in target + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + def resSql = target_sql "SELECT * FROM ${tableName}_1 WHERE test=0" + def resSrcSql = sql "SELECT * FROM ${context.dbName}.${tableName}_1 WHERE test=0" + logger.info("=== {} vs {} ===", resSql.size(), resSrcSql.size()) + assertTrue(resSql.size() == resSrcSql.size()) + + test_num = 2 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1 WHERE test=${test_num}", + insert_num, 30)) + + qt_target_sql_content("SELECT * FROM ${tableName}_1") + qt_sql_source_content("SELECT * FROM ${tableName}_1") + + logger.info("=== Test 6: Drop again and try recover and insert ===") + sql """ + DROP TABLE ${tableName}_1 + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "target")) + + // not both source and target dont have this table. it should be in recycle bin. 
+ sql """ + RECOVER TABLE ${tableName}_1 + """ + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "sql")) // check recovered in local + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) // check recovered in target + + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1 WHERE test=${test_num}", + insert_num, 30)) + order_qt_target_sql_content_2("SELECT * FROM ${tableName}_1") + order_qt_sql_source_content_2("SELECT * FROM ${tableName}_1") +} diff --git a/regression-test/suites/db_sync/table/recover1/test_ds_tbl_drop_recover_new.groovy b/regression-test/suites/db_sync/table/recover1/test_ds_tbl_drop_recover_new.groovy new file mode 100644 index 00000000..11f9965b --- /dev/null +++ b/regression-test/suites/db_sync/table/recover1/test_ds_tbl_drop_recover_new.groovy @@ -0,0 +1,143 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
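+
+// Same drop/recover scenario as test_ds_tbl_drop_recover, but the table is recovered
+// under a new name with RECOVER TABLE ... AS ..., and the renamed table is expected
+// to show up on the target.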
+suite("test_ds_tbl_drop_recover_new") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_recover" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "part" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("=== Test 1: Check table and backup size ===") + sql "sync" + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) + + helper.ccrJobPause() + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + + sql """ + DROP TABLE ${tableName}_1 + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "sql")) + + logger.info("=== Test 5: Resume and verify ===") + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_1" + """, + notExist, 60, "target")) + + // not both source and target dont have this table. it should be in recycle bin. + // lets try recover. + helper.ccrJobPause() + sql """ + RECOVER TABLE ${tableName}_1 as ${tableName}_10 + """ + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_10" """, exist, 60, "sql")) // check recovered in local + helper.ccrJobResume() + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_10" """, exist, 60, "target")) // check recovered in target + + test_num = 2 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_10 VALUES (${test_num}, ${index}) + """ + } + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_10 WHERE test=${test_num}", + insert_num, 30)) + + qt_target_sql_content("SELECT * FROM ${tableName}_10") + qt_sql_source_content("SELECT * FROM ${tableName}_10") + + logger.info("=== Test 6: Drop again and try recover and insert ===") + sql """ + DROP TABLE ${tableName}_10 + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_10" + """, + notExist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW TABLES LIKE "${tableName}_10" + """, + notExist, 60, "target")) + + // not both source and target dont have this table. it should be in recycle bin. 
+ sql """ + RECOVER TABLE ${tableName}_10 as ${tableName}_100 + """ + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_100" """, exist, 60, "sql")) // check recovered in local + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_100" """, exist, 60, "target")) // check recovered in target + + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_100 VALUES (${test_num}, ${index}) + """ + } + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_100 WHERE test=${test_num}", + insert_num, 30)) + order_qt_target_sql_content_2("SELECT * FROM ${tableName}_100") + order_qt_sql_source_content_2("SELECT * FROM ${tableName}_100") +} diff --git a/regression-test/suites/db_sync/table/recover2/test_ds_tbl_drop_recover2.groovy b/regression-test/suites/db_sync/table/recover2/test_ds_tbl_drop_recover2.groovy new file mode 100644 index 00000000..1d677d57 --- /dev/null +++ b/regression-test/suites/db_sync/table/recover2/test_ds_tbl_drop_recover2.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
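+
+// Repeatedly drops and recovers the same table in a loop while the db sync job is
+// running, inserting new rows after every recover, and verifies in each round that
+// the table reappears on both the source and the target.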
+suite("test_ds_tbl_drop_recover2") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_recover" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.ccrJobDelete() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + + sql """ + DROP TABLE ${tableName}_1 + """ + helper.enableDbBinlog() + helper.ccrJobCreate() + int interations = 10; + for(int t = 0; t <= interations; t += 1){ + /* first iteration already deleted */ + sql """ + DROP TABLE if exists ${tableName}_1 + """ + + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, notExist, 60, "sql")) // check recovered in local + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, notExist, 60, "target")) + + sql """ + RECOVER TABLE ${tableName}_1 + """ + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "sql")) // check recovered in local + assertTrue(helper.checkShowTimesOf(""" SHOW TABLES LIKE "${tableName}_1" """, exist, 60, "target")) // check recovered in target + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + test_num = t + 10; + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + // need check restore, + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + // check in remote available. + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1 WHERE test=${test_num}", + insert_num, 30)) + + } + order_qt_target_sql_content_2("SELECT * FROM ${tableName}_1") + qt_sql_source_content_2("SELECT * FROM ${tableName}_1") +} diff --git a/regression-test/suites/db_sync/table/recover3/test_ds_tbl_drop_recover3.groovy b/regression-test/suites/db_sync/table/recover3/test_ds_tbl_drop_recover3.groovy new file mode 100644 index 00000000..ad042fec --- /dev/null +++ b/regression-test/suites/db_sync/table/recover3/test_ds_tbl_drop_recover3.groovy @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_tbl_drop_recover3") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_recover" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.disableDbBinlog(); + helper.ccrJobDelete() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "false" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + + sql """ + DROP TABLE ${tableName}_1 + """ + helper.enableDbBinlog() + helper.ccrJobCreate() + int interations = 10; + for(int t = 0; t <= interations; t += 1){ + /* first iteration already deleted */ + sql """ + DROP TABLE if exists ${tableName}_1 + """ + sql """ + RECOVER TABLE ${tableName}_1 + """ + test_num = t + 10; + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName}_1 VALUES (${test_num}, ${index}) + """ + } + } + // before validate, lets see restore is ok or not in target. + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1",36, 30)) + + order_qt_target_sql_content_2("SELECT * FROM ${tableName}_1") + qt_sql_source_content_2("SELECT * FROM ${tableName}_1") +} diff --git a/regression-test/suites/db_sync/table/rename/test_ds_tbl_rename.groovy b/regression-test/suites/db_sync/table/rename/test_ds_tbl_rename.groovy new file mode 100644 index 00000000..ec3fa72c --- /dev/null +++ b/regression-test/suites/db_sync/table/rename/test_ds_tbl_rename.groovy @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_tbl_rename") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.') || versions[0].Value.contains('doris-2.1.')) { + logger.info("2.0/2.1 not support this case, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 10 + def opPartitonName = "less" + def new_rollup_name = "rn_new" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def hasRollupFull = { res -> Boolean + for (List row : res) { + if ((row[0] as String) == "${new_rollup_name}") { + return true + } + } + return false + } + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + `id` int, + `no` int, + `name` varchar(10) + ) ENGINE = olap + UNIQUE KEY(`id`, `no`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "false" + ); + """ + sql """ INSERT INTO ${tableName} VALUES (2, 1, 'b') """ + sql """ ALTER TABLE ${tableName} ADD ROLLUP rn (no, id) """ + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}_1 + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费" + ) ENGINE = olap + AGGREGATE KEY(`user_id`, `date`) + PARTITION BY RANGE (`date`) + ( + PARTITION `p201701` VALUES LESS THAN ("2017-02-01"), + PARTITION `p201702` VALUES LESS THAN ("2017-03-01"), + PARTITION `p201703` VALUES LESS THAN ("2017-04-01") + ) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 2 + PROPERTIES ("replication_num" = "1", "binlog.enable" = "true"); + """ + helper.enableDbBinlog() + sql """ INSERT INTO ${tableName}_1 VALUES (1, '2017-03-30', 1), (2, '2017-03-29', 2), (3, '2017-03-28', 1) """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_1", 60)) + + logger.info("=== Test 0: Db sync ===") + sql "sync" + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} ", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}_1 ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 1, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1", 3, 30)) + + logger.info("=== Test 1: Rename rollup case ===") + sql "ALTER TABLE ${tableName} RENAME ROLLUP rn ${new_rollup_name}; " + sql "sync" + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} ", 1, 30)) + assertTrue(helper.checkShowTimesOf("""desc ${tableName} all """, hasRollupFull, 60, "target")) + + logger.info("=== Test 2: Rename partition case ===") + sql "ALTER TABLE ${tableName}_1 RENAME PARTITION p201702 p201702_new " + sql "sync" + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}_1 ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}_1 p201702_new ", 3, 30)) + + logger.info("=== Test 3: Rename table case ===") + def newTableName = "NEW_${tableName}" + sql "ALTER TABLE ${tableName} RENAME ${newTableName}" + sql "sync" + assertTrue(helper.checkShowTimesOf("SELECT * 
FROM ${newTableName} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName} WHERE id = 2", 1, 30)) +} + diff --git a/regression-test/suites/db_sync/table/rename_dep/test_ds_tbl_rename_dep.groovy b/regression-test/suites/db_sync/table/rename_dep/test_ds_tbl_rename_dep.groovy new file mode 100644 index 00000000..35cad0a6 --- /dev/null +++ b/regression-test/suites/db_sync/table/rename_dep/test_ds_tbl_rename_dep.groovy @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_tbl_rename_dep") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 10 + def opPartitonName = "less" + def new_rollup_name = "rn_new" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + `id` int, + `no` int, + `name` varchar(10) + ) ENGINE = olap + UNIQUE KEY(`id`, `no`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "false" + ); + """ + sql """ INSERT INTO ${tableName} VALUES (2, 1, 'b') """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 1, 30)) + + logger.info("=== Test 1: Rename table case ===") + + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + sql "INSERT INTO ${tableName} VALUES (3, 1, 'c')" + def newTableName = "NEW_${tableName}" + sql "ALTER TABLE ${tableName} RENAME ${newTableName}" + sql "INSERT INTO ${newTableName} VALUES (4, 1, 'd')" + sql "sync" + helper.ccrJobResume() + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${newTableName}\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${newTableName} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName} WHERE id = 3", 1, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName} WHERE id = 4", 1, 30)) + + def last_job_progress = helper.get_job_progress() + if (first_job_progress.full_sync_start_at != 
last_job_progress.full_sync_start_at) { + logger.error("full sync should not be triggered") + assertTrue(false) + } +} + diff --git a/regression-test/suites/db_sync/table/replace/test_ds_tbl_replace.groovy b/regression-test/suites/db_sync/table/replace/test_ds_tbl_replace.groovy new file mode 100644 index 00000000..fc71c9a4 --- /dev/null +++ b/regression-test/suites/db_sync/table/replace/test_ds_tbl_replace.groovy @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_tbl_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== replace with swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO 
${oldTableName} VALUES (3, 300), (300, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"true\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + sql "INSERT INTO ${newTableName} VALUES (4, 400)" // o:n, 3:7 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName}", 7, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + + logger.info(" ==== replace without swap ==== ") + + helper.ccrJobPause() + + sql "INSERT INTO ${newTableName} VALUES (5, 500), (500, 5)" // o:n, 3:9 + sql "INSERT INTO ${oldTableName} VALUES (5, 500), (500, 5)" // o:n, 5:9 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 9:0 + sql "INSERT INTO ${oldTableName} VALUES (6, 600)" // o:n, 10:0 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 10, 60)) + + // new table are dropped + v = target_sql """ SHOW TABLES LIKE "${newTableName}" """ + assertTrue(v.size() == 0); + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/db_sync/table/replace_different/test_ds_tbl_replace_different.groovy b/regression-test/suites/db_sync/table/replace_different/test_ds_tbl_replace_different.groovy new file mode 100644 index 00000000..0a25f068 --- /dev/null +++ b/regression-test/suites/db_sync/table/replace_different/test_ds_tbl_replace_different.groovy @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_tbl_replace_different") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + logger.info("replace table with different partition range") + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p100` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== replace without swap ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + + // new table are dropped + v = target_sql """ SHOW TABLES LIKE "${newTableName}" """ + assertTrue(v.size() == 0); + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/db_sync/table/res_agg_state/test_ds_tbl_res_agg_state.groovy b/regression-test/suites/db_sync/table/res_agg_state/test_ds_tbl_res_agg_state.groovy new file mode 100644 index 00000000..8f17bb1d --- /dev/null +++ b/regression-test/suites/db_sync/table/res_agg_state/test_ds_tbl_res_agg_state.groovy @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ds_tbl_res_agg_state") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def aggTableName = "agg_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "store_row_column" = "true", + "binlog.enable" = "true" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: Create table with agg state ===") + sql """set enable_agg_state=true""" + sql """ + create table ${aggTableName} ( + k1 int null, + k2 agg_state generic, + k3 agg_state generic + ) + aggregate key (k1) + distributed BY hash(k1) buckets 3 + properties("replication_num" = "1"); + """ + + assertTrue(helper.check_table_exists(aggTableName, 60)) + + sql "insert into ${aggTableName} values(1,max_by_state(3,1),group_concat_state('a'))" + sql "insert into ${aggTableName} values(1,max_by_state(2,2),group_concat_state('bb'))" + sql "insert into ${aggTableName} values(2,max_by_state(1,3),group_concat_state('ccc'))" + + assertTrue(helper.checkSelectTimesOf("select * from ${aggTableName}", 2, 60)) +} + diff --git a/regression-test/suites/db_sync/table/truncate/test_ds_tbl_truncate.groovy b/regression-test/suites/db_sync/table/truncate/test_ds_tbl_truncate.groovy new file mode 100644 index 00000000..a0834bd8 --- /dev/null +++ b/regression-test/suites/db_sync/table/truncate/test_ds_tbl_truncate.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_tbl_truncate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "tbl_truncate_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create table ===") + def tableName = "${baseTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("400") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.enableDbBinlog() + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + sql "INSERT INTO ${tableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 4, 60)) + + logger.info(" ==== truncate table ==== ") + def first_job_progress = helper.get_job_progress() + + helper.ccrJobPause() + + sql "INSERT INTO ${tableName} VALUES (3, 300), (300, 3)" + sql "TRUNCATE TABLE ${tableName}" + sql "INSERT INTO ${tableName} VALUES (2, 300)" + + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 1, 60)) + + logger.info(" ==== truncate partitions ==== ") + + helper.ccrJobPause() + sql "INSERT INTO ${tableName} VALUES (3, 230)" // insert into p4 + sql "INSERT INTO ${tableName} VALUES (4, 250)" // insert into p4 + sql "INSERT INTO ${tableName} VALUES (2, 350)" // insert into p5 + sql "TRUNCATE TABLE ${tableName} PARTITIONS (p5)" + helper.ccrJobResume() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 2, 60)) // p5 are truncated + + // no fullsync are triggered + def last_job_progress = helper.get_job_progress() + if (helper.is_version_supported([20107, 20016])) { // at least doris 2.1.7 and doris 2.0.16 + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) + } +} diff --git a/regression-test/suites/db_sync/table/unique/test_ds_tbl_unique.groovy b/regression-test/suites/db_sync/table/unique/test_ds_tbl_unique.groovy new file mode 100644 index 00000000..81988890 --- /dev/null +++ b/regression-test/suites/db_sync/table/unique/test_ds_tbl_unique.groovy @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_tbl_unique") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("UNIQUE KEY(`test`, `id`)")) +} diff --git a/regression-test/suites/db_sync/view/alter/test_ds_view_alter.groovy b/regression-test/suites/db_sync/view/alter/test_ds_view_alter.groovy new file mode 100644 index 00000000..af62b998 --- /dev/null +++ b/regression-test/suites/db_sync/view/alter/test_ds_view_alter.groovy @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_view_alter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + sql """ DROP VIEW IF EXISTS view_test_${suffix} """ + sql """ DROP VIEW IF EXISTS view_test_1_${suffix} """ + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17), + (5, "Ava", 18); + """ + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + logger.info("=== Test1: create view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 6, 30)) + + sql """ + ALTER VIEW view_test_${suffix} + ( + k1, name, v1 + ) + AS + SELECT user_id as k1, name, MAX(age) FROM ${tableDuplicate0} + GROUP BY k1, name + """ + + // Since create view is synced to downstream, this insert will be sync too. + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (6, "Zhangsan", 31), + (5, "Ava", 20); + """ + sql "sync" + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 8, 50)) + def view_size = target_sql "SHOW VIEW FROM ${tableDuplicate0}" + assertTrue(view_size.size() == 1); + def show_view_result = target_sql "SHOW CREATE VIEW view_test_${suffix}" + logger.info("show view result: ${show_view_result}") + assertTrue(show_view_result[0][1].contains("MAX(")) +} + + diff --git a/regression-test/suites/db_sync/view/basic/test_ds_view_basic.groovy b/regression-test/suites/db_sync/view/basic/test_ds_view_basic.groovy new file mode 100644 index 00000000..a0d806e3 --- /dev/null +++ b/regression-test/suites/db_sync/view/basic/test_ds_view_basic.groovy @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_view_basic") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def checkRestoreRowsTimesOf = {rowSize, times -> Boolean + Boolean ret = false + while (times > 0) { + def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + if (sqlInfo.size() == rowSize) { + ret = true + break + } else if (--times > 0 && sqlInfo.size < rowSize) { + sleep(sync_gap_time) + } + } + + return ret + } + def checkTableOrViewExists = { res, name -> Boolean + for (List row : res) { + if ("${row[0]}".equals(name)) { + return true + } + } + return false + } + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17); + """ + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 5, 30)) + + logger.info("=== Test1: create view and materialized view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + sql """ + create materialized view user_id_name_${suffix} as + select user_id, name from ${tableDuplicate0}; + """ + + def checkViewExistFunc = { res -> Boolean + return checkTableOrViewExists(res, "view_test_${suffix}") + } + assertTrue(helper.checkShowTimesOf("SHOW VIEWS", + checkViewExistFunc, 30, func = "target_sql")) + + explain { + sql("select user_id, name from ${tableDuplicate0}") + contains "user_id_name" + } + + logger.info("=== Test 2: drop view ===") + sql "DROP VIEW view_test_${suffix}" + sql "sync" + def checkViewNotExistFunc = { res -> Boolean + return !checkTableOrViewExists(res, "view_test_${suffix}") + } + assertTrue(helper.checkShowTimesOf("SHOW VIEWS", checkViewNotExistFunc, 30, func = "target_sql")) + + logger.info("=== Test 2: delete job ===") + test_num = 5 + helper.ccrJobDelete() + + sql """ + INSERT INTO ${tableDuplicate0} VALUES (6, "Zhangsan", 31) + """ + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 5, 5)) +} diff --git a/regression-test/suites/db_sync/view/drop_create/test_ds_view_drop_create.groovy b/regression-test/suites/db_sync/view/drop_create/test_ds_view_drop_create.groovy new file mode 100644 index 00000000..b8f37696 --- /dev/null +++ b/regression-test/suites/db_sync/view/drop_create/test_ds_view_drop_create.groovy @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_view_drop_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + sql """ DROP VIEW IF EXISTS view_test_${suffix} """ + sql """ DROP VIEW IF EXISTS view_test_1_${suffix} """ + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17), + (5, "Ava", 18); + """ + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + logger.info("=== Test1: create view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 6, 30)) + + // drop the view, and create it again. + // Must be incremental sync. + sql """ + DROP VIEW view_test_${suffix} + """ + + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + // Since create view is synced to downstream, this insert will be sync too. + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (6, "Zhangsan", 31), + (5, "Ava", 20); + """ + sql "sync" + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 8, 50)) + def view_size = target_sql "SHOW VIEW FROM ${tableDuplicate0}" + assertTrue(view_size.size() == 1); + + // pause and create again, so create view will query the upstream to found table name. + helper.ccrJobPause() + + sql """ + CREATE VIEW view_test_1_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + sql """ DROP VIEW view_test_1_${suffix} """ + + helper.ccrJobResume() + + // insert will be sync. 
+ sql """ + INSERT INTO ${tableDuplicate0} VALUES + (6, "Zhangsan", 31), + (5, "Ava", 20); + """ + sql "sync" + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 10, 50)) + view_size = target_sql "SHOW VIEW FROM ${tableDuplicate0}" + assertTrue(view_size.size() == 1); + +} + diff --git a/regression-test/suites/db_sync/view/drop_delete_create/test_ds_view_drop_delete_create.groovy b/regression-test/suites/db_sync/view/drop_delete_create/test_ds_view_drop_delete_create.groovy new file mode 100644 index 00000000..a4bb0c4a --- /dev/null +++ b/regression-test/suites/db_sync/view/drop_delete_create/test_ds_view_drop_delete_create.groovy @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ds_view_drop_delete_create") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def createDuplicateTable = { tableName -> + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + user_id BIGINT NOT NULL COMMENT "用户 ID", + name VARCHAR(20) COMMENT "用户姓名", + age INT COMMENT "用户年龄" + ) + ENGINE=OLAP + DUPLICATE KEY(user_id) + DISTRIBUTED BY HASH(user_id) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def suffix = helper.randomSuffix() + def tableDuplicate0 = "tbl_duplicate_0_${suffix}" + createDuplicateTable(tableDuplicate0) + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (1, "Emily", 25), + (2, "Benjamin", 35), + (3, "Olivia", 28), + (4, "Alexander", 60), + (5, "Ava", 17), + (5, "Ava", 18); + """ + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + logger.info("=== Test1: create view ===") + sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + helper.ccrJobDelete() + helper.ccrJobCreate() + + assertTrue(helper.checkRestoreFinishTimesOf("${tableDuplicate0}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 6, 30)) + + // delete this job, and recreate it again. + helper.ccrJobDelete() + + // drop the view, and create it again. + sql """ + DROP VIEW view_test_${suffix} + """ + + num_restore = helper.getRestoreRowSize(tableDuplicate0) + + helper.ccrJobCreate() + + // A new snapshot must be triggered. + assertTrue(helper.checkRestoreNumAndFinishedTimesOf("${tableDuplicate0}", num_restore + 1, 60)) + + // this create view job must be synced to downstream. 
+ sql """ + CREATE VIEW view_test_${suffix} (k1, name, v1) + AS + SELECT user_id as k1, name, SUM(age) FROM ${tableDuplicate0} + GROUP BY k1,name; + """ + + // Since create view is synced to downstream, this insert will be sync too. + sql """ + INSERT INTO ${tableDuplicate0} VALUES + (6, "Zhangsan", 31), + (5, "Ava", 20); + """ + sql "sync" + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableDuplicate0}", 8, 50)) + def view_size = target_sql "SHOW VIEW FROM ${tableDuplicate0}" + assertTrue(view_size.size() == 1); +} + + diff --git a/regression-test/suites/syncer/ts_allow_table_exists/test_syncer_ts_allow_table_exists.groovy b/regression-test/suites/syncer/ts_allow_table_exists/test_syncer_ts_allow_table_exists.groovy new file mode 100644 index 00000000..5b411e6b --- /dev/null +++ b/regression-test/suites/syncer/ts_allow_table_exists/test_syncer_ts_allow_table_exists.groovy @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_syncer_ts_allow_tablet_exists") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.') || versions[0].Value.contains('doris-2.1')) { + logger.info("2.0/2.1 not support this case, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def opPartitonName = "less" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql "CREATE DATABASE IF NOT EXISTS TEST_${context.dbName}" + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, 
${index})") + } + + sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName} VALUES ${values.join(",")} """ + + def v = target_sql "SELECT * FROM ${tableName}" + assertEquals(v.size(), insert_num); + sql "sync" + + // Since this table is not syncing, the `is_being_sycned` properties should not exists. + v = target_sql """SHOW CREATE TABLE ${tableName}""" + assertTrue(v[0][1].contains("is_being_synced\" = \"false") || !v[0][1].contains("is_being_synced")); + + helper.ccrJobCreateAllowTableExists(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + // table sync should NOT clean the exists tables in the same db!!! + v = target_sql "SELECT * FROM ${tableName}" + assertTrue(v.size() == insert_num); + v = target_sql """SHOW CREATE TABLE ${tableName}""" + assertTrue(v[0][1].contains("is_being_synced\" = \"true")); +} + + + diff --git a/regression-test/suites/table-sync/test_column_ops.groovy b/regression-test/suites/table-sync/test_column_ops.groovy deleted file mode 100644 index bbd3eb09..00000000 --- a/regression-test/suites/table-sync/test_column_ops.groovy +++ /dev/null @@ -1,201 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-suite("test_column_ops") { - - def tableName = "tbl_column_ops" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkSelectRowTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkSelectColTimesOf = { sqlString, colSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() == 0 || tmpRes[0].size() != colSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() > 0 && tmpRes[0].size() == colSize - } - - def checkData = { data, beginCol, value -> Boolean - if (data.size() < beginCol + value.size()) { - return false - } - - for (int i = 0; i < value.size(); ++i) { - if ((data[beginCol + i]) as int != value[i]) { - return false - } - } - - return true - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def exist = { res -> Boolean - return res.size() != 0 - } - - sql "DROP TABLE IF EXISTS ${tableName}" - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `id` INT - ) - ENGINE=OLAP - UNIQUE KEY(`test`, `id`) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "binlog.enable" = "true" - ) - """ - - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableName} VALUES (${test_num}, ${index}) - """ - } - - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } - - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) - - - logger.info("=== Test 2: add column case ===") - sql """ - ALTER TABLE ${tableName} - ADD COLUMN (`cost` VARCHAR(3) DEFAULT "123") - """ - - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE COLUMN - FROM ${context.dbName} - WHERE TableName = "${tableName}" AND State = "FINISHED" - """, - exist, 30)) - - assertTrue(checkSelectColTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 3, 30)) - - - logger.info("=== Test 3: modify column length case ===") - test_num = 3 - sql """ - ALTER TABLE ${tableName} - MODIFY COLUMN `cost` VARCHAR(4) DEFAULT "123" - """ - sql """ - INSERT INTO ${tableName} VALUES (${test_num}, 0, "8901") - """ - assertTrue(checkSelectRowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 1, 30)) - - -// logger.info("=== Test 4: modify column type case ===") -// test_num = 4 -// sql """ -// ALTER TABLE 
${tableName} -// MODIFY COLUMN `cost` INT DEFAULT "123" -// """ -// assertTrue(checkRestoreFinishTimesOf("${tableName}", 1, 30)) -// -// sql """ -// INSERT INTO ${tableName} VALUES (${test_num}, 0, 23456) -// """ -// assertTrue(checkSelectRowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", -// 1, 30)) - - - logger.info("=== Test 5: drop column case ===") - sql """ - ALTER TABLE ${tableName} - DROP COLUMN `cost` - """ - assertTrue(checkSelectColTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 2, 30)) -} \ No newline at end of file diff --git a/regression-test/suites/table-sync/test_inverted_index.groovy b/regression-test/suites/table-sync/test_inverted_index.groovy deleted file mode 100644 index 6c2636dc..00000000 --- a/regression-test/suites/table-sync/test_inverted_index.groovy +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_inverted_index") { - - def tableName = "tbl_inverted_index_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = row[4] == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `id` INT, - INDEX idx_id (`id`) USING INVERTED - ) - ENGINE=OLAP - DUPLICATE KEY(`test`, `id`) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ) - """ - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableName} VALUES (${test_num}, ${index}) - """ - } - sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" - - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } - - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) - - def res = target_sql "SHOW INDEXES FROM TEST_${context.dbName}.${tableName}" - def invertIdx = false - for (List row : res) { - if ((row[2] as String) == "idx_id") { - invertIdx = (row[10] as String) == "INVERTED" - break - } - } - assertTrue(invertIdx) -} \ No newline at end of file diff --git a/regression-test/suites/table-sync/test_materialized_view.groovy b/regression-test/suites/table-sync/test_materialized_view.groovy deleted file mode 100644 index 0681edf9..00000000 --- 
a/regression-test/suites/table-sync/test_materialized_view.groovy +++ /dev/null @@ -1,141 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_materialized_index") { - - def tableName = "tbl_materialized_sync_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `id` INT, - `col1` INT, - `col2` INT, - `col3` INT, - `col4` INT, - ) - ENGINE=OLAP - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ) - """ - sql """ - CREATE MATERIALIZED VIEW mtr_${tableName}_full AS - SELECT id, col1, col3 FROM ${tableName} - """ - - def materializedFinished = { res -> Boolean - for (List row : res) { - if ((row[5] as String).contains("mtr_${tableName}_full")) { - return true - } - } - return false - } - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE ROLLUP - FROM ${context.dbName} - WHERE TableName = "${tableName}" AND State = "FINISHED" - """, - materializedFinished, 30)) - sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" - - - logger.info("=== Test 1: full update rollup ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } - - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) - - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE ROLLUP - FROM ${context.dbName} - WHERE TableName = "${tableName}" AND State = "FINISHED" - """, - materializedFinished, 30, "target")) - - - logger.info("=== Test 2: incremental update rollup ===") - sql """ - CREATE MATERIALIZED VIEW ${tableName}_incr AS - SELECT id, col2, col4 FROM ${tableName} - """ - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE ROLLUP - FROM ${context.dbName} - WHERE TableName = "${tableName}" AND State = 
"FINISHED" - """, - materializedFinished, 30, "target")) - -} \ No newline at end of file diff --git a/regression-test/suites/table-sync/test_row_storage.groovy b/regression-test/suites/table-sync/test_row_storage.groovy deleted file mode 100644 index 272c30fc..00000000 --- a/regression-test/suites/table-sync/test_row_storage.groovy +++ /dev/null @@ -1,171 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -suite("test_row_storage") { - - def tableName = "tbl_row_storage_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkSelectRowTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkSelectColTimesOf = { sqlString, colSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() == 0 || tmpRes[0].size() != colSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() > 0 && tmpRes[0].size() == colSize - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def exist = { res -> Boolean - return res.size() != 0 - } - - sql "DROP TABLE IF EXISTS ${tableName}" - sql """ - CREATE TABLE if NOT EXISTS ${tableName} - ( - `test` INT, - `id` INT - ) - ENGINE=OLAP - UNIQUE KEY(`test`, `id`) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "store_row_column" = "true", - "binlog.enable" = "true" - ) - """ - - for (int index = 0; index < insert_num; index++) { - sql """ - INSERT INTO ${tableName} VALUES (${test_num}, ${index}) - """ - } - - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - 
op "post" - result respone - } - - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) - def res = target_sql "SHOW CREATE TABLE TEST_${context.dbName}.${tableName}" - def rowStorage = false - for (List row : res) { - if ((row[0] as String) == "${tableName}") { - rowStorage = (row[1] as String).contains("\"store_row_column\" = \"true\"") - break - } - } - assertTrue(rowStorage) - - - logger.info("=== Test 2: add column case ===") - sql """ - ALTER TABLE ${tableName} - ADD COLUMN (`cost` VARCHAR(256) DEFAULT "add") - """ - - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE COLUMN - FROM ${context.dbName} - WHERE TableName = "${tableName}" AND State = "FINISHED" - """, - exist, 30)) - - assertTrue(checkSelectColTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 3, 30)) - - - logger.info("=== Test 3: add a row ===") - test_num = 3 - sql """ - INSERT INTO ${tableName} VALUES (${test_num}, 0, "addadd") - """ - assertTrue(checkSelectRowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 1, 30)) - res = target_sql "SELECT cost FROM TEST_${context.dbName}.${tableName} WHERE test=${test_num}" - assertTrue((res[0][0] as String) == "addadd") -} \ No newline at end of file diff --git a/regression-test/suites/table_ps_inc/basic/test_tbl_ps_inc_basic.groovy b/regression-test/suites/table_ps_inc/basic/test_tbl_ps_inc_basic.groovy new file mode 100644 index 00000000..78f7c5ac --- /dev/null +++ b/regression-test/suites/table_ps_inc/basic/test_tbl_ps_inc_basic.groovy @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_tbl_ps_inc_basic") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_schema_change_partial_sync")) { + logger.info("this suite require feature_schema_change_partial_sync set to true") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + target_sql "DROP TABLE IF EXISTS ${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== pause job, add column and insert data") + helper.ccrJobPause(tableName) + + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + + def column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName}\" " + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 1)" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 2)" + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 3)" + + helper.ccrJobResume(tableName) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + logger.info("the aggregate keys inserted should be synced accurately") + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 1, 60)) + def last_record = target_sql "SELECT value FROM ${tableName} WHERE id = 123 AND test = 123" + logger.info("last record is ${last_record}") + assertTrue(last_record.size() == 1 && last_record[0][0] == 6) + + // no full sync triggered. 
+ def last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + + diff --git a/regression-test/suites/table_ps_inc/cache/test_tbl_ps_inc_cache.groovy b/regression-test/suites/table_ps_inc/cache/test_tbl_ps_inc_cache.groovy new file mode 100644 index 00000000..f28cb24a --- /dev/null +++ b/regression-test/suites/table_ps_inc/cache/test_tbl_ps_inc_cache.groovy @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_tbl_ps_inc_cache") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + target_sql "DROP TABLE IF EXISTS ${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target_sql")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + + first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add first column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + + def column = sql " SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = \"${tableName}\" " + + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(column.size() + 1), 30)) 
+ + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 123)" + + // cache must be clear and reload. + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 1, 60)) + + // no full sync triggered. + last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + diff --git a/regression-test/suites/table_sync/alt_prop/bloom_filter/test_ts_alt_prop_bloom_filter.groovy b/regression-test/suites/table_sync/alt_prop/bloom_filter/test_ts_alt_prop_bloom_filter.groovy new file mode 100644 index 00000000..31f16fbb --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/bloom_filter/test_ts_alt_prop_bloom_filter.groovy @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
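// A brief sketch of the check pattern used by the alter-property suites below: each closure inspects
// the second column of SHOW CREATE TABLE (the generated DDL) for a property fragment. For example,
// once
//   ALTER TABLE t SET ("bloom_filter_columns" = "test, id")
// finishes its SHOW ALTER TABLE COLUMN job, the DDL is expected to contain
//   "bloom_filter_columns" = "test, id"
// on both the source table and the downstream (target) table.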
+ +suite("test_ts_alt_prop_bloom_filter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existBF = { res -> Boolean + return checkShowResult(res, "\"bloom_filter_columns\" = \"test, id\"") + } + + def notExistBF = { res -> Boolean + return !checkShowResult(res, "\"bloom_filter_columns\" = \"test, id\"") + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBF, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBF, 60, "target")) + + logger.info("=== Test 2: alter table set property bloom filter columns ===") + + def state = sql """ SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = "${tableName}" AND State = "FINISHED" """ + + sql """ + ALTER TABLE ${tableName} SET ("bloom_filter_columns" = "test, id"); + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 1), 30)) + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBF, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBF, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/bucket/test_ts_alt_prop_bucket.groovy b/regression-test/suites/table_sync/alt_prop/bucket/test_ts_alt_prop_bucket.groovy new file mode 100644 index 00000000..f6904877 --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/bucket/test_ts_alt_prop_bucket.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def existOldBucket = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 1") + } + + def existNewBucket = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "target")) + + logger.info("=== Test 2: alter table set property bucket num ===") + + sql """ + ALTER TABLE ${tableName} MODIFY DISTRIBUTION DISTRIBUTED BY HASH(`id`) BUCKETS 20 + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewBucket, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldBucket, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/colocate/test_ts_alt_prop_colocate_with.groovy b/regression-test/suites/table_sync/alt_prop/colocate/test_ts_alt_prop_colocate_with.groovy new file mode 100644 index 00000000..d407484a --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/colocate/test_ts_alt_prop_colocate_with.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_colocate_with") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existGrooup1 = { res -> Boolean + return res[0][1].contains("\"colocate_with\" = \"test_group_1\"") + } + + def notExistGrooup1 = { res -> Boolean + return !res[0][1].contains("\"colocate_with\" = \"test_group_1\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} SET ("colocate_with" = "test_group_1") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existGrooup1, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistGrooup1, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/comment/test_ts_alt_prop_comment.groovy b/regression-test/suites/table_sync/alt_prop/comment/test_ts_alt_prop_comment.groovy new file mode 100644 index 00000000..1b549436 --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/comment/test_ts_alt_prop_comment.groovy @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_comment") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existComment = { res -> Boolean + return res[0][1].contains("COMMENT 'test_comment'") + } + + def notExistComment = { res -> Boolean + return !res[0][1].contains("COMMENT 'test_comment'") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistComment, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistComment, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} MODIFY COMMENT "test_comment" + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existComment, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existComment, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/compaction/test_ts_alt_prop_compaction.groovy b/regression-test/suites/table_sync/alt_prop/compaction/test_ts_alt_prop_compaction.groovy new file mode 100644 index 00000000..0d38166e --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/compaction/test_ts_alt_prop_compaction.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
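// In the suite below, the upstream table is created with "compaction_policy" = "time_series" and the
// five time_series_compaction_* properties are then raised one by one via ALTER TABLE ... SET (...).
// Per the final assertions, only the source is expected to show the new values; the target keeps the
// original ones, i.e. these property-only changes are not synced.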
+ +suite("test_ts_alt_prop_compaction") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { res, property -> Boolean + if(!res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existNewCompaction = { res -> Boolean + Boolean result = checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"2048\"") && + checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"3000\"") && + checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"4000\"") && + checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"6\"") && + checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"2\"") + return result + } + + def existOldCompaction = { res -> Boolean + Boolean result = checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"1024\"") && + checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"2000\"") && + checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"3600\"") && + checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"5\"") && + checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"1\"") + return result + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldCompaction, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldCompaction, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + alter table ${tableName} set ("compaction_policy" = "time_series") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_goal_size_mbytes" = "2048") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_file_count_threshold" = "3000") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_time_threshold_seconds" = "4000") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_empty_rowsets_threshold" = "6") + """ + + sql """ + alter table ${tableName} set ("time_series_compaction_level_threshold" = "2") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewCompaction, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", 
existOldCompaction, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/distribution_type/test_ts_alt_prop_distr_type.groovy b/regression-test/suites/table_sync/alt_prop/distribution_type/test_ts_alt_prop_distr_type.groovy new file mode 100644 index 00000000..826d04fe --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/distribution_type/test_ts_alt_prop_distr_type.groovy @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_distr_type") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existBucketNew = { res -> Boolean + return res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + def notExistBucketNew = { res -> Boolean + return !res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 20") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "target")) + + logger.info("=== Test 2: alter table set property distribution ===") + + sql """ + ALTER TABLE ${tableName} MODIFY DISTRIBUTION DISTRIBUTED BY HASH(id) BUCKETS 20; + """ + + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existBucketNew, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistBucketNew, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/dy_part/test_ts_alt_prop_dy_pary.groovy b/regression-test/suites/table_sync/alt_prop/dy_part/test_ts_alt_prop_dy_pary.groovy new 
file mode 100644 index 00000000..6ebdcdaf --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/dy_part/test_ts_alt_prop_dy_pary.groovy @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_dy_pary") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existNewPartitionProperty = { target_res -> Boolean + Boolean result = checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"") && + checkShowResult(target_res, "\"dynamic_partition.start\" = \"-3\"") && + checkShowResult(target_res, "\"dynamic_partition.end\" = \"3\"") && + checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"pp\"") && + checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"64\"") && + checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"1\"") && + checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"false\"") && + checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2023-01-01,2023-12-31],[2024-01-01,2024-12-31]\"") + return result + } + + def existOldPartitionProperty = { res -> Boolean + Boolean result = checkShowResult(res, "\"dynamic_partition.time_unit\" = \"DAY\"") && + checkShowResult(res, "\"dynamic_partition.start\" = \"-2\"") && + checkShowResult(res, "\"dynamic_partition.end\" = \"2\"") && + checkShowResult(res, "\"dynamic_partition.prefix\" = \"p\"") && + checkShowResult(res, "\"dynamic_partition.buckets\" = \"32\"") && + checkShowResult(res, "\"dynamic_partition.history_partition_num\" = \"2\"") && + checkShowResult(res, "\"dynamic_partition.create_history_partition\" = \"true\"") && + checkShowResult(res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"") + return result + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = 
"Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldPartitionProperty, 60, "sql")) + + logger.info("=== Test 2: alter table set property dynamic partition ===") + + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.time_unit" = "WEEK") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.start" = "-3") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.end" = "3") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.prefix" = "pp") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.create_history_partition" = "false") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.buckets" = "64") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.history_partition_num" = "1") + """ + sql """ + ALTER TABLE ${tableName} SET ("dynamic_partition.reserved_history_periods" = "[2023-01-01,2023-12-31],[2024-01-01,2024-12-31]") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existNewPartitionProperty, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existOldPartitionProperty, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/light_schema_change/test_ts_alt_prop_light_schema_change.groovy b/regression-test/suites/table_sync/alt_prop/light_schema_change/test_ts_alt_prop_light_schema_change.groovy new file mode 100644 index 00000000..8215bcf9 --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/light_schema_change/test_ts_alt_prop_light_schema_change.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_alt_prop_light_schema_change") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def lightSchemaChange = { res -> Boolean + return res[0][1].contains("\"light_schema_change\" = \"true\"") + } + + def notLightSchemaChange = { res -> Boolean + return !res[0][1].contains("\"light_schema_change\" = \"false\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "false" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "target")) + + logger.info("=== Test 2: alter table set property light_schema_change ===") + + sql """ + ALTER TABLE ${tableName} SET ("light_schema_change" = "true"); + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", lightSchemaChange, 60, "sql")) + + // todo + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notLightSchemaChange, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/row_store/test_ts_alt_prop_row_store.groovy b/regression-test/suites/table_sync/alt_prop/row_store/test_ts_alt_prop_row_store.groovy new file mode 100644 index 00000000..fdaa721b --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/row_store/test_ts_alt_prop_row_store.groovy @@ -0,0 +1,138 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
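// The row-store suite below applies two property changes upstream, each tracked as a finished
// SHOW ALTER TABLE COLUMN job (hence the has_count(state.size() + N) waits):
//   ALTER TABLE t SET ("store_row_column" = "true")
//   ALTER TABLE t SET ("row_store_columns" = "test,id")
// It then expects "row_store_columns" = "test,id" (together with the default
// "row_store_page_size" = "16384") to appear in SHOW CREATE TABLE on both source and target.
// Setting row_store_page_size directly is rejected, as the commented-out ALTER near the end of the
// suite records.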
+ +suite("test_ts_alt_prop_row_store") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existRowStore = { res -> Boolean + if(!checkShowResult(res, "\"row_store_columns\" = \"test,id\"")) { + return false + } + if(!checkShowResult(res, "\"row_store_page_size\" = \"16384\"")) { + return false + } + return true + } + + def notExistRowStore = { res -> Boolean + if(!checkShowResult(res, "\"row_store_columns\" = \"test,id\"")) { + return true; + } + if(!checkShowResult(res, "\"row_store_page_size\" = \"16384\"")) { + return true; + } + return false + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistRowStore, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistRowStore, 60, "target")) + + logger.info("=== Test 2: alter table set property row store ===") + + def state = sql """ SHOW ALTER TABLE COLUMN FROM ${context.dbName} WHERE TableName = "${tableName}" AND State = "FINISHED" """ + + sql """ + ALTER TABLE ${tableName} SET ("store_row_column" = "true") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 1), 30)) + + sql """ + ALTER TABLE ${tableName} SET ("row_store_columns" = "test,id") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(state.size() + 2), 30)) + // mysql> ALTER TABLE t SET ("row_store_page_size" = "32768"); + // ERROR 1105 (HY000): errCode = 2, detailMessage = Unknown table property: [row_store_page_size] + // sql """ + // ALTER TABLE ${tableName} SET ("row_store_page_size" = "16348") + // """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existRowStore, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existRowStore, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/storage_policy/test_ts_alt_prop_stor_policy.groovy 
b/regression-test/suites/table_sync/alt_prop/storage_policy/test_ts_alt_prop_stor_policy.groovy new file mode 100644 index 00000000..f2e762de --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/storage_policy/test_ts_alt_prop_stor_policy.groovy @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_alt_prop_stor_policy") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def existPolicy = { res -> Boolean + return res[0][1].contains("\"storage_policy\" = \"test_policy\"") + } + + def notexistPolicy = { res -> Boolean + return !res[0][1].contains("\"storage_policy\" = \"test_policy\"") + } + + def resource_name = "test_ts_tbl_storage_policy_resource" + def policy_name= "test_policy" + + def check_storage_policy_exist = { name-> + def polices = sql""" + show storage policy; + """ + for (p in polices) { + if (name == p[0]) { + return true; + } + } + return false; + } + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + def has_resouce = sql """ + SHOW RESOURCES WHERE NAME = "${resource_name}"; + """ + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + 
assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "target")) + + logger.info("=== Test 2: alter table set property storage_policy ===") + + sql """ + ALTER TABLE ${tableName} set ("storage_policy" = "${policy_name}"); + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existPolicy, 60, "sql")) + + // don't synced + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notexistPolicy, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/alt_prop/synced/test_ts_alt_prop_synced.groovy b/regression-test/suites/table_sync/alt_prop/synced/test_ts_alt_prop_synced.groovy new file mode 100644 index 00000000..d396fef0 --- /dev/null +++ b/regression-test/suites/table_sync/alt_prop/synced/test_ts_alt_prop_synced.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
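// The suite below covers the "is_being_synced" property: while the CCR job is active the downstream
// copy is expected to carry "is_being_synced" = "true" and the source "false"; setting it to "false"
// upstream changes only the source, and per the final assertion the downstream keeps "true" — the
// change is not synced.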
+ +suite("test_ts_alt_prop_synced") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def existSynced = { res -> Boolean + return res[0][1].contains("\"is_being_synced\" = \"true\"") + } + + def notExistSynced = { res -> Boolean + return res[0][1].contains("\"is_being_synced\" = \"false\"") + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: check property not exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistSynced, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existSynced, 60, "target")) + + logger.info("=== Test 2: alter table set property colocate_with ===") + + sql """ + ALTER TABLE ${tableName} SET ("is_being_synced" = "false") + """ + + logger.info("=== Test 3: check property exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", notExistSynced, 60, "sql")) + + // don't sync + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existSynced, 60, "target")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/column/add/test_ts_col_add.groovy b/regression-test/suites/table_sync/column/add/test_ts_col_add.groovy new file mode 100644 index 00000000..aa9d1089 --- /dev/null +++ b/regression-test/suites/table_sync/column/add/test_ts_col_add.groovy @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
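// The suite below walks four ADD COLUMN variants on a UNIQUE KEY table and, per the binlog shapes
// quoted in its comments, shows which replication path each takes:
//   ADD COLUMN `first` INT KEY ... FIRST            -> ALTER_JOB (SCHEMA_CHANGE)
//   ADD COLUMN `last` INT KEY ... AFTER `id`        -> ALTER_JOB (SCHEMA_CHANGE)
//   ADD COLUMN `first_value` INT ... AFTER `last`   -> MODIFY_TABLE_ADD_OR_DROP_COLUMNS
//   ADD COLUMN `last_value` INT ... AFTER `value`   -> MODIFY_TABLE_ADD_OR_DROP_COLUMNS
// Each step waits for the FINISHED alter job upstream, then verifies the column's name, position and
// Key flag downstream via SHOW COLUMNS, and the closing job-progress comparison asserts that no full
// sync was triggered.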
+suite("test_ts_col_add") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add first column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + logger.info("=== Test 2: add column after last key ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11049, + // "tableId": 11058, + // "tableName": "tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId": 11100, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `last` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `id`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last` INT KEY DEFAULT "0" AFTER `id` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + def has_column_last = { res -> Boolean + // Field == 'last' && 'Key' == 'YES' + return res[3][0] == 'last' && (res[3][3] == 'YES' || res[3][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last, 60, "target_sql")) + + logger.info("=== Test 3: add value column after last key ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11058, + // "indexSchemaMap": { + // "11101": [...] 
+ // }, + // "indexes": [], + // "jobId": 11117, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first_value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `last`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first_value` INT DEFAULT "0" AFTER `last` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(3), 30)) + + def has_column_first_value = { res -> Boolean + // Field == 'first_value' && 'Key' == 'NO' + return res[4][0] == 'first_value' && (res[4][3] == 'NO' || res[4][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first_value, 60, "target_sql")) + + logger.info("=== Test 4: add value column last ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11150, + // "indexSchemaMap": { + // "11180": [] + // }, + // "indexes": [], + // "jobId": 11197, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column5f9a63de97fc4b5fb7a001f778dd180d` ADD COLUMN `last_value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `value`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last_value` INT DEFAULT "0" AFTER `value` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(4), 30)) + + def has_column_last_value = { res -> Boolean + // Field == 'last_value' && 'Key' == 'NO' + return res[6][0] == 'last_value' && (res[6][3] == 'NO' || res[6][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last_value, 60, "target_sql")) + + // no full sync triggered. + last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/table_sync/column/add_agg/test_ts_col_add_agg.groovy b/regression-test/suites/table_sync/column/add_agg/test_ts_col_add_agg.groovy new file mode 100644 index 00000000..304dc6d3 --- /dev/null +++ b/regression-test/suites/table_sync/column/add_agg/test_ts_col_add_agg.groovy @@ -0,0 +1,191 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
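+// Suite coverage: the same ADD COLUMN cases as test_ts_col_add, but on an AGGREGATE KEY
+// table where the added value columns use the SUM aggregation type; the downstream schema
+// is verified with SHOW COLUMNS after each schema change binlog is replayed.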
+suite("test_ts_col_add_agg") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT SUM + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: add first column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + logger.info("=== Test 2: add column after last key ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11049, + // "tableId": 11058, + // "tableName": "tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId": 11100, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `last` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `id`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last` INT KEY DEFAULT "0" AFTER `id` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + def has_column_last = { res -> Boolean + // Field == 'last' && 'Key' == 'YES' + return res[3][0] == 'last' && (res[3][3] == 'YES' || res[3][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last, 60, "target_sql")) + + logger.info("=== Test 3: add value column after last key ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11058, + // "indexSchemaMap": { + // "11101": [...] 
+ // }, + // "indexes": [], + // "jobId": 11117, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first_value` int SUM NULL DEFAULT \"0\" COMMENT \"\" AFTER `last`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first_value` INT SUM DEFAULT "0" AFTER `last` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(3), 30)) + + def has_column_first_value = { res -> Boolean + // Field == 'first_value' && 'Key' == 'NO' + return res[4][0] == 'first_value' && (res[4][3] == 'NO' || res[4][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first_value, 60, "target_sql")) + + logger.info("=== Test 4: add value column last ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11150, + // "indexSchemaMap": { + // "11180": [] + // }, + // "indexes": [], + // "jobId": 11197, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column5f9a63de97fc4b5fb7a001f778dd180d` ADD COLUMN `last_value` int SUM NULL DEFAULT \"0\" COMMENT \"\" AFTER `value`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `last_value` INT SUM DEFAULT "0" AFTER `value` + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(4), 30)) + + def has_column_last_value = { res -> Boolean + // Field == 'last_value' && 'Key' == 'NO' + return res[6][0] == 'last_value' && (res[6][3] == 'NO' || res[6][3] == 'false') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_last_value, 60, "target_sql")) +} diff --git a/regression-test/suites/table_sync/column/add_many/test_ts_col_add_many.groovy b/regression-test/suites/table_sync/column/add_many/test_ts_col_add_many.groovy new file mode 100644 index 00000000..2c185a1a --- /dev/null +++ b/regression-test/suites/table_sync/column/add_many/test_ts_col_add_many.groovy @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
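+// Suite coverage: a single ALTER TABLE ... ADD COLUMN (...) statement that adds several
+// columns at once (a key column and a value column) must be replicated to the downstream
+// table under table sync.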
+suite("test_ts_col_add_many") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: add column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11049, + // "tableId": 11329, + // "tableName": "tbl_add_many_column431ed55d264646ba9bd30419a7b8f90d", + // "jobId": 11346, + // "jobState": "PENDING", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_many_column431ed55d264646ba9bd30419a7b8f90d` ADD COLUMN (`last_key` int NULL DEFAULT \"0\" COMMENT \"\", `last_value` int NULL DEFAULT \"0\" COMMENT \"\")" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN (`last_key` INT KEY DEFAULT "0", `last_value` INT DEFAULT "0") + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + exist, 30)) + + def has_columns = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + def found_last_key = false + def found_last_value = false + for (int i = 0; i < res.size(); i++) { + if (res[i][0] == 'last_key' && (res[i][3] == 'YES' || res[i][3] == 'true')) { + found_last_key = true + } + if (res[i][0] == 'last_value' && (res[i][3] == 'NO' || res[i][3] == 'false')) { + found_last_value = true + } + } + return found_last_key && found_last_value + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_columns, 60, "target_sql")) +} + diff --git a/regression-test/suites/table_sync/column/alter_type/test_ts_col_alter_type.groovy b/regression-test/suites/table_sync/column/alter_type/test_ts_col_alter_type.groovy new file mode 100644 index 00000000..d9b99aa9 --- /dev/null +++ b/regression-test/suites/table_sync/column/alter_type/test_ts_col_alter_type.groovy @@ -0,0 +1,139 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_ts_col_alter_type") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def value_is_big_int = { res -> Boolean + // Field == 'value' && 'Type' == 'bigint' + return res[2][0] == 'value' && res[2][1] == 'bigint' + } + + def id_is_big_int = { res -> Boolean + // Field == 'id' && 'Type' == 'bigint' + return res[1][0] == 'id' && res[1][1] == 'bigint' + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: add key column type ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` MODIFY COLUMN `id` bigint NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `id` BIGINT KEY + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", id_is_big_int, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", id_is_big_int, 60, "target_sql")) + + logger.info("=== Test 2: alter value column type ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11049, + // "tableId": 11058, + // "tableName": "tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId": 11100, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` MODIFY COLUMN `value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `id`" + // } + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `value` BIGINT + """ + sql "sync" + + logger.info("=== Test 2: Check column type ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_is_big_int, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_is_big_int, 60, "target_sql")) +} + diff --git 
a/regression-test/suites/table_sync/column/basic/test_ts_col_basic.groovy b/regression-test/suites/table_sync/column/basic/test_ts_col_basic.groovy new file mode 100644 index 00000000..52778eea --- /dev/null +++ b/regression-test/suites/table_sync/column/basic/test_ts_col_basic.groovy @@ -0,0 +1,172 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_col_basic") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def checkColumnCommentTimesOf = { checkTable, expectedColComments, times -> Boolean + def res = target_sql "SHOW FULL COLUMNS FROM ${checkTable}" + while (times > 0) { + Boolean allMatch = true + for (expected in expectedColComments.entrySet()) { + Boolean oneMatch = false + for (List row : res) { + if (!oneMatch) { + oneMatch = + (row[0] as String).equals(expected.key) && (row[8] as String).equals(expected.value) + } + } + allMatch = allMatch & oneMatch + } + if (allMatch) { + return true + } else if (--times > 0) { + sleep(helper.sync_gap_time) + res = target_sql "SHOW FULL COLUMNS FROM ${checkTable}" + } + } + + return false + } + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: add column case ===") + sql """ + ALTER TABLE ${tableName} + ADD COLUMN (`cost` VARCHAR(3) DEFAULT "123") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + exist, 30)) + + def has_column = { num -> + return { res -> + res.size() > 0 && res[0].size() == num + } + } + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + has_column(3), 30)) + + logger.info("=== Test 2: modify column length case ===") + test_num = 2 + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `cost` VARCHAR(4) DEFAULT "123" + """ + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, 0, "8901") + """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + 1, 30)) + + +// 
logger.info("=== Test 3: modify column type case ===") +// test_num = 3 +// sql """ +// ALTER TABLE ${tableName} +// MODIFY COLUMN `cost` INT DEFAULT "123" +// """ +// assertTrue(checkRestoreFinishTimesOf("${tableName}", 1, 30)) +// +// sql """ +// INSERT INTO ${tableName} VALUES (${test_num}, 0, 23456) +// """ +// assertTrue(checkSelectRowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", +// 1, 30)) + + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.') || versions[0].Value.contains('doris-2.1')) { + logger.info("2.0/2.1 not support rename column, current version is: ${versions[0].Value}") + return + } + + logger.info("=== Test 4: rename column case ===") + test_num = 4 + sql """ + ALTER TABLE ${tableName} + RENAME COLUMN `cost` `_cost` + """ + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, 0, "666") + """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", 1, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND _cost='666'", 1, 1)) + + if (!helper.is_version_supported([20108, 20017, 30004])) { + def version = helper.upstream_version() + logger.info("Skip the test case because the version is not supported. current version ${version}") + return + } + + logger.info("=== Test 4: modify column comment case ===") + sql """ + ALTER TABLE ${tableName} + MODIFY COLUMN `test` COMMENT 'test number', + MODIFY COLUMN `id` COMMENT 'index of one test number' + """ + assertTrue(checkColumnCommentTimesOf(tableName, + [test: "test number", id: "index of one test number", _cost: ""], 30)) + + + logger.info("=== Test 5: drop column case ===") + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `_cost` + """ + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + has_column(2), 30)) +} diff --git a/regression-test/suites/table_sync/column/drop_key/test_ts_col_drop_key.groovy b/regression-test/suites/table_sync/column/drop_key/test_ts_col_drop_key.groovy new file mode 100644 index 00000000..6aaff82b --- /dev/null +++ b/regression-test/suites/table_sync/column/drop_key/test_ts_col_drop_key.groovy @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_ts_col_drop_key") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def id_column_not_exists = { res -> Boolean + def not_exists = true + for (int i = 0; i < res.size(); i++) { + if (res[i][0] == 'id') { + not_exists = false + } + } + return not_exists + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: drop key column ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` DROP COLUMN `id`" + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `id` + """ + sql "sync" + + logger.info("=== Test 2: Check key column ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", id_column_not_exists, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", id_column_not_exists, 60, "target_sql")) +} + diff --git a/regression-test/suites/table_sync/column/drop_val/test_ts_col_drop_val.groovy b/regression-test/suites/table_sync/column/drop_val/test_ts_col_drop_val.groovy new file mode 100644 index 00000000..50951c61 --- /dev/null +++ b/regression-test/suites/table_sync/column/drop_val/test_ts_col_drop_val.groovy @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_col_drop_val") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def value_column_not_exists = { res -> Boolean + def not_exists = true + for (int i = 0; i < res.size(); i++) { + if (res[i][0] == 'value') { + not_exists = false + } + } + return not_exists + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: drop value column ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11415, + // "indexSchemaMap": { + // "11433": [ + // { + // "name": "test", + // "type": { + // "clazz": "ScalarType", + // "type": "INT", + // "len": -1, + // "precision": 0, + // "scale": 0 + // }, + // "isAggregationTypeImplicit": false, + // "isKey": true, + // "isAllowNull": true, + // "isAutoInc": false, + // "autoIncInitValue": -1, + // "comment": "", + // "stats": { + // "avgSerializedSize": -1.0, + // "maxSize": -1, + // "numDistinctValues": -1, + // "numNulls": -1 + // }, + // "children": [], + // "visible": true, + // "uniqueId": 0, + // "clusterKeyId": -1, + // "hasOnUpdateDefaultValue": false, + // "gctt": [] + // } + // ] + // }, + // "indexes": [], + // "jobId": 11444, + // "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_drop_columnc84979beb0484120a5057fb2a3eeee6b` DROP COLUMN `value`" + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `value` + """ + sql "sync" + + logger.info("=== Test 2: Check value column ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_column_not_exists, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", value_column_not_exists, 60, "target_sql")) +} + diff --git a/regression-test/suites/table_sync/column/filter_dropped_indexes/test_ts_col_filter_dropped_indexes.groovy b/regression-test/suites/table_sync/column/filter_dropped_indexes/test_ts_col_filter_dropped_indexes.groovy new file mode 100644 index 00000000..32287396 --- /dev/null +++ b/regression-test/suites/table_sync/column/filter_dropped_indexes/test_ts_col_filter_dropped_indexes.groovy @@ -0,0 +1,122 @@ 
+// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_col_filter_dropped_indexes") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + + logger.info("=== pause job, insert data and issue schema change ===") + + helper.ccrJobPause(tableName) + sql "INSERT INTO ${tableName} VALUES (100, 100, 100)" + sql "INSERT INTO ${tableName} VALUES (101, 101, 101)" + sql "INSERT INTO ${tableName} VALUES (102, 102, 102)" + + logger.info("=== add first column ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("resume ccr job and wait sync job") + helper.ccrJobResume(tableName) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 123)" + + // cache must be clear and reload. 
+ assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + if (helper.has_feature("feature_schema_change_partial_sync")) { + // no full sync triggered. + def last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) + } +} + + + diff --git a/regression-test/suites/table_sync/column/order_by/test_ts_col_order_by.groovy b/regression-test/suites/table_sync/column/order_by/test_ts_col_order_by.groovy new file mode 100644 index 00000000..4dc99983 --- /dev/null +++ b/regression-test/suites/table_sync/column/order_by/test_ts_col_order_by.groovy @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_col_order_by") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def key_columns_order = { res -> Boolean + return res[0][0] == 'id' && (res[0][3] == 'YES' || res[0][3] == 'true') && + res[1][0] == 'test' && (res[1][3] == 'YES' || res[1][3] == 'true') && + res[2][0] == 'value1' && (res[2][3] == 'NO' || res[2][3] == 'false') && + res[3][0] == 'value' && (res[3][3] == 'NO' || res[3][3] == 'false') + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT, + `value1` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + logger.info("=== Test 1: add data and sync create ===") + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 2: order by column case ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 11651, + // "tableId": 11688, + // "tableName": "tbl_order_byd6f8a1162e8745039385af479c3df9fe", + // "jobId": 11705, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_schema_change`.`tbl_order_byd6f8a1162e8745039385af479c3df9fe` ORDER BY `id`, `test`, `value1`, `value`" + // } + sql """ + ALTER TABLE ${tableName} + ORDER BY 
(`id`, `test`, `value1`, `value`) + """ + sql "sync" + + logger.info("=== Test 3: Check ordered column ===") + + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + exist, 30)) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", key_columns_order, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", key_columns_order, 60, "target_sql")) +} + diff --git a/regression-test/suites/table_sync/column/rename/test_ts_col_rename.groovy b/regression-test/suites/table_sync/column/rename/test_ts_col_rename.groovy new file mode 100644 index 00000000..3137668f --- /dev/null +++ b/regression-test/suites/table_sync/column/rename/test_ts_col_rename.groovy @@ -0,0 +1,127 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_col_rename") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def dbNameTarget = "TEST_" + context.dbName + def tableName = "test_ts_col_rename_tbl" + def newColName = 'test_ts_col_rename_new_col' + def oldColName = 'test_ts_col_rename_old_col' + def test_num = 0 + def insert_num = 5 + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + def has_column = { column -> + return { res -> Boolean + res[0][0] == column + } + } + + def not_has_column = { column -> + return { res -> Boolean + res[0][0] != column + } + } + + helper.enableDbBinlog() + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS ${dbNameTarget}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + ${oldColName} INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(${oldColName}, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + + result = sql "select * from ${tableName}" + + assertEquals(result.size(), insert_num) + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: Check old column exist and new column not exist ===") + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(oldColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(newColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW 
COLUMNS FROM ${tableName}", has_column(oldColName), 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(newColName), 60, "target_sql")) + + logger.info("=== Test 2: Alter table rename column and insert data ===") + + sql "ALTER TABLE ${dbName}.${tableName} RENAME COLUMN ${oldColName} ${newColName} " + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "sql")) + + values = []; + for (int index = insert_num; index < insert_num * 2; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + logger.info("=== Test 3: Check old column not exist and new column exist ===") + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(oldColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", not_has_column(oldColName), 60, "target_sql")) + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM ${tableName}", has_column(newColName), 60, "target_sql")) + + logger.info("=== Test 4: Check inserted data ===") + + result = sql " select * from ${tableName} " + + result_target = target_sql " select * from ${tableName} " + + assertEquals(result, result_target) + +} + diff --git a/regression-test/suites/table-sync/test_common.groovy b/regression-test/suites/table_sync/common/test_ts_common.groovy similarity index 56% rename from regression-test/suites/table-sync/test_common.groovy rename to regression-test/suites/table_sync/common/test_ts_common.groovy index 401c5423..6ed9a5b4 100644 --- a/regression-test/suites/table-sync/test_common.groovy +++ b/regression-test/suites/table_sync/common/test_ts_common.groovy @@ -14,64 +14,16 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-suite("test_common") { +suite("test_ts_common") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - def tableName = "tbl_common_" + UUID.randomUUID().toString().replace("-", "") + def tableName = "tbl_" + helper.randomSuffix() def uniqueTable = "${tableName}_unique" def aggregateTable = "${tableName}_aggregate" def duplicateTable = "${tableName}_duplicate" - def syncerAddress = "127.0.0.1:9190" def test_num = 0 def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = row[4] == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkData = { data, beginCol, value -> Boolean - if (data.size() < beginCol + value.size()) { - return false - } - - for (int i = 0; i < value.size(); ++i) { - if ((data[beginCol + i] as int) != value[i]) { - return false - } - } - - return true - } sql """ CREATE TABLE if NOT EXISTS ${uniqueTable} @@ -141,53 +93,29 @@ suite("test_common") { INSERT INTO ${duplicateTable} VALUES (0, 99) """ } - + sql "sync" // test 1: target cluster follow source cluster logger.info("=== Test 1: backup/restore case ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${uniqueTable}" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkRestoreFinishTimesOf("${uniqueTable}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", + helper.ccrJobCreate(uniqueTable) + assertTrue(helper.checkRestoreFinishTimesOf("${uniqueTable}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", insert_num, 30)) - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${aggregateTable}" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkRestoreFinishTimesOf("${aggregateTable}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${aggregateTable} WHERE test=${test_num}", + helper.ccrJobCreate(aggregateTable) + assertTrue(helper.checkRestoreFinishTimesOf("${aggregateTable}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${aggregateTable} WHERE test=${test_num}", 1, 30)) def resList = [4, 10, 4, 0] def resData = target_sql "SELECT * FROM ${aggregateTable} WHERE test=${test_num}" - assertTrue(checkData(resData[0], 1, resList)) + assertTrue(helper.checkData(resData[0], 1, resList)) - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${duplicateTable}" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkRestoreFinishTimesOf("${duplicateTable}", 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${duplicateTable} WHERE test=${test_num}", + helper.ccrJobCreate(duplicateTable) + assertTrue(helper.checkRestoreFinishTimesOf("${duplicateTable}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${duplicateTable} 
WHERE test=${test_num}", insert_num, 30)) - - - logger.info("=== Test 2: dest cluster follow source cluster case ===") test_num = 2 for (int index = 0; index < insert_num; index++) { @@ -205,27 +133,21 @@ suite("test_common") { INSERT INTO ${duplicateTable} VALUES (0, 99) """ } - assertTrue(checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${aggregateTable} WHERE test=${test_num}", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${aggregateTable} WHERE test=${test_num}", 1, 30)) resData = target_sql "SELECT * FROM ${aggregateTable} WHERE test=${test_num}" - assertTrue(checkData(resData[0], 1, resList)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${duplicateTable} WHERE test=0", + assertTrue(helper.checkData(resData[0], 1, resList)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${duplicateTable} WHERE test=0", 2 * insert_num, 30)) logger.info("=== Test 3: pause and resume ===") - httpTest { - uri "/pause" - endpoint syncerAddress - def bodyJson = get_ccr_body "${uniqueTable}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobPause(uniqueTable) test_num = 3 for (int index = 0; index < insert_num; index++) { @@ -234,34 +156,21 @@ suite("test_common") { """ } - assertTrue(!checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", + sql "sync" + assertTrue(!helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", insert_num, 3)) - httpTest { - uri "/resume" - endpoint syncerAddress - def bodyJson = get_ccr_body "${uniqueTable}" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", + helper.ccrJobResume(uniqueTable) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", insert_num, 30)) - + logger.info("=== Test 4: desync job ===") test_num = 4 - httpTest { - uri "/desync" - endpoint syncerAddress - def bodyJson = get_ccr_body "${uniqueTable}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobDesync(uniqueTable) + + sleep(helper.sync_gap_time) - sleep(sync_gap_time) - def res = target_sql "SHOW CREATE TABLE TEST_${context.dbName}.${uniqueTable}" def desynced = false for (List row : res) { @@ -274,14 +183,7 @@ suite("test_common") { logger.info("=== Test 5: delete job ===") test_num = 5 - httpTest { - uri "/delete" - endpoint syncerAddress - def bodyJson = get_ccr_body "${uniqueTable}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobDelete(uniqueTable) for (int index = 0; index < insert_num; index++) { sql """ @@ -289,6 +191,7 @@ suite("test_common") { """ } - assertTrue(!checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", + sql "sync" + assertTrue(!helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=${test_num}", insert_num, 5)) -} \ No newline at end of file +} diff --git a/regression-test/suites/table_sync/dml/delete/test_ts_dml_delete.groovy b/regression-test/suites/table_sync/dml/delete/test_ts_dml_delete.groovy new file mode 100644 index 00000000..e26bd0f7 --- /dev/null +++ b/regression-test/suites/table_sync/dml/delete/test_ts_dml_delete.groovy @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_dml_delete") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 29 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + col_1 tinyint, + col_2 smallint, + col_3 int, + col_4 bigint, + col_5 decimal(10,3), + col_6 char, + col_7 varchar(20), + col_9 date, + col_10 datetime, + col_11 boolean, + col_8 string, + ) ENGINE=OLAP + duplicate KEY(`col_1`, col_2, col_3, col_4, col_5, col_6, col_7, col_9, col_10, col_11) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`col_1`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 0: Common insert case ===") + + sql """ + INSERT INTO ${tableName} VALUES + (1, 2, 3, 4, 11.22, 'a', 'b', '2023-01-01', '2023-01-01 00:01:02', true, 'aaa'), + (2, 3, 4, 5, 22.33, 'b', 'c', '2023-01-02', '2023-01-02 00:01:02', false, 'bbb'), + (3, 4, 5, 6, 33.44, 'c', 'd', '2023-01-03', '2023-01-03 00:01:02', true, 'ccc'), + (4, 5, 6, 7, 44.55, 'd', 'e', '2023-01-04', '2023-01-04 00:01:02', false, 'ddd'), + (5, 6, 7, 8, 55.66, 'e', 'f', '2023-01-05', '2023-01-05 00:01:02', true, 'eee'), + (6, 7, 8, 9, 66.77, 'f', 'g', '2023-01-06', '2023-01-06 00:01:02', false, 'fff'), + (7, 8, 9, 10, 77.88, 'g', 'h', '2023-01-07', '2023-01-07 00:01:02', true, 'ggg'), + (8, 9, 10, 11, 88.99, 'h', 'i', '2023-01-08', '2023-01-08 00:01:02', false, 'hhh'), + (9, 10, 11, 12, 99.1, 'i', 'j', '2023-01-09', '2023-01-09 00:01:02', true, 'iii'), + (10, 11, 12, 13, 101.2, 'j', 'k', '2023-01-10', '2023-01-10 00:01:02', false, 'jjj'), + (11, 12, 13, 14, 102.2, 'l', 'k', '2023-01-11', '2023-01-11 00:01:02', true, 'kkk'), + (12, 13, 14, 15, 103.2, 'm', 'l', '2023-01-12', '2023-01-12 00:01:02', false, 'lll'), + (13, 14, 15, 16, 104.2, 'n', 'm', '2023-01-13', '2023-01-13 00:01:02', true, 'mmm'), + (14, 15, 16, 17, 105.2, 'o', 'n', '2023-01-14', '2023-01-14 00:01:02', false, 'nnn'), + (15, 16, 17, 18, 106.2, 'p', 'o', '2023-01-15', '2023-01-15 00:01:02', true, 'ooo'), + (15, 16, 17, 18, 106.2, 'q', 'p', '2023-01-16', '2023-01-16 00:01:02', false, 'ppp'), + (16, 17, 18, 19, 107.2, 'r', 'q', '2023-01-17', '2023-01-17 00:01:02', true, 'qqq'), + (17, 18, 19, 20, 108.2, 's', 'r', '2023-01-18', '2023-01-18 00:01:02', false, 'rrr'), + (18, 19, 20, 21, 109.2, 't', 's', '2023-01-19', '2023-01-19 00:01:02', true, 'sss'), + (19, 20, 21, 22, 110.2, 'v', 't', '2023-01-20', '2023-01-20 00:01:02', false, 'ttt'), + (20, 21, 22, 23, 111.2, 'u', 'u', '2023-01-21', '2023-01-21 
00:01:02', true, 'uuu'), + (21, 22, 23, 24, 112.2, 'w', 'v', '2023-01-22', '2023-01-22 00:01:02', false, 'vvv'), + (22, 23, 24, 25, 113.2, 'x', 'w', '2023-01-23', '2023-01-23 00:01:02', true, 'www'), + (23, 24, 25, 26, 114.2, 'y', 'x', '2023-01-24', '2023-01-24 00:01:02', false, 'xxx'), + (24, 25, 26, 27, 115.2, 'z', 'y', '2023-01-25', '2023-01-25 00:01:02', true, 'yyy'), + (25, 26, 27, 28, 116.2, 'a', 'z', '2023-01-26', '2023-01-26 00:01:02', false, 'zzz'), + (26, 27, 28, 29, 117.2, 'b', 'a', '2023-01-27', '2023-01-27 00:01:02', true, 'aaa'), + (27, 28, 29, 30, 118.2, 'c', 'b', '2023-01-28', '2023-01-28 00:01:02', false, 'bbb'), + (28, 29, 30, 31, 119.2, 'd', 'c', '2023-01-29', '2023-01-29 00:01:02', true, 'ccc') + + """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", + insert_num, 30)) + + + + logger.info("=== Test 1: delete row case ===") + sql "DELETE FROM ${tableName} WHERE col_1 = 1" + sql "DELETE FROM ${tableName} WHERE col_2 = 3" + sql "DELETE FROM ${tableName} WHERE col_3 = 5" + sql "DELETE FROM ${tableName} WHERE col_4 = 7" + sql "DELETE FROM ${tableName} WHERE col_5 = 55.66" + sql "DELETE FROM ${tableName} WHERE col_6 = 'f'" + sql "DELETE FROM ${tableName} WHERE col_7 = 'h'" + sql "DELETE FROM ${tableName} WHERE col_8 = 'hhh'" + sql "DELETE FROM ${tableName} WHERE col_9 = '2023-01-09'" + sql "DELETE FROM ${tableName} WHERE col_10 = '2023-01-10 00:01:02'" + sql "DELETE FROM ${tableName} WHERE col_11 = true" + sql "DELETE FROM ${tableName} WHERE col_1 >= 27" + sql "DELETE FROM ${tableName} WHERE col_1 != 26 and col_1 != 25 and col_1 != 24 and col_1 != 23" + + + // 'select test from TEST_${context.dbName}.${tableName}' should return 2 rows + assertTrue(helper.checkSelectTimesOf("SELECT * FROM TEST_${context.dbName}.${tableName}", 2, 30)) + +} diff --git a/regression-test/suites/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.groovy b/regression-test/suites/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.groovy new file mode 100644 index 00000000..522c74fd --- /dev/null +++ b/regression-test/suites/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.groovy @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_dml_insert_overwrite") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("2.0 not support this case, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + // The doris has two kind of insert overwrite handle logic: leagcy and nereids. + // The first will + // 1. create temp table + // 2. 
insert into temp table + // 3. replace table + // The second will + // 1. create temp partitions + // 2. insert into temp partitions + // 3. replace overlap partitions + def tableName = "tbl_" + helper.randomSuffix() + def uniqueTable = "${tableName}_unique" + def test_num = 0 + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${uniqueTable} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(id) + ( + PARTITION `p1` VALUES LESS THAN ("100"), + PARTITION `p2` VALUES LESS THAN ("200") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "180" + ) + """ + + sql """ + INSERT INTO ${uniqueTable} VALUES + (1, 0), + (1, 1), + (1, 2), + (1, 3), + (1, 4) + """ + sql "sync" + + // test 1: target cluster follow source cluster + logger.info("=== Test 1: backup/restore case ===") + helper.ccrJobCreate(uniqueTable) + assertTrue(helper.checkRestoreFinishTimesOf("${uniqueTable}", 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test = 1 ORDER BY id", 5, 60)) + qt_sql "SELECT * FROM ${uniqueTable} WHERE test = 1 ORDER BY id" + qt_target_sql "SELECT * FROM ${uniqueTable} WHERE test = 1 ORDER BY id" + + logger.info("=== Test 2: dest cluster follow source cluster case ===") + + sql """ + INSERT INTO ${uniqueTable} VALUES + (2, 0), + (2, 1), + (2, 2), + (2, 3), + (2, 4) + """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=2", 5, 60)) + qt_sql "SELECT * FROM ${uniqueTable} WHERE test=2 ORDER BY id" + qt_target_sql "SELECT * FROM ${uniqueTable} WHERE test=2 ORDER BY id" + + logger.info("=== Test 3: insert overwrite source table ===") + + num_restore = helper.getRestoreRowSize(uniqueTable) + logger.info("current restore row size ${num_restore}") + + sql """ + INSERT OVERWRITE TABLE ${uniqueTable} VALUES + (3, 0), + (3, 1), + (3, 2), + (3, 3), + (3, 4) + """ + sql "sync" + + assertTrue(helper.checkRestoreNumAndFinishedTimesOf("${uniqueTable}", num_restore + 1, 60)) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable} WHERE test=3", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${uniqueTable}", 5, 60)) + + qt_sql "SELECT * FROM ${uniqueTable} ORDER BY test, id" + qt_target_sql "SELECT * FROM ${uniqueTable} ORDER BY test, id" +} diff --git a/regression-test/suites/table_sync/idx_bf/add_drop/test_ts_idx_bf_add_drop.groovy b/regression-test/suites/table_sync/idx_bf/add_drop/test_ts_idx_bf_add_drop.groovy new file mode 100644 index 00000000..c6d1ade8 --- /dev/null +++ b/regression-test/suites/table_sync/idx_bf/add_drop/test_ts_idx_bf_add_drop.groovy @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tbl_idx_bf_add_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `username` varchar(32) NULL DEFAULT "", + `only4test` varchar(32) NULL DEFAULT "" + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "bloom_filter_columns" = "username", + "binlog.enable" = "true" + ) + """ + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}, "test_${index}", "${index}_test") + """ + } + sql "sync" + + logger.info("=== Test 1: full update bloom filter ===") + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + def checkBloomFilter = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("\"bloom_filter_columns\" = \"username\"")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE TEST_${context.dbName}.${tableName} + """, + checkBloomFilter, 30, "target")) + + logger.info("=== Test 2: incremental update bloom filter ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10140, + // "tableId": 10181, + // "tableName": "tbl_752378863", + // "jobId": 10216, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_idx_bf_add_drop`.`tbl_752378863` PROPERTIES (\"bloom_filter_columns\" = \"username,only4test\")", + // "iim": { + // "10217": 10182 + // } + // } + sql """ + ALTER TABLE ${tableName} + SET ("bloom_filter_columns" = "username,only4test") + """ + def checkBloomFilter2 = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("\"bloom_filter_columns\"")) { + def columns = row[1] + .split("\"bloom_filter_columns\" = \"")[1] + .split("\"")[0] + .split(",") + .collect { it.trim() } + if (columns.contains("username") && columns.contains("only4test")) { + return true + } + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE ${context.dbName}.${tableName} + """, + checkBloomFilter2, 30, "sql")) + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE TEST_${context.dbName}.${tableName} + """, + checkBloomFilter2, 30, "target")) + + logger.info("=== Test 3: drop bloom filter ===") + sql """ + ALTER TABLE ${tableName} + SET ("bloom_filter_columns" = "only4test") + """ + sql "INSERT INTO ${tableName} VALUES (1, 1, '1', '1')" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + + def checkBloomFilter3 = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("\"bloom_filter_columns\" = \"only4test\"")) { + return true + } + } + return false + } + def show_create_table = target_sql "SHOW CREATE TABLE ${tableName}" + assertTrue(checkBloomFilter3(show_create_table)) +} diff --git a/regression-test/suites/table_sync/idx_bf/fpp/test_ts_idx_bf_fpp.groovy b/regression-test/suites/table_sync/idx_bf/fpp/test_ts_idx_bf_fpp.groovy new file mode 100644 index 00000000..11777bed --- /dev/null +++ 
b/regression-test/suites/table_sync/idx_bf/fpp/test_ts_idx_bf_fpp.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tbl_idx_bf_fpp") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `username` varchar(32) NULL DEFAULT "", + `only4test` varchar(32) NULL DEFAULT "" + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "bloom_filter_columns" = "username", + "binlog.enable" = "true" + ) + """ + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}, "test_${index}", "${index}_test") + """ + } + sql "sync" + + logger.info("=== Test 1: full update bloom filter ===") + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + def checkBloomFilter = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("\"bloom_filter_columns\" = \"username\"")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE TEST_${context.dbName}.${tableName} + """, + checkBloomFilter, 30, "target")) + + logger.info("=== Test 2: update bloom filter fpp property ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10335, + // "tableId": 10383, + // "tableName": "tbl_557895746", + // "jobId": 10418, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_idx_bf_fpp`.`tbl_557895746` PROPERTIES (\"bloom_filter_fpp\" = \"0.01\")", + // "iim": { + // "10419": 10384 + // } + // } + sql """ + ALTER TABLE ${tableName} + SET ("bloom_filter_fpp" = "0.01") + """ + def checkBloomFilterFPP = { inputRes -> Boolean + for (List row : inputRes) { + if ((row[1] as String).contains("\"bloom_filter_fpp\" = \"0.01\"")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE TABLE TEST_${context.dbName}.${tableName} + """, + checkBloomFilter, 30, "target")) +} + diff --git a/regression-test/suites/table-sync/test_bitmap_index.groovy b/regression-test/suites/table_sync/idx_bitmap/add/test_ts_idx_bitmap_add.groovy similarity index 95% rename from regression-test/suites/table-sync/test_bitmap_index.groovy rename to regression-test/suites/table_sync/idx_bitmap/add/test_ts_idx_bitmap_add.groovy index 3f3bf66b..44da05a7 100644 --- a/regression-test/suites/table-sync/test_bitmap_index.groovy +++ 
b/regression-test/suites/table_sync/idx_bitmap/add/test_ts_idx_bitmap_add.groovy @@ -15,14 +15,16 @@ // specific language governing permissions and limitations // under the License. -suite("test_bitmap_index") { +suite("test_ts_index_add_bitmap") { + logger.info("test bitmap index will be replaced by inverted index") + return - def tableName = "tbl_bitmap_index_" + UUID.randomUUID().toString().replace("-", "") + def tableName = "tbl_" + helper.randomSuffix() def syncerAddress = "127.0.0.1:9190" def test_num = 0 def insert_num = 5 def sync_gap_time = 5000 - String respone + String response def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean Boolean ret = false @@ -96,6 +98,7 @@ suite("test_bitmap_index") { } return false } + sql "sync" assertTrue(checkShowTimesOf(""" SHOW ALTER TABLE COLUMN WHERE TableName = \"${tableName}\" @@ -113,7 +116,7 @@ suite("test_bitmap_index") { def bodyJson = get_ccr_body "${tableName}" body "${bodyJson}" op "post" - result respone + result response } assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) @@ -146,4 +149,4 @@ suite("test_bitmap_index") { SHOW INDEXES FROM TEST_${context.dbName}.${tableName} """, checkBitmap2, 30, "target")) -} \ No newline at end of file +} diff --git a/regression-test/suites/table_sync/idx_inverted/add_build_drop/test_ts_idx_inverted_add_build_drop.groovy b/regression-test/suites/table_sync/idx_inverted/add_build_drop/test_ts_idx_inverted_add_build_drop.groovy new file mode 100644 index 00000000..52c7cd53 --- /dev/null +++ b/regression-test/suites/table_sync/idx_inverted/add_build_drop/test_ts_idx_inverted_add_build_drop.groovy @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
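+// The assertions in this suite poll the clusters through helper.groovy. A minimal
+// sketch of the polling pattern, based on the inline checkShowTimesOf closure that
+// this patch removes from the old suites (the shared helper may differ in details):
+//
+//   def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" ->
+//       while (times-- > 0) {
+//           try {
+//               def res = (func == "sql") ? sql(sqlString) : target_sql(sqlString)
+//               if (myClosure.call(res)) { return true }
+//           } catch (Exception e) {}
+//           sleep(sync_gap_time)
+//       }
+//       return false
+//   }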
+suite("test_ts_idx_inverted_add_build_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` String, + `value1` String + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, '${index}', '${index}')") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + def show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add inverted index ===") + sql """ + ALTER TABLE ${tableName} + ADD INDEX idx_inverted(value) USING INVERTED + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (1, 1, "1", "1") """ + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' }) + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ + BUILD INDEX idx_inverted ON ${tableName} + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (2, 2, "2", "2") """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW BUILD INDEX FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num+2, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' }) + + sql """ + ALTER TABLE ${tableName} + DROP INDEX idx_inverted + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ INSERT INTO ${tableName} VALUES (3, 3, "3", "3")""" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 3, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + 
assertTrue(show_indexes_result.isEmpty()) + + // FIXME(walter) no such binlogs + + // logger.info("=== Test 2: build bloom filter ===") + // sql """ + // ALTER TABLE ${tableName} + // SET ("bloom_filter_columns" = "value,value1") + // """ + // sql "sync" + + // assertTrue(helper.checkShowTimesOf(""" + // SHOW ALTER TABLE COLUMN + // FROM ${context.dbName} + // WHERE TableName = "${tableName}" AND State = "FINISHED" + // """, + // has_count(3), 30)) + + // // drop bloom filter + // sql """ + // ALTER TABLE ${tableName} + // SET ("bloom_filter_columns" = "") + // """ + // assertTrue(helper.checkShowTimesOf(""" + // SHOW ALTER TABLE COLUMN + // FROM ${context.dbName} + // WHERE TableName = "${tableName}" AND State = "FINISHED" + // """, + // has_count(4), 30)) +} + diff --git a/regression-test/suites/table_sync/idx_inverted/add_drop_multi/test_ts_idx_inverted_add_drop_multi.groovy b/regression-test/suites/table_sync/idx_inverted/add_drop_multi/test_ts_idx_inverted_add_drop_multi.groovy new file mode 100644 index 00000000..2ed6b8e1 --- /dev/null +++ b/regression-test/suites/table_sync/idx_inverted/add_drop_multi/test_ts_idx_inverted_add_drop_multi.groovy @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
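+// helper.ccrJobCreate (and the pause/resume/desync/delete variants used by other
+// suites) wraps the syncer HTTP API that the old suites called inline via httpTest.
+// A rough sketch based on the removed code; the endpoint address and body layout in
+// the shared helper may differ:
+//
+//   def ccrJobCreate = { table ->
+//       httpTest {
+//           uri "/create_ccr"
+//           endpoint syncerAddress           // e.g. "127.0.0.1:9190"
+//           def bodyJson = get_ccr_body "${table}"
+//           body "${bodyJson}"
+//           op "post"
+//           result response
+//       }
+//   }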
+suite("test_ts_idx_inverted_add_drop_multi") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` String, + `value1` String + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, '${index}', '${index}')") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add inverted index ===") + sql """ + ALTER TABLE ${tableName} + ADD INDEX idx_inverted_1 (value) USING INVERTED, + ADD INDEX idx_inverted_2 (value1) USING INVERTED + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (1, 1, "1", "1") """ + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted_1' && it['Index_type'] == 'INVERTED' }) + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted_2' && it['Index_type'] == 'INVERTED' }) + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ + ALTER TABLE ${tableName} + DROP INDEX idx_inverted_1, + DROP INDEX idx_inverted_2 + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ INSERT INTO ${tableName} VALUES (3, 3, "3", "3")""" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 2, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.isEmpty()) +} + + + diff --git a/regression-test/suites/table_sync/idx_inverted/build_with_part/test_ts_idx_inverted_build_with_part.groovy b/regression-test/suites/table_sync/idx_inverted/build_with_part/test_ts_idx_inverted_build_with_part.groovy new file mode 100644 index 00000000..1ff4f06f --- /dev/null +++ b/regression-test/suites/table_sync/idx_inverted/build_with_part/test_ts_idx_inverted_build_with_part.groovy @@ -0,0 +1,148 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_idx_inverted_build_with_part") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` String, + `value1` String + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`test`) + ( + PARTITION p1 VALUES LESS THAN ("20"), + PARTITION p2 VALUES LESS THAN ("30"), + PARTITION p3 VALUES LESS THAN ("40"), + PARTITION p4 VALUES LESS THAN ("50") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, '${index}', '${index}')") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + def show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add inverted index ===") + sql """ + ALTER TABLE ${tableName} + ADD INDEX idx_inverted(value) USING INVERTED + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (1, 1, "1", "1") """ + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' }) + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ + BUILD INDEX idx_inverted ON ${tableName} PARTITIONS (`p1`, `p2`) + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (2, 2, "2", "2") """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW BUILD INDEX FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num+2, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + logger.info("show 
indexes: ${show_indexes_result}") + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' }) + + sql """ + ALTER TABLE ${tableName} + DROP INDEX idx_inverted + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ INSERT INTO ${tableName} VALUES (3, 3, "3", "3")""" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 3, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.isEmpty()) + +} + diff --git a/regression-test/suites/table_sync/idx_inverted/create_drop/test_ts_idx_inverted_create_drop.groovy b/regression-test/suites/table_sync/idx_inverted/create_drop/test_ts_idx_inverted_create_drop.groovy new file mode 100644 index 00000000..c48b9baf --- /dev/null +++ b/regression-test/suites/table_sync/idx_inverted/create_drop/test_ts_idx_inverted_create_drop.groovy @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
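+// helper.checkRestoreFinishTimesOf waits for the full-sync restore started by
+// ccrJobCreate to reach FINISHED on the target cluster. A minimal sketch, based on
+// the inline closure removed from the old suites (the shared helper may differ):
+//
+//   def checkRestoreFinishTimesOf = { checkTable, times ->
+//       while (times-- > 0) {
+//           def rows = target_sql "SHOW RESTORE FROM TEST_${context.dbName}"
+//           if (rows.any { (it[10] as String).contains(checkTable) && (it[4] as String) == "FINISHED" }) {
+//               return true
+//           }
+//           sleep(sync_gap_time)
+//       }
+//       return false
+//   }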
+suite("test_ts_idx_inverted_create_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` String, + `value1` String + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, '${index}', '${index}')") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + def show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add inverted index ===") + sql """ + CREATE INDEX idx_inverted ON ${tableName} (value) USING INVERTED + """ + sql "sync" + + sql """ INSERT INTO ${tableName} VALUES (1, 1, "1", "1") """ + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.any { + it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' }) + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ + DROP INDEX idx_inverted ON ${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(2), 30)) + + show_indexes_result = sql "show indexes from ${tableName}" + logger.info("show indexes: ${show_indexes_result}") + + sql """ INSERT INTO ${tableName} VALUES (3, 3, "3", "3")""" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 2, 30)) + show_indexes_result = target_sql_return_maparray "show indexes from ${tableName}" + assertTrue(show_indexes_result.isEmpty()) +} + + diff --git a/regression-test/suites/table-sync/test_bloomfilter_index.groovy b/regression-test/suites/table_sync/idx_ngbf/add_drop/test_ts_idx_ngbf_add_drop.groovy similarity index 59% rename from regression-test/suites/table-sync/test_bloomfilter_index.groovy rename to regression-test/suites/table_sync/idx_ngbf/add_drop/test_ts_idx_ngbf_add_drop.groovy index 55986039..f4f7839b 100644 --- a/regression-test/suites/table-sync/test_bloomfilter_index.groovy +++ b/regression-test/suites/table_sync/idx_ngbf/add_drop/test_ts_idx_ngbf_add_drop.groovy @@ -15,59 +15,13 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_bloomfilter_index") { +suite("test_tbl_idx_ngbf_add_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - def tableName = "tbl_bloomfilter_index_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" + def tableName = "tbl_" + helper.randomSuffix() def test_num = 0 def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = row[4] == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } sql """ CREATE TABLE if NOT EXISTS ${tableName} @@ -83,7 +37,8 @@ suite("test_bloomfilter_index") { DISTRIBUTED BY HASH(id) BUCKETS 1 PROPERTIES ( "replication_allocation" = "tag.location.default: 1", - "bloom_filter_columns" = "id" + "bloom_filter_columns" = "id", + "binlog.enable" = "true" ) """ for (int index = 0; index < insert_num; index++) { @@ -91,19 +46,12 @@ suite("test_bloomfilter_index") { INSERT INTO ${tableName} VALUES (${test_num}, ${index}, "test_${index}", "${index}_test") """ } - sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + sql "sync" logger.info("=== Test 1: full update bloom filter ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobCreate(tableName) - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) def checkNgramBf = { inputRes -> Boolean for (List row : inputRes) { if (row[2] == "idx_ngrambf" && row[10] == "NGRAM_BF") { @@ -112,9 +60,9 @@ suite("test_bloomfilter_index") { } return false } - assertTrue(checkShowTimesOf(""" + assertTrue(helper.checkShowTimesOf(""" SHOW INDEXES FROM TEST_${context.dbName}.${tableName} - """, + """, checkNgramBf, 30, "target")) def checkBloomFilter = { inputRes -> Boolean for (List row : inputRes) { @@ -124,14 +72,14 @@ suite("test_bloomfilter_index") { } return false } - assertTrue(checkShowTimesOf(""" + assertTrue(helper.checkShowTimesOf(""" SHOW CREATE TABLE TEST_${context.dbName}.${tableName} - """, + """, checkBloomFilter, 30, "target")) - + logger.info("=== Test 2: incremental update Ngram bloom filter ===") sql """ - ALTER TABLE ${tableName} + ALTER TABLE ${tableName} ADD INDEX idx_only4test(`only4test`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256") """ def checkNgramBf1 = { inputRes -> Boolean @@ -142,8 +90,24 @@ suite("test_bloomfilter_index") { } return false } - assertTrue(checkShowTimesOf(""" + assertTrue(helper.checkShowTimesOf(""" + SHOW INDEXES FROM ${context.dbName}.${tableName} + """, + checkNgramBf1, 30, "sql")) + assertTrue(helper.checkShowTimesOf(""" SHOW INDEXES FROM TEST_${context.dbName}.${tableName} - """, + 
""", checkNgramBf1, 30, "target")) -} \ No newline at end of file + + logger.info("=== Test 3: drop bloom filter ===") + sql """ + ALTER TABLE ${tableName} + DROP INDEX idx_ngrambf + """ + sql "INSERT INTO ${tableName} VALUES (1, 1, '1', '1')" + + assertTrue(helper.checkSelectTimesOf( + """ SELECT * FROM ${tableName} """, insert_num + 1, 30)) + def show_indexes_result = target_sql "show indexes from ${tableName}" + assertFalse(checkNgramBf(show_indexes_result)) +} diff --git a/regression-test/suites/table_sync/mv/create_drop/test_ts_mv_create_drop.groovy b/regression-test/suites/table_sync/mv/create_drop/test_ts_mv_create_drop.groovy new file mode 100644 index 00000000..19515e29 --- /dev/null +++ b/regression-test/suites/table_sync/mv/create_drop/test_ts_mv_create_drop.groovy @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_mv_create_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `id` INT, + `col1` INT, + `col2` INT, + `col3` INT, + `col4` INT, + ) + ENGINE=OLAP + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE MATERIALIZED VIEW mtr_${tableName}_full AS + SELECT id, col1, col3 FROM ${tableName} + """ + + def materializedFinished = { res -> Boolean + for (List row : res) { + if ((row[5] as String).contains("mtr_${tableName}_full")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + materializedFinished, 30)) + sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + + + logger.info("=== Test 1: full update rollup ===") + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def checkViewExists = { res -> Boolean + for (List row : res) { + if ((row[1] as String).contains("mtr_${tableName}_full")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE MATERIALIZED VIEW mtr_${tableName}_full + ON ${tableName} + """, + checkViewExists, 30, "target")) + + + logger.info("=== Test 2: incremental update rollup ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type": "ROLLUP", + // "dbId": 10099, + // "tableId": 12828, + // "tableName": "tbl_materialized_sync_f8096d00b4634a078f9a3df6311b68db", + // "jobId": 12853, + // "jobState": "FINISHED" + // } + sql """ + CREATE MATERIALIZED VIEW ${tableName}_incr AS + SELECT id, 
col2, col4 FROM ${tableName} + """ + + def materializedFinished1 = { res -> Boolean + for (List row : res) { + if ((row[5] as String).contains("${tableName}_incr")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + materializedFinished1, 30, "sql")) + + def checkViewExists1 = { res -> Boolean + for (List row : res) { + if ((row[1] as String).contains("${tableName}_incr")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE MATERIALIZED VIEW ${tableName}_incr + ON ${tableName} + """, + checkViewExists1, 30, "target")) + + logger.info("=== Test 3: drop materialized view ===") + + sql """ + DROP MATERIALIZED VIEW ${tableName}_incr ON ${tableName} + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW CREATE MATERIALIZED VIEW ${tableName}_incr + ON ${tableName} + """, + { res -> res.size() == 0 }, 30, "target")) +} diff --git a/regression-test/suites/table_sync/partition/add/test_ts_part_add.groovy b/regression-test/suites/table_sync/partition/add/test_ts_part_add.groovy new file mode 100644 index 00000000..eebb3308 --- /dev/null +++ b/regression-test/suites/table_sync/partition/add/test_ts_part_add.groovy @@ -0,0 +1,203 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
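+// helper.randomSuffix() replaces the old per-suite
+// UUID.randomUUID().toString().replace("-", "") table-name suffix. A minimal sketch
+// of an equivalent helper; the shared implementation may differ (for example, the
+// binlog comments above suggest it may emit a numeric suffix such as 752378863):
+//
+//   def randomSuffix = {
+//       UUID.randomUUID().toString().replace("-", "")
+//   }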
+ +suite("test_ts_part_add") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "test_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Test 1: Add range partition ===") + def tableName = "${baseTableName}_range" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + sql """ + ALTER TABLE ${tableName} ADD PARTITION p3 VALUES LESS THAN ("200") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p3" + """, + exist, 60, "target")) + + def show_result = target_sql """SHOW PARTITIONS FROM ${tableName} WHERE PartitionName = "p3" """ + logger.info("show partition: ${show_result}") + // columns Range + assertTrue(show_result[0][6].contains("100")) + assertTrue(show_result[0][6].contains("200")) + + logger.info("=== Test 2: Add list partition ===") + tableName = "${baseTableName}_list" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY LIST(`id`) + ( + PARTITION `p1` VALUES IN ("0", "1", "2"), + PARTITION `p2` VALUES IN ("100", "200", "300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + sql """ + ALTER TABLE ${tableName} ADD PARTITION p3 VALUES IN ("500", "600", "700") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p3" + """, + exist, 60, "target")) + show_result = target_sql """SHOW PARTITIONS FROM ${tableName} WHERE PartitionName = "p3" """ + logger.info("show partition: ${show_result}") + // columns Range + assertTrue(show_result[0][6].contains("500")) + assertTrue(show_result[0][6].contains("600")) + assertTrue(show_result[0][6].contains("700")) + + // NOTE: ccr synder does not support syncing temp partition now. 
+ // logger.info("=== Test 3: Add temp partition ===") + // tableName = "${baseTableName}_temp_range" + // sql """ + // CREATE TABLE if NOT EXISTS ${tableName} + // ( + // `test` INT, + // `id` INT + // ) + // ENGINE=OLAP + // UNIQUE KEY(`test`, `id`) + // PARTITION BY RANGE(`id`) + // ( + // PARTITION `p1` VALUES LESS THAN ("0"), + // PARTITION `p2` VALUES LESS THAN ("100") + // ) + // DISTRIBUTED BY HASH(id) BUCKETS AUTO + // PROPERTIES ( + // "replication_allocation" = "tag.location.default: 1", + // "binlog.enable" = "true" + // ) + // """ + + // helper.ccrJobCreate(tableName) + + // assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + // sql """ + // ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p3 VALUES LESS THAN ("200") + // """ + + // assertTrue(helper.checkShowTimesOf(""" + // SHOW TEMPORARY PARTITIONS + // FROM ${tableName} + // WHERE PartitionName = "p3" + // """, + // exist, 60, "target")) + + // sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p3) VALUES (1, 150)" + + // assertTrue(helper.checkShowTimesOf(""" + // SELECT * + // FROM ${tableName} + // TEMPORARY PARTITION (p3) + // WHERE id = 150 + // """, + // exist, 60, "target")) + + logger.info("=== Test 4: Add unpartitioned partition ===") + tableName = "${baseTableName}_unpart" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("2.0 not support INSERT OVERWRITE yet, current version is: ${versions[0].Value}") + return + } + + sql """ + INSERT OVERWRITE TABLE ${tableName} VALUES (1, 100); + """ + + assertTrue(helper.checkShowTimesOf(""" + SELECT * FROM ${tableName} + WHERE id = 100 + """, + exist, 60, "target")) +} diff --git a/regression-test/suites/table-sync/test_partition_ops.groovy b/regression-test/suites/table_sync/partition/add_drop/test_tbl_part_add_drop.groovy similarity index 57% rename from regression-test/suites/table-sync/test_partition_ops.groovy rename to regression-test/suites/table_sync/partition/add_drop/test_tbl_part_add_drop.groovy index 69cf3ee0..3d9936f8 100644 --- a/regression-test/suites/table-sync/test_partition_ops.groovy +++ b/regression-test/suites/table_sync/partition/add_drop/test_tbl_part_add_drop.groovy @@ -15,73 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_partition_ops") { +suite("test_tbl_part_add_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - def tableName = "tbl_partition_ops_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" + def tableName = "tbl_" + helper.randomSuffix() def test_num = 0 def insert_num = 5 - def sync_gap_time = 5000 def opPartitonName = "less0" - String respone - - def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } def exist = { res -> Boolean return res.size() != 0 @@ -108,31 +49,20 @@ suite("test_partition_ops") { "binlog.enable" = "true" ) """ - // sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" - - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobCreate(tableName) - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) logger.info("=== Test 1: Check partitions in src before sync case ===") - assertTrue(checkShowTimesOf(""" + assertTrue(helper.checkShowTimesOf(""" SHOW PARTITIONS FROM TEST_${context.dbName}.${tableName} WHERE PartitionName = \"${opPartitonName}\" """, exist, 30, "target")) - - logger.info("=== Test 2: Add partitions case ===") opPartitonName = "one_to_five" sql """ @@ -141,12 +71,39 @@ suite("test_partition_ops") { VALUES [('0'), ('5')) """ - assertTrue(checkShowTimesOf(""" + // add partition use bucket number + opBucketNumberPartitonName = "bucket_number_partition" + sql """ + ALTER TABLE ${tableName} + ADD PARTITION ${opBucketNumberPartitonName} + VALUES [(5), (6)) DISTRIBUTED BY HASH(id) BUCKETS 2; + """ + opDifferentBucketNumberPartitonName = "different_bucket_number_partition" + sql """ + ALTER TABLE ${tableName} + ADD PARTITION ${opDifferentBucketNumberPartitonName} + VALUES [(6), (7)) DISTRIBUTED BY HASH(id) BUCKETS 3; + """ + + + assertTrue(helper.checkShowTimesOf(""" SHOW PARTITIONS FROM TEST_${context.dbName}.${tableName} WHERE PartitionName = \"${opPartitonName}\" """, exist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opBucketNumberPartitonName}\" + """, + exist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + 
WHERE PartitionName = \"${opDifferentBucketNumberPartitonName}\" + """, + exist, 30, "target")) logger.info("=== Test 3: Insert data in valid partitions case ===") @@ -156,7 +113,8 @@ suite("test_partition_ops") { INSERT INTO ${tableName} VALUES (${test_num}, ${index}) """ } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", insert_num, 30)) @@ -167,7 +125,7 @@ suite("test_partition_ops") { DROP PARTITION IF EXISTS ${opPartitonName} """ - assertTrue(checkShowTimesOf(""" + assertTrue(helper.checkShowTimesOf(""" SHOW PARTITIONS FROM TEST_${context.dbName}.${tableName} WHERE PartitionName = \"${opPartitonName}\" @@ -175,4 +133,4 @@ suite("test_partition_ops") { notExist, 30, "target")) def resSql = target_sql "SELECT * FROM ${tableName} WHERE test=3" assertTrue(resSql.size() == 0) -} \ No newline at end of file +} diff --git a/regression-test/suites/table_sync/partition/clean_restore/test_ts_part_clean_restore.groovy b/regression-test/suites/table_sync/partition/clean_restore/test_ts_part_clean_restore.groovy new file mode 100644 index 00000000..19bb1355 --- /dev/null +++ b/regression-test/suites/table_sync/partition/clean_restore/test_ts_part_clean_restore.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_part_clean_restore") { + // FIXME(walter) fix clean partitions. 
+ return + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 20 + def sync_gap_time = 5000 + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql "CREATE DATABASE IF NOT EXISTS TEST_${context.dbName}" + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_2 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_2 VALUES ${values.join(",")} """ + + def v = target_sql "SELECT * FROM ${tableName}_1" + assertEquals(v.size(), insert_num); + v = target_sql "SELECT * FROM ${tableName}_2" + assertEquals(v.size(), insert_num); + + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_0 FORCE" + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_1 FORCE" + sql "sync" + + helper.ccrJobCreateAllowTableExists("${tableName}_2") + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_2", 60)) + + // table sync should NOT clean the exists tables in the same db!!! 
+ v = target_sql "SELECT * FROM ${tableName}_2" + assertTrue(v.size() == (insert_num-10)); + v = target_sql """ SHOW TABLES LIKE "${tableName}_1" """ + assertTrue(v.size() == 1); +} + + diff --git a/regression-test/suites/table_sync/partition/recover/test_tbl_part_recover.groovy b/regression-test/suites/table_sync/partition/recover/test_tbl_part_recover.groovy new file mode 100644 index 00000000..dda58aaa --- /dev/null +++ b/regression-test/suites/table_sync/partition/recover/test_tbl_part_recover.groovy @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tbl_part_recover") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "part" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: Check partitions in src before sync case ===") + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + + + logger.info("=== Test 3: Insert data in valid partitions case ===") + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + + + logger.info("=== Test 4: Drop partitions case ===") + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_1 + """ + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_2 + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW 
PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + notExist, 30, "target")) + + logger.info("=== Test 4: recover partitions case ===") + sql """ + RECOVER PARTITION ${opPartitonName}_1 from ${tableName} + """ + sql """ + RECOVER PARTITION ${opPartitonName}_2 from ${tableName} + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + test_num = 5 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + order_qt_target_sql_content("SELECT * FROM ${tableName}") + order_qt_sql_source_content("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/table_sync/partition/recover1/test_tbl_part_recover_new.groovy b/regression-test/suites/table_sync/partition/recover1/test_tbl_part_recover_new.groovy new file mode 100644 index 00000000..1cf9a5ab --- /dev/null +++ b/regression-test/suites/table_sync/partition/recover1/test_tbl_part_recover_new.groovy @@ -0,0 +1,144 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
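+
+// Same flow as test_tbl_part_recover, but the partitions are recovered under
+// new names (RECOVER PARTITION x AS y); the renamed partitions are expected to
+// appear on the downstream table as well.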
+ +suite("test_tbl_part_recover_new") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 3 + def opPartitonName = "part" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("100") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + + logger.info("=== Test 1: Check partitions in src before sync case ===") + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + exist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + exist, 30, "target")) + + + + logger.info("=== Test 3: Insert data in valid partitions case ===") + test_num = 3 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + + + logger.info("=== Test 4: Drop partitions case ===") + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_1 + """ + sql """ + ALTER TABLE ${tableName} + DROP PARTITION IF EXISTS ${opPartitonName}_2 + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_1\" + """, + notExist, 30, "target")) + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_2\" + """, + notExist, 30, "target")) + + logger.info("=== Test 4: recover partitions case ===") + sql """ + RECOVER PARTITION ${opPartitonName}_1 as ${opPartitonName}_11 from ${tableName} + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_11\" + """, + exist, 30, "target")) + sql """ + RECOVER PARTITION ${opPartitonName}_2 as ${opPartitonName}_22 from ${tableName} + """ + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM TEST_${context.dbName}.${tableName} + WHERE PartitionName = \"${opPartitonName}_22\" + """, + exist, 30, "target")) + + test_num = 5 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + + order_qt_target_sql_content("SELECT * FROM ${tableName}") + order_qt_sql_source_content("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/table_sync/partition/rename/test_ts_part_rename.groovy b/regression-test/suites/table_sync/partition/rename/test_ts_part_rename.groovy new file 
mode 100644 index 00000000..0880fb52 --- /dev/null +++ b/regression-test/suites/table_sync/partition/rename/test_ts_part_rename.groovy @@ -0,0 +1,176 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_part_rename") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + // only works on 3.0.4/2.1.8/2.0.16 + if (!helper.is_version_supported([30004, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def tableName = "test_ts_rename_partition_tbl" + def test_num = 0 + def insert_num = 5 + def opPartitonNameOrigin = "partitionName_1" + def opPartitonNameNew = "partitionName_2" + + + def exist = { res -> Boolean + return res.size() != 0 + } + + def notExist = { res -> Boolean + return res.size() == 0 + } + + helper.enableDbBinlog() + + sql "DROP TABLE IF EXISTS ${context.dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${context.dbName}.${tableName}" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: Add partitions case ===") + + sql """ + ALTER TABLE ${tableName} + ADD PARTITION ${opPartitonNameOrigin} + VALUES [('0'), ('5')) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + exist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "target")) + + logger.info("=== Test 2: Check new partitions not exist ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + notExist, 30, "target")) + + logger.info("=== Test 3: Rename partitions name ===") + + sql """ + ALTER TABLE ${tableName} RENAME PARTITION ${opPartitonNameOrigin} ${opPartitonNameNew} + """ + + logger.info("=== Test 4: Check new partitions exist and origin partition not exist ===") + + assertTrue(helper.checkShowTimesOf(""" + SHOW 
PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + notExist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + exist, 30, "sql")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameOrigin}\" + """, + notExist, 30, "target")) + + assertTrue(helper.checkShowTimesOf(""" + SHOW PARTITIONS + FROM ${tableName} + WHERE PartitionName = \"${opPartitonNameNew}\" + """, + exist, 30, "target")) + + logger.info("=== Test 5: Check new partitions key and range ===") + + show_result = target_sql_return_maparray """SHOW PARTITIONS FROM TEST_${context.dbName}.${tableName} WHERE PartitionName = \"${opPartitonNameNew}\" """ + /* + *************************** 1. row *************************** + PartitionId: 13055 + PartitionName: partitionName_2 + VisibleVersion: 1 + VisibleVersionTime: 2024-11-11 11:48:33 + State: NORMAL + PartitionKey: id + Range: [types: [INT]; keys: [0]; ..types: [INT]; keys: [5]; ) + DistributionKey: id + Buckets: 1 + ReplicationNum: 1 + StorageMedium: HDD + CooldownTime: 9999-12-31 23:59:59 + RemoteStoragePolicy: + LastConsistencyCheckTime: NULL + DataSize: 0.000 + IsInMemory: false + ReplicaAllocation: tag.location.default: 1 + IsMutable: true + SyncWithBaseTables: true + UnsyncTables: NULL + CommittedVersion: 1 + RowCount: 0 + */ + assertEquals(show_result[0].Range, "[types: [INT]; keys: [0]; ..types: [INT]; keys: [5]; )") +} diff --git a/regression-test/suites/table_sync/partition/replace/test_ts_part_replace.groovy b/regression-test/suites/table_sync/partition/replace/test_ts_part_replace.groovy new file mode 100644 index 00000000..92642c92 --- /dev/null +++ b/regression-test/suites/table_sync/partition/replace/test_ts_part_replace.groovy @@ -0,0 +1,197 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
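+
+// Verifies REPLACE PARTITION under table sync: a temporary partition (p5) is
+// added and loaded on the upstream, then swapped in for p2, and the replaced
+// data should become visible on the downstream table.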
+ +suite("test_ts_part_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create table ===") + def tableName = "${baseTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + + logger.info("=== Add temp partition p5 ===") + + sql """ + ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p5 VALUES [("0"), ("100")) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TEMPORARY PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p5" + """, + exist, 60, "sql")) + + sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p5) VALUES (1, 50)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + TEMPORARY PARTITION (p5) + WHERE id = 50 + """, + exist, 60, "sql")) + + logger.info("=== Replace partition p2 by p5 ===") + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + notExist, 60, "target")) + + sql "ALTER TABLE ${tableName} REPLACE PARTITION (p2) WITH TEMPORARY PARTITION (p5)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + exist, 60, "target")) + + // We don't support replace partition with non-strict range and use temp name. 
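+    // The commented-out blocks below cover the two unsupported variants
+    // ("use_temp_partition_name" = "true" and "strict_range" = "false"); when
+    // re-enabling them, use helper.checkShowTimesOf(...) like the checks above
+    // rather than the bare checkShowTimesOf(...).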
+ + // logger.info("=== Add temp partition p6 ===") + + // sql """ + // ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p6 VALUES [("100"), ("200")) + // """ + + // assertTrue(checkShowTimesOf(""" + // SHOW TEMPORARY PARTITIONS + // FROM ${tableName} + // WHERE PartitionName = "p6" + // """, + // exist, 60, "sql")) + + // sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p6) VALUES (2, 150)" + + // assertTrue(checkShowTimesOf(""" + // SELECT * + // FROM ${tableName} + // TEMPORARY PARTITION (p6) + // WHERE id = 150 + // """, + // exist, 60, "sql")) + + // logger.info("=== Replace partition p3 by p6, with tmp partition name ===") + + // assertTrue(checkShowTimesOf(""" + // SELECT * + // FROM ${tableName} + // WHERE id = 150 + // """, + // notExist, 60, "target")) + + // sql """ALTER TABLE ${tableName} REPLACE PARTITION (p3) WITH TEMPORARY PARTITION (p6) + // PROPERTIES ( + // "use_temp_partition_name" = "true" + // ) + // """ + + // assertTrue(checkShowTimesOf(""" + // SELECT * + // FROM ${tableName} + // WHERE id = 150 + // """, + // exist, 60, "target")) + +// // for non strict range +// logger.info("=== Add temp partition p7 ===") + +// sql """ +// ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p7 VALUES [("0"), ("200")) +// """ + +// assertTrue(checkShowTimesOf(""" +// SHOW TEMPORARY PARTITIONS +// FROM ${tableName} +// WHERE PartitionName = "p7" +// """, +// exist, 60, "sql")) + +// sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p7) VALUES (1, 60), (2, 160)" + +// assertTrue(checkShowTimesOf(""" +// SELECT * +// FROM ${tableName} +// TEMPORARY PARTITION (p7) +// WHERE id = 60 +// """, +// exist, 60, "sql")) + +// logger.info("=== Replace partition p2,p6 by p7 ===") + +// assertTrue(checkShowTimesOf(""" +// SELECT * +// FROM ${tableName} +// WHERE id = 60 +// """, +// notExist, 60, "target")) + +// sql """ALTER TABLE ${tableName} REPLACE PARTITION (p2,p6) WITH TEMPORARY PARTITION (p7) +// PROPERTIES( +// "strict_range" = "false" +// ) +// """ + +// assertTrue(checkShowTimesOf(""" +// SELECT * +// FROM ${tableName} +// WHERE id = 60 +// """, +// exist, 60, "target")) + +} + diff --git a/regression-test/suites/table_sync/partition/replace_partial/test_ts_part_replace_partial.groovy b/regression-test/suites/table_sync/partition/replace_partial/test_ts_part_replace_partial.groovy new file mode 100644 index 00000000..e3b14f32 --- /dev/null +++ b/regression-test/suites/table_sync/partition/replace_partial/test_ts_part_replace_partial.groovy @@ -0,0 +1,164 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
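+
+// Verifies that replacing a single partition only triggers a partial sync:
+// after REPLACE PARTITION (p2) WITH TEMPORARY PARTITION (p5), the last restore
+// job on the downstream (RestoreObjs in SHOW RESTORE) should list only
+// partition "p2" of the table.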
+ +suite("test_ts_part_replace_partial") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def baseTableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + def opPartitonName = "less0" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create table ===") + tableName = "${baseTableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + // insert into p2,p3,p4 + sql """ + INSERT INTO ${tableName} VALUES + (1, 10), + (1, 11), + (1, 12), + (1, 13), + (1, 14), + (2, 100), + (2, 110), + (2, 120), + (2, 130), + (2, 140), + (3, 200), + (3, 210), + (3, 220), + (3, 230), + (3, 240) + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + // p2,p3,p4 all has 5 rows + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=1", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=2", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=3", 5, 60)) + + logger.info("=== Add temp partition p5 ===") + + sql """ + ALTER TABLE ${tableName} ADD TEMPORARY PARTITION p5 VALUES [("0"), ("100")) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW TEMPORARY PARTITIONS + FROM ${tableName} + WHERE PartitionName = "p5" + """, + exist, 60, "sql")) + + sql "INSERT INTO ${tableName} TEMPORARY PARTITION (p5) VALUES (1, 50)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + TEMPORARY PARTITION (p5) + WHERE id = 50 + """, + exist, 60, "sql")) + + logger.info("=== Replace partition p2 by p5 ===") + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + notExist, 60, "target")) + + sql "ALTER TABLE ${tableName} REPLACE PARTITION (p2) WITH TEMPORARY PARTITION (p5)" + + assertTrue(helper.checkShowTimesOf(""" + SELECT * + FROM ${tableName} + WHERE id = 50 + """, + exist, 60, "target")) + + // p3,p4 all has 5 rows, p2 has 1 row + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=1", 1, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=2", 5, 60)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=3", 5, 60)) + + // The last restore should contains only partition p2 + def show_restore_result = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + def restore_num = show_restore_result.size() + def last_restore_result = show_restore_result[restore_num-1] + def restore_objects = last_restore_result[10] // RestoreObjs + logger.info("The restore result: ${last_restore_result}") + logger.info("The restore objects: ${restore_objects}") + + // { + // "name": "ccrp_regression_test_table_sync_test_replace_partial_p_02f747eda70e4f768afd613e074e790d_1722983645", + // "database": "regression_test_table_sync", + // "backup_time": 1722983645667, + // "content": "ALL", + // "olap_table_list": [ 
+ // { + // "name": "test_replace_partial_p_02f747eda70e4f768afd613e074e790d", + // "partition_names": [ + // "p2" + // ] + // } + // ], + // "view_list": [], + // "odbc_table_list": [], + // "odbc_resource_list": [] + // } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${restore_objects}" + assertTrue(object.olap_table_list[0].partition_names.size() == 1) + assertTrue(object.olap_table_list[0].partition_names[0] == "p2") +} + + diff --git a/regression-test/suites/table_sync/prop/auto_bucket/test_ts_prop_auto_bucket.groovy b/regression-test/suites/table_sync/prop/auto_bucket/test_ts_prop_auto_bucket.groovy new file mode 100644 index 00000000..2043a7fb --- /dev/null +++ b/regression-test/suites/table_sync/prop/auto_bucket/test_ts_prop_auto_bucket.groovy @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_res_auto_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/auto_compaction/test_ts_prop_auto_compaction.groovy b/regression-test/suites/table_sync/prop/auto_compaction/test_ts_prop_auto_compaction.groovy new file mode 100644 index 00000000..7c58a985 --- /dev/null +++ b/regression-test/suites/table_sync/prop/auto_compaction/test_ts_prop_auto_compaction.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_auto_compaction") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "disable_auto_compaction" = "false" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"false\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/auto_increment/test_ts_prop_auto_increment.groovy b/regression-test/suites/table_sync/prop/auto_increment/test_ts_prop_auto_increment.groovy new file mode 100644 index 00000000..6205d721 --- /dev/null +++ b/regression-test/suites/table_sync/prop/auto_increment/test_ts_prop_auto_increment.groovy @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
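+
+// Verifies that an AUTO_INCREMENT key column survives table sync: the
+// downstream CREATE TABLE should keep the AUTO_INCREMENT(1) definition and the
+// generated ids should match the upstream rows.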
+ +suite("test_ts_prop_auto_increment") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE ${tableName} ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` int(11) NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql "INSERT INTO ${tableName} (value) VALUES (${insert_num})" + } + sql "sync" + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("`id` bigint NOT NULL AUTO_INCREMENT(1)")) + + res = sql "select * from ${tableName} order by id" + + target_res = target_sql "select * from ${tableName} order by id" + + assertEquals(target_res, res) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/binlog/test_ts_prop_binlog.groovy b/regression-test/suites/table_sync/prop/binlog/test_ts_prop_binlog.groovy new file mode 100644 index 00000000..78f9bb9c --- /dev/null +++ b/regression-test/suites/table_sync/prop/binlog/test_ts_prop_binlog.groovy @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
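+
+// Verifies that binlog table properties (binlog.enable, binlog.ttl_seconds,
+// binlog.max_bytes, binlog.max_history_nums) are reproduced in the downstream
+// table's CREATE TABLE output.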
+ +suite("test_ts_prop_binlog") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "binlog.ttl_seconds" = "86401", + "binlog.max_bytes" = "9223372036854775806", + "binlog.max_history_nums" = "9223372036854775806" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"binlog.enable\" = \"true\"")) + assertTrue(target_res[0][1].contains("\"binlog.ttl_seconds\" = \"86401\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_bytes\" = \"9223372036854775806\"")) + assertTrue(target_res[0][1].contains("\"binlog.max_history_nums\" = \"9223372036854775806\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/bloom_filter/test_ts_prop_bloom_filter.groovy b/regression-test/suites/table_sync/prop/bloom_filter/test_ts_prop_bloom_filter.groovy new file mode 100644 index 00000000..e6e7688a --- /dev/null +++ b/regression-test/suites/table_sync/prop/bloom_filter/test_ts_prop_bloom_filter.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
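+
+// Verifies that "bloom_filter_columns" and an inverted index definition are
+// kept in the downstream table's CREATE TABLE output.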
+ +suite("test_ts_prop_bloom_filter") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index' + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "bloom_filter_columns" = "test" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"bloom_filter_columns\" = \"test\"")) + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_index'")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/colocate_with/test_ts_prop_colocate_with.groovy b/regression-test/suites/table_sync/prop/colocate_with/test_ts_prop_colocate_with.groovy new file mode 100644 index 00000000..9434c3ed --- /dev/null +++ b/regression-test/suites/table_sync/prop/colocate_with/test_ts_prop_colocate_with.groovy @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
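+
+// Verifies that "colocate_with" stays on the upstream table but is not
+// propagated to the downstream table created by the ccr job.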
+ +suite("test_ts_prop_colocate_with") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "colocate_with" = "group1" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(res[0][1].contains("\"colocate_with\" = \"group1\"")) + + assertTrue(!target_res[0][1].contains("\"colocate_with\" = \"group1\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/compaction_policy/test_ts_prop_compaction_policy.groovy b/regression-test/suites/table_sync/prop/compaction_policy/test_ts_prop_compaction_policy.groovy new file mode 100644 index 00000000..a11842ad --- /dev/null +++ b/regression-test/suites/table_sync/prop/compaction_policy/test_ts_prop_compaction_policy.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
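+
+// Exercises table sync for a table with time_series compaction properties; the
+// checks below assert those properties on the upstream CREATE TABLE output
+// after the ccr job is created.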
+ +suite("test_ts_prop_compaction_policy") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { res, property -> Boolean + if(!res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + def existCompaction = { res -> Boolean + assertTrue(checkShowResult(res, "\"compaction_policy\" = \"time_series\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_goal_size_mbytes\" = \"2048\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_file_count_threshold\" = \"3000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_time_threshold_seconds\" = \"4000\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_empty_rowsets_threshold\" = \"6\"")) + assertTrue(checkShowResult(res, "\"time_series_compaction_level_threshold\" = \"2\"")) + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "2048", + "time_series_compaction_file_count_threshold" = "3000", + "time_series_compaction_time_threshold_seconds" = "4000", + "time_series_compaction_empty_rowsets_threshold" = "6", + "time_series_compaction_level_threshold" = "2" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${tableName}", existCompaction, 60, "sql")) + +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/compression/test_ts_prop_compression.groovy b/regression-test/suites/table_sync/prop/compression/test_ts_prop_compression.groovy new file mode 100644 index 00000000..6af678b2 --- /dev/null +++ b/regression-test/suites/table_sync/prop/compression/test_ts_prop_compression.groovy @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_compression") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compression"="zstd" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/dynamic_partition/test_ts_prop_dynamic_partition.groovy b/regression-test/suites/table_sync/prop/dynamic_partition/test_ts_prop_dynamic_partition.groovy new file mode 100644 index 00000000..54d58d3c --- /dev/null +++ b/regression-test/suites/table_sync/prop/dynamic_partition/test_ts_prop_dynamic_partition.groovy @@ -0,0 +1,200 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
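+
+// Verifies dynamic partition properties for DAY/WEEK/MONTH time units: the
+// downstream tables keep the dynamic_partition.* settings, except that
+// "dynamic_partition.enable" is expected to be "false" on the target.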
+ +suite("test_ts_prop_dynamic_partition") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def checkShowResult = { target_res, property -> Boolean + if(!target_res[0][1].contains(property)){ + logger.info("don't contains {}", property) + return false + } + return true + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_day" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_week" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}_range_by_month" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_day" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_week" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}_range_by_month" + + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_day + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_week + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "WEEK", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.start_day_of_week" = "2", + "dynamic_partition.history_partition_num" = "2", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_range_by_month + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.time_zone" = "Asia/Shanghai", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "dynamic_partition.create_history_partition" = "true", + "dynamic_partition.history_partition_num" = "2", + 
"dynamic_partition.start_day_of_month" = "1", + "dynamic_partition.reserved_history_periods" = "[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]", + "dynamic_partition.replication_allocation" = "tag.location.default: 1" + ) + """ + + helper.ccrJobDelete(tableName + "_range_by_day") + helper.ccrJobDelete(tableName + "_range_by_week") + helper.ccrJobDelete(tableName + "_range_by_month") + helper.ccrJobCreate(tableName + "_range_by_day") + helper.ccrJobCreate(tableName + "_range_by_week") + helper.ccrJobCreate(tableName + "_range_by_month") + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_day", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_week", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}_range_by_month", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_day\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_week\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_month\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_day\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_week\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}_range_by_month\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_day" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"DAY\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_week" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"WEEK\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_week\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) 
+ assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) + + target_res = target_sql "SHOW CREATE TABLE ${tableName}_range_by_month" + + assertTrue(checkShowResult(target_res, "\"dynamic_partition.enable\" = \"false\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_unit\" = \"MONTH\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.time_zone\" = \"Asia/Shanghai\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start\" = \"-2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.end\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.prefix\" = \"p\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.buckets\" = \"32\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.history_partition_num\" = \"2\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.start_day_of_month\" = \"1\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.create_history_partition\" = \"true\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.reserved_history_periods\" = \"[2024-01-01,2024-12-31],[2025-01-01,2025-12-31]\"")) + assertTrue(checkShowResult(target_res, "\"dynamic_partition.replication_allocation\" = \"tag.location.default: 1\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/generated_column/test_ts_prop_generated_column.groovy b/regression-test/suites/table_sync/prop/generated_column/test_ts_prop_generated_column.groovy new file mode 100644 index 00000000..d16c200a --- /dev/null +++ b/regression-test/suites/table_sync/prop/generated_column/test_ts_prop_generated_column.groovy @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
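+
+// Verifies that a generated column (total_value AS price * quantity) keeps its
+// definition in the downstream CREATE TABLE output and that the computed
+// values match after sync.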
+ +suite("test_ts_prop_generated_column") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE ${tableName} ( + product_id INT, + price DECIMAL(10,2), + quantity INT, + total_value DECIMAL(10,2) GENERATED ALWAYS AS (price * quantity) + ) DUPLICATE KEY(product_id) + DISTRIBUTED BY HASH(product_id) PROPERTIES ("replication_num" = "1") + """ + + sql """ + INSERT INTO ${tableName} VALUES(1, 10.00, 10, default); + """ + + sql """ + INSERT INTO ${tableName} (product_id, price, quantity) VALUES(1, 20.00, 10); + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName}", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("`total_value` decimal(10,2) AS ((`price` * CAST(`quantity` AS decimalv3(10,0)))) NULL")) + + target_res = target_sql_return_maparray "select * from ${tableName} order by total_value" + + assertEquals(target_res[0].total_value,100.00) + assertEquals(target_res[1].total_value,200.00) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/group_commit/test_ts_prop_group_commit.groovy b/regression-test/suites/table_sync/prop/group_commit/test_ts_prop_group_commit.groovy new file mode 100644 index 00000000..72311fd8 --- /dev/null +++ b/regression-test/suites/table_sync/prop/group_commit/test_ts_prop_group_commit.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_group_commit") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "group_commit_interval_ms" = "10000", + "group_commit_data_bytes" = "134217728" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"group_commit_interval_ms\" = \"10000\"")) + assertTrue(target_res[0][1].contains("\"group_commit_data_bytes\" = \"134217728\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/index/test_ts_prop_index.groovy b/regression-test/suites/table_sync/prop/index/test_ts_prop_index.groovy new file mode 100644 index 00000000..78cc8f0e --- /dev/null +++ b/regression-test/suites/table_sync/prop/index/test_ts_prop_index.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_index") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + INDEX id_idx (id) USING INVERTED COMMENT 'test_id_idx' + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_id_idx'")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/light_schema_change/test_ts_prop_light_schema_change.groovy b/regression-test/suites/table_sync/prop/light_schema_change/test_ts_prop_light_schema_change.groovy new file mode 100644 index 00000000..1965228f --- /dev/null +++ b/regression-test/suites/table_sync/prop/light_schema_change/test_ts_prop_light_schema_change.groovy @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_light_schema_change") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "light_schema_change" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"light_schema_change\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/repli_alloc/test_ts_prop_repli_alloc.groovy b/regression-test/suites/table_sync/prop/repli_alloc/test_ts_prop_repli_alloc.groovy new file mode 100644 index 00000000..a2c28d4f --- /dev/null +++ b/regression-test/suites/table_sync/prop/repli_alloc/test_ts_prop_repli_alloc.groovy @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_repli_alloc") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + def extractReplicationAllocation = { createTableStatement -> String + def matcher = createTableStatement[0][1] =~ /"replication_allocation" = "([^"]+)"/ + if (matcher) { + return matcher[0][1] + } + return null + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + def res_replication_allocation = extractReplicationAllocation(res) + + def target_res_replication_allocation = extractReplicationAllocation(target_res) + + assertTrue(res_replication_allocation == target_res_replication_allocation) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/row_store/test_ts_prop_row_store.groovy b/regression-test/suites/table_sync/prop/row_store/test_ts_prop_row_store.groovy new file mode 100644 index 00000000..793fa822 --- /dev/null +++ b/regression-test/suites/table_sync/prop/row_store/test_ts_prop_row_store.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ds_prop_row_store") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "row_store_columns" = "test,id", + "row_store_page_size" = "4096" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"row_store_columns\" = \"test,id\"")) + assertTrue(target_res[0][1].contains("\"row_store_page_size\" = \"4096\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/seq_col/test_ts_prop_seq_col.groovy b/regression-test/suites/table_sync/prop/seq_col/test_ts_prop_seq_col.groovy new file mode 100644 index 00000000..737c0bc5 --- /dev/null +++ b/regression-test/suites/table_sync/prop/seq_col/test_ts_prop_seq_col.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_seq_col") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName1 = "tbl_" + helper.randomSuffix() + def tableName2 = "tbl_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName1}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName2}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName1}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName2}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName1} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_col" = "test" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${tableName2} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`) + PARTITION BY RANGE(`test`) + ( + ) + DISTRIBUTED BY HASH(test) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "function_column.sequence_type" = "int" + ) + """ + + helper.ccrJobDelete(tableName1) + helper.ccrJobDelete(tableName2) + helper.ccrJobCreate(tableName1) + helper.ccrJobCreate(tableName2) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName1}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName2}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName2}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName1}\"", exist, 60, "target")) + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName2}\"", exist, 60, "target")) + + def target_res_1 = target_sql "SHOW CREATE TABLE ${tableName1}" + def target_res_2 = target_sql "SHOW CREATE TABLE ${tableName2}" + + assertTrue(target_res_1[0][1].contains("\"function_column.sequence_col\" = \"test\"")) + assertTrue(target_res_2[0][1].contains("\"function_column.sequence_type\" = \"int\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/single_replica_compaction/test_ts_prop_single_repli_compact.groovy b/regression-test/suites/table_sync/prop/single_replica_compaction/test_ts_prop_single_repli_compact.groovy new file mode 100644 index 00000000..926920b2 --- /dev/null +++ b/regression-test/suites/table_sync/prop/single_replica_compaction/test_ts_prop_single_repli_compact.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_single_repli_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_single_replica_compaction" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"enable_single_replica_compaction\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/storage_medium/test_ts_prop_storage_medium.groovy b/regression-test/suites/table_sync/prop/storage_medium/test_ts_prop_storage_medium.groovy new file mode 100644 index 00000000..ecbe5743 --- /dev/null +++ b/regression-test/suites/table_sync/prop/storage_medium/test_ts_prop_storage_medium.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_storage_medium") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_medium" = "SSD" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/storage_policy/test_ts_prop_storage_policy.groovy b/regression-test/suites/table_sync/prop/storage_policy/test_ts_prop_storage_policy.groovy new file mode 100644 index 00000000..fe3474d1 --- /dev/null +++ b/regression-test/suites/table_sync/prop/storage_policy/test_ts_prop_storage_policy.groovy @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_storage_policy") { + + logger.info("don't support this case, storage_policy can't be synchronized") + return + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + def resource_name = "test_ts_tbl_storage_policy_resource" + def policy_name= "test_ts_tbl_storage_policy" + + def check_storage_policy_exist = { name-> + def polices = sql""" + show storage policy; + """ + for (p in polices) { + if (name == p[0]) { + return true; + } + } + return false; + } + + if (check_storage_policy_exist(policy_name)) { + sql """ + DROP STORAGE POLICY ${policy_name} + """ + } + + def has_resouce = sql """ + SHOW RESOURCES WHERE NAME = "${resource_name}"; + """ + + if (has_resouce.size() > 0) { + sql """ + DROP RESOURCE ${resource_name} + """ + } + + sql """ + CREATE RESOURCE IF NOT EXISTS "${resource_name}" + PROPERTIES( + "type"="s3", + "AWS_ENDPOINT" = "${getS3Endpoint()}", + "AWS_REGION" = "${getS3Region()}", + "AWS_ROOT_PATH" = "regression/cooldown", + "AWS_ACCESS_KEY" = "${getS3AK()}", + "AWS_SECRET_KEY" = "${getS3SK()}", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "${getS3BucketName()}", + "s3_validity_check" = "true" + ); + """ + + sql """ + CREATE STORAGE POLICY IF NOT EXISTS ${policy_name} + PROPERTIES( + "storage_resource" = "${resource_name}", + "cooldown_ttl" = "300" + ) + """ + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "storage_policy" = "${policy_name}" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + // storage_policy should't be synchronized + // def res = sql "SHOW CREATE TABLE ${tableName}" + + // def ftarget_res = target_sql "SHOW CREATE TABLE ${tableName}" + + // assertTrue(res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) + + // assertTrue(!target_res[0][1].contains("\"storage_policy\" = \"${policy_name}\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/time_series_compaction/test_ts_prop_tm_series_compact.groovy b/regression-test/suites/table_sync/prop/time_series_compaction/test_ts_prop_tm_series_compact.groovy new file mode 100644 index 00000000..0d9f1a7e --- /dev/null +++ b/regression-test/suites/table_sync/prop/time_series_compaction/test_ts_prop_tm_series_compact.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_tm_series_compact") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "compaction_policy" = "time_series", + "time_series_compaction_goal_size_mbytes" = "1024", + "time_series_compaction_file_count_threshold" = "2000", + "time_series_compaction_time_threshold_seconds" = "3600", + "time_series_compaction_level_threshold" = "2" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"compaction_policy\" = \"time_series\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_goal_size_mbytes\" = \"1024\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_file_count_threshold\" = \"2000\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_time_threshold_seconds\" = \"3600\"")) + assertTrue(target_res[0][1].contains("\"time_series_compaction_level_threshold\" = \"2\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/unique_key_mow/test_ts_prop_unique_key_mow.groovy b/regression-test/suites/table_sync/prop/unique_key_mow/test_ts_prop_unique_key_mow.groovy new file mode 100644 index 00000000..f81bcc8d --- /dev/null +++ b/regression-test/suites/table_sync/prop/unique_key_mow/test_ts_prop_unique_key_mow.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_prop_unique_key_mow") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "enable_unique_key_merge_on_write" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("\"enable_unique_key_merge_on_write\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/prop/variant_nested/test_ts_prop_variant_nested.groovy b/regression-test/suites/table_sync/prop/variant_nested/test_ts_prop_variant_nested.groovy new file mode 100644 index 00000000..1e9633d5 --- /dev/null +++ b/regression-test/suites/table_sync/prop/variant_nested/test_ts_prop_variant_nested.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_prop_variant_nested") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true", + "variant_enable_flatten_nested" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def res = sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) + + res = sql "desc ${tableName}" + + // target_res = target_sql "desc ${tableName}" + + // assertEquals(res,target_res) + + // target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + // assertTrue(!target_res[0][1].contains("\"variant_enable_flatten_nested\" = \"true\"")) +} \ No newline at end of file diff --git a/regression-test/suites/table_sync/restore/test_tbl_restore.groovy b/regression-test/suites/table_sync/restore/test_tbl_restore.groovy new file mode 100644 index 00000000..db446255 --- /dev/null +++ b/regression-test/suites/table_sync/restore/test_tbl_restore.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_tbl_restore") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_tbl_sync_bak__restore_table_1" + def newtableName = "test_tbl_sync_bak__restore_table_2" + def snapshotName = "test_tbl_sync_bak__restore_table_snapshot" + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 10 + def syncer = getSyncer() + def dbNameOrigin = context.dbName + def dbNameTarget = "TEST_" + context.dbName + syncer.createS3Repository(repoName) + + target_sql("DROP DATABASE IF EXISTS ${dbNameTarget}") + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName}" + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${newtableName}" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + logger.info("=== Test 1: Check table entries count ok ===") + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + logger.info("=== Test 2: Backup table===") + + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName} """, exist, 60, "sql")) + + sql """ + BACKUP SNAPSHOT ${snapshotName} + TO `${repoName}` + ON ( ${tableName} ) + PROPERTIES ("type" = "full") + """ + + syncer.waitSnapshotFinish() + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + syncer.waitTargetRestoreFinish() + + //insert more data , so that table sync will sync it to + // target table. + test_num = 1 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + syncer.waitAllRestoreFinish() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + order_qt_sql_source_content("SELECT * FROM ${tableName}") + order_qt_target_sql_content("SELECT * FROM ${tableName}") + + logger.info("=== Test 3: Restore new table ===") + + sql """ + RESTORE SNAPSHOT ${snapshotName} + FROM `${repoName}` + ON (${tableName}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "replication_num" = "1" + ) + """ + + syncer.waitAllRestoreFinish() + // after restore it must have only first set of inserted rows. 
+ assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", + insert_num, 30)) + + logger.info("=== Test 4: Check table Content , This should be data from backup.===") + order_qt_sql_source_content("SELECT * FROM ${tableName}") + order_qt_target_sql_content("SELECT * FROM ${tableName}") +} diff --git a/regression-test/suites/table_sync/restore_multi/test_tbl_restore_multi.groovy b/regression-test/suites/table_sync/restore_multi/test_tbl_restore_multi.groovy new file mode 100644 index 00000000..88d3e87c --- /dev/null +++ b/regression-test/suites/table_sync/restore_multi/test_tbl_restore_multi.groovy @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tbl_restore_multi") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_tbl_sync_bak__restore_table_1" + def tableName2 = "test_tbl_sync_bak__restore_table_2" + def snapshotName = "test_tbl_sync_bak__restore_table_snapshot" + def snapshotName2 = "test_tbl_sync_bak__restore_table_snapshot2" + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 10 + def syncer = getSyncer() + def dbNameOrigin = context.dbName + def dbNameTarget = "TEST_" + context.dbName + syncer.createS3Repository(repoName) + + target_sql("DROP DATABASE IF EXISTS ${dbNameTarget}") + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName}" + sql "DROP TABLE IF EXISTS ${dbNameOrigin}.${tableName2}" + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + } + + sql """ + CREATE TABLE if NOT EXISTS ${dbNameOrigin}.${tableName2} + ( + `test` INT, + `id` INT + ) + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName2} VALUES (${test_num}, ${index}) + """ + } + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + helper.ccrJobDelete(tableName2) + helper.ccrJobCreate(tableName2) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60)) + 
assertTrue(helper.checkRestoreFinishTimesOf("${tableName2}", 60)) + logger.info("=== Test 1: Check table entries count ok ===") + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName2} WHERE test=${test_num}", + insert_num, 30)) + logger.info("=== Test 2: Backup table===") + + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName} """, exist, 60, "sql")) + assertTrue(helper.checkShowTimesOf(""" select * from ${dbNameOrigin}.${tableName2} """, exist, 60, "sql")) + + sql """ + BACKUP SNAPSHOT ${snapshotName} + TO `${repoName}` + ON ( ${tableName} ) + PROPERTIES ("type" = "full") + """ + syncer.waitSnapshotFinish() + sql """ + BACKUP SNAPSHOT ${snapshotName2} + TO `${repoName}` + ON ( ${tableName2} ) + PROPERTIES ("type" = "full") + """ + syncer.waitSnapshotFinish() + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + def snapshot2 = syncer.getSnapshotTimestamp(repoName, snapshotName2) + assertTrue(snapshot != null) + syncer.waitTargetRestoreFinish() + + //insert more data , so that table sync will sync it to + // target table. + test_num = 1 + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${dbNameOrigin}.${tableName} VALUES (${test_num}, ${index}) + """ + sql """ + INSERT INTO ${dbNameOrigin}.${tableName2} VALUES (${test_num}, ${index}) + """ + } + syncer.waitAllRestoreFinish() + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + insert_num, 30)) + order_qt_sql_source_content("SELECT * FROM ${tableName}") + order_qt_target_sql_content("SELECT * FROM ${tableName}") + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName2} WHERE test=${test_num}", + insert_num, 30)) + order_qt_sql_source_content("SELECT * FROM ${tableName2}") + order_qt_target_sql_content("SELECT * FROM ${tableName2}") + logger.info("=== Test 3: Restore new table ===") + + sql """ + RESTORE SNAPSHOT ${snapshotName} + FROM `${repoName}` + ON (${tableName}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "replication_num" = "1" + ) + """ + syncer.waitAllRestoreFinish() + sql """ + RESTORE SNAPSHOT ${snapshotName2} + FROM `${repoName}` + ON (${tableName2}) + PROPERTIES + ( + "backup_timestamp" = "${snapshot2}", + "replication_num" = "1" + ) + """ + syncer.waitAllRestoreFinish() + // after restore it must have only first set of inserted rows. + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", + insert_num, 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName2}", + insert_num, 30)) + logger.info("=== Test 4: Check table Content , This should be data from backup.===") + order_qt_sql_source_content("SELECT * FROM ${tableName2}") + order_qt_target_sql_content("SELECT * FROM ${tableName2}") +} diff --git a/regression-test/suites/table-sync/test_rollup.groovy b/regression-test/suites/table_sync/rollup/add_drop/test_ts_rollup_add_drop.groovy similarity index 54% rename from regression-test/suites/table-sync/test_rollup.groovy rename to regression-test/suites/table_sync/rollup/add_drop/test_ts_rollup_add_drop.groovy index 2b8f7e2a..051abf9d 100644 --- a/regression-test/suites/table-sync/test_rollup.groovy +++ b/regression-test/suites/table_sync/rollup/add_drop/test_ts_rollup_add_drop.groovy @@ -15,61 +15,21 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_rollup_sync") { - def tableName = "tbl_rollup_sync_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } +suite("test_ts_rollup_add_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - return ret + if (helper.has_feature("feature_skip_rollup_binlogs")) { + logger.info("skip this suite because feature_skip_rollup_binlogs is enabled") + return } - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 sql """ - CREATE TABLE if NOT EXISTS ${tableName} + CREATE TABLE if NOT EXISTS ${tableName} ( `id` INT, `col1` INT, @@ -78,14 +38,14 @@ suite("test_rollup_sync") { `col4` INT, ) ENGINE=OLAP - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES ( + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( "replication_allocation" = "tag.location.default: 1", "binlog.enable" = "true" ) """ sql """ - ALTER TABLE ${tableName} + ALTER TABLE ${tableName} ADD ROLLUP rollup_${tableName}_full (id, col2, col4) """ @@ -97,25 +57,18 @@ suite("test_rollup_sync") { } return false } - assertTrue(checkShowTimesOf(""" - SHOW ALTER TABLE ROLLUP + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP FROM ${context.dbName} WHERE TableName = "${tableName}" AND State = "FINISHED" - """, + """, rollupFullFinished, 30)) sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" logger.info("=== Test 1: full update rollup ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobCreate(tableName) - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) def hasRollupFull = { res -> Boolean for (List row : res) { @@ -126,13 +79,13 @@ suite("test_rollup_sync") { return false } - assertTrue(checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", + assertTrue(helper.checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", hasRollupFull, 30, "target")) logger.info("=== Test 2: incremental update rollup ===") sql """ - ALTER TABLE ${tableName} + ALTER TABLE ${tableName} ADD ROLLUP rollup_${tableName}_incr (id, col1, col3) """ def hasRollupIncremental = { res -> Boolean @@ -143,6 +96,22 @@ suite("test_rollup_sync") { } return false } - assertTrue(checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", + assertTrue(helper.checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", hasRollupIncremental, 30, "target")) + + logger.info("=== Test 3: drop 
rollup") + sql """ + ALTER TABLE ${tableName} DROP ROLLUP rollup_${tableName}_incr + """ + + def hasRollupIncrementalDropped = { res -> Boolean + for (List row : res) { + if ((row[0] as String) == "rollup_${tableName}_incr") { + return false + } + } + return true + } + assertTrue(helper.checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", + hasRollupIncrementalDropped, 30, "target")) } \ No newline at end of file diff --git a/regression-test/suites/table_sync/rollup/rename/test_ts_rollup_rename.groovy b/regression-test/suites/table_sync/rollup/rename/test_ts_rollup_rename.groovy new file mode 100644 index 00000000..d9f3acd9 --- /dev/null +++ b/regression-test/suites/table_sync/rollup/rename/test_ts_rollup_rename.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_rollup_rename") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (helper.has_feature("feature_skip_rollup_binlogs")) { + logger.info("skip this suite because feature_skip_rollup_binlogs is enabled") + return + } + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `id` INT, + `col1` INT, + `col2` INT, + `col3` INT, + `col4` INT, + ) + ENGINE=OLAP + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + ALTER TABLE ${tableName} + ADD ROLLUP rollup_${tableName}_full (id, col2, col4) + """ + + def rollupFullFinished = { res -> Boolean + for (List row : res) { + if ((row[5] as String).contains("rollup_${tableName}_full")) { + return true + } + } + return false + } + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + rollupFullFinished, 30)) + sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + + logger.info("=== Test 1: full update rollup ===") + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def hasRollupFull = { res -> Boolean + for (List row : res) { + if ((row[0] as String) == "rollup_${tableName}_full") { + return true + } + } + + return false + } + assertTrue(helper.checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", + hasRollupFull, 30, "target")) + + logger.info("=== Test 2: Rename rollup ===") + sql """ + ALTER TABLE ${tableName} + RENAME ROLLUP rollup_${tableName}_full rollup_${tableName}_full_new + """ + def hasRollupFullNew = { res -> Boolean + for (List row : res) { + if ((row[0] as String) == "rollup_${tableName}_full_new") { + return 
true + } + } + + return false + } + assertTrue(helper.checkShowTimesOf("DESC TEST_${context.dbName}.${tableName} ALL", + hasRollupFullNew, 30, "target")) +} diff --git a/regression-test/suites/table_sync/rollup_col/add/test_ts_rollup_col_add.groovy b/regression-test/suites/table_sync/rollup_col/add/test_ts_rollup_col_add.groovy new file mode 100644 index 00000000..c5320797 --- /dev/null +++ b/regression-test/suites/table_sync/rollup_col/add/test_ts_rollup_col_add.groovy @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_rollup_col_add") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `id` INT, + `col1` INT, + `col2` INT, + `col3` INT, + `col4` INT, + ) + ENGINE=OLAP + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + ALTER TABLE ${tableName} + ADD ROLLUP rollup_${tableName} (id, col2, col4) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.check_table_describe_times(tableName, 30)) + + first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: add key column ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10273, + // "tableId": 10485, + // "tableName": "tbl_848588167", + // "jobId": 10527, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_rollup_col_add`.`tbl_848588167` ADD COLUMN `key` int NULL DEFAULT \"0\" COMMENT \"\" IN `rollup_tbl_848588167`", + // "iim": { + // "10528": 10486, + // "10533": 10492 + // } + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `key` INT KEY DEFAULT "0" + TO rollup_${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" + AND IndexName = "rollup_${tableName}" + AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.check_table_describe_times(tableName, 30)) + + logger.info("=== Test 2: add value column ===") + // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data: + // { + // "dbId": 11049, + // "tableId": 11058, + // 
"indexSchemaMap": { + // "11101": [...] + // }, + // "indexes": [], + // "jobId": 11117, + // "rawSql":"ALTER TABLE `regression_test_table_sync_rollup_col_add`.`tbl_848588167` ADD COLUMN `first_value` int NULL DEFAULT \"0\" COMMENT \"\" IN `rollup_tbl_848588167`" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first_value` INT DEFAULT "0" + TO rollup_${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" + AND IndexName = "rollup_${tableName}" + AND State = "FINISHED" + """, + has_count(2), 30)) + assertTrue(helper.check_table_describe_times(tableName, 30)) + + // no full sync triggered. + last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/table_sync/rollup_col/drop/test_ts_rollup_col_drop.groovy b/regression-test/suites/table_sync/rollup_col/drop/test_ts_rollup_col_drop.groovy new file mode 100644 index 00000000..a73f814c --- /dev/null +++ b/regression-test/suites/table_sync/rollup_col/drop/test_ts_rollup_col_drop.groovy @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_ts_rollup_col_drop") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `id` INT, + `col1` INT, + `col2` INT, + `col3` INT, + `col4` INT, + ) + ENGINE=OLAP + DUPLICATE KEY(`id`, `col1`, `col2`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + ALTER TABLE ${tableName} + ADD ROLLUP rollup_${tableName} (id, col2, col4) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.check_table_describe_times(tableName, 30)) + + first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: drop key column ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10577, + // "tableId": 10640, + // "tableName": "tbl_1919050016", + // "jobId": 10682, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_rollup_col_drop`.`tbl_1919050016` DROP COLUMN `col2` IN `rollup_tbl_1919050016`", + // "iim": { + // "10683": 10647 + // } + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `col2` + FROM rollup_${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" + AND IndexName = "rollup_${tableName}" + AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.check_table_describe_times(tableName, 30)) + + logger.info("=== Test 2: drop value column ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10577, + // "tableId": 10640, + // "tableName": "tbl_1919050016", + // "jobId": 10717, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_rollup_col_drop`.`tbl_1919050016` DROP COLUMN `col4` IN `rollup_tbl_1919050016`", + // "iim": { + // "10718": 10683 + // } + // } + sql """ + ALTER TABLE ${tableName} + DROP COLUMN `col4` + FROM rollup_${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" + AND IndexName = "rollup_${tableName}" + AND State = "FINISHED" + """, + has_count(2), 30)) + assertTrue(helper.check_table_describe_times(tableName, 30)) + + // no full sync triggered. + last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} diff --git a/regression-test/suites/table_sync/rollup_col/order_by/test_ts_rollup_col_order_by.groovy b/regression-test/suites/table_sync/rollup_col/order_by/test_ts_rollup_col_order_by.groovy new file mode 100644 index 00000000..aaa68f64 --- /dev/null +++ b/regression-test/suites/table_sync/rollup_col/order_by/test_ts_rollup_col_order_by.groovy @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_rollup_col_order_by") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `id` INT, + `col1` INT, + `col2` INT, + `col3` INT, + `col4` INT, + ) + ENGINE=OLAP + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + ALTER TABLE ${tableName} + ADD ROLLUP rollup_${tableName} (id, col2, col4) + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE ROLLUP + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.check_table_describe_times(tableName, 30)) + + first_job_progress = helper.get_job_progress(tableName) + + logger.info("=== Test 1: order by columns ===") + // { + // "type": "SCHEMA_CHANGE", + // "dbId": 10844, + // "tableId": 10846, + // "tableName": "tbl_824618273", + // "jobId": 10889, + // "jobState": "FINISHED", + // "rawSql": "ALTER TABLE `regression_test_table_sync_rollup_col_order_by`.`tbl_824618273` ORDER BY `col2`, `id`, `col4` IN `rollup_tbl_824618273`", + // "iim": { + // "10890": 10853 + // } + // } + sql """ + ALTER TABLE ${tableName} + ORDER BY (col2, id, col4) + FROM rollup_${tableName} + """ + sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" + AND IndexName = "rollup_${tableName}" + AND State = "FINISHED" + """, + has_count(1), 30)) + + assertTrue(helper.check_table_describe_times(tableName, 30)) + + // no full sync triggered. + last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + diff --git a/regression-test/suites/table_sync/table/aggregate/test_ts_tbl_aggregate.groovy b/regression-test/suites/table_sync/table/aggregate/test_ts_tbl_aggregate.groovy new file mode 100644 index 00000000..c015f972 --- /dev/null +++ b/regression-test/suites/table_sync/table/aggregate/test_ts_tbl_aggregate.groovy @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_tbl_aggregate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("AGGREGATE KEY(`test`, `id`)")) +} diff --git a/regression-test/suites/table_sync/table/duplicate/test_ts_tbl_duplicate.groovy b/regression-test/suites/table_sync/table/duplicate/test_ts_tbl_duplicate.groovy new file mode 100644 index 00000000..7eb638e9 --- /dev/null +++ b/regression-test/suites/table_sync/table/duplicate/test_ts_tbl_duplicate.groovy @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
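+// test_ts_tbl_duplicate: create a DUPLICATE KEY table with binlog enabled, run a
+// table-level CCR job, and verify that the downstream SHOW CREATE TABLE output
+// keeps the DUPLICATE KEY definition.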
+ +suite("test_ts_tbl_duplicate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + DUPLICATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("DUPLICATE KEY(`test`, `id`)")) +} diff --git a/regression-test/suites/table_sync/table/modify_comment/test_ts_table_modify_comment.groovy b/regression-test/suites/table_sync/table/modify_comment/test_ts_table_modify_comment.groovy new file mode 100644 index 00000000..50c3e4b2 --- /dev/null +++ b/regression-test/suites/table_sync/table/modify_comment/test_ts_table_modify_comment.groovy @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_table_modify_comment") { + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([20108, 20017, 30004])) { + def version = helper.upstream_version() + logger.info("Skip the test case because the version is not supported. 
current version ${version}") + } + + def tableName = "tbl_" + helper.randomSuffix() + + def checkTableCommentTimesOf = { checkTable, expectedComment, times -> Boolean + def expected = "COMMENT '${expectedComment}'" + def res = target_sql "SHOW CREATE TABLE ${checkTable}" + while (times > 0) { + if (res.size() > 0 && (res[0][1] as String).contains(expected)) { + return true + } + if (--times > 0) { + sleep(helper.sync_gap_time) + res = target_sql "SHOW CREATE TABLE ${checkTable}" + } + } + return false + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + logger.info("=== Test 1: modify table comment case ===") + sql """ + ALTER TABLE ${tableName} + MODIFY COMMENT "this is a test table" + """ + assertTrue(checkTableCommentTimesOf(tableName, "this is a test table", 30)) +} diff --git a/regression-test/suites/table_sync/table/part_bucket/test_ts_tbl_part_bucket.groovy b/regression-test/suites/table_sync/table/part_bucket/test_ts_tbl_part_bucket.groovy new file mode 100644 index 00000000..27241030 --- /dev/null +++ b/regression-test/suites/table_sync/table/part_bucket/test_ts_tbl_part_bucket.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_ts_tbl_part_bucket") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def dbName = context.dbName + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}" + + helper.enableDbBinlog() + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + AGGREGATE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(tableName) + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql")) + + assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target")) + + def sql_res = sql "SHOW CREATE TABLE ${tableName}" + + def target_res = target_sql "SHOW CREATE TABLE ${tableName}" + + assertTrue(target_res[0][1].contains("PARTITION BY RANGE(`id`)")) + assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS 1")) +} \ No newline at end of file diff --git a/regression-test/suites/table-sync/test_rename.groovy b/regression-test/suites/table_sync/table/rename/test_ts_tbl_rename.groovy similarity index 65% rename from regression-test/suites/table-sync/test_rename.groovy rename to regression-test/suites/table_sync/table/rename/test_ts_tbl_rename.groovy index 1f2ad7b7..644196e3 100644 --- a/regression-test/suites/table-sync/test_rename.groovy +++ b/regression-test/suites/table_sync/table/rename/test_ts_tbl_rename.groovy @@ -15,71 +15,16 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_rename") { - def tableName = "tbl_rename_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" - def test_num = 0 - def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } +suite("test_ts_tbl_rename") { + logger.info("exit because test_rename is not supported yet") + return - def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean - Boolean ret = false - List> res - while (times > 0) { - try { - if (func == "sql") { - res = sql "${sqlString}" - } else { - res = target_sql "${sqlString}" - } - if (myClosure.call(res)) { - ret = true - } - } catch (Exception e) {} - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 sql """ CREATE TABLE if NOT EXISTS ${tableName} @@ -101,16 +46,9 @@ suite("test_rename") { """ sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobCreate(tableName) - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) logger.info("=== Test 0: Common insert case ===") @@ -119,7 +57,8 @@ suite("test_rename") { INSERT INTO ${tableName} VALUES (${test_num}, ${index}) """ } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", insert_num, 30)) @@ -134,7 +73,8 @@ suite("test_rename") { INSERT INTO ${newTableName} VALUES (${test_num}, ${index}) """ } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${newTableName} WHERE test=${test_num}", insert_num, 30)) @@ -191,4 +131,4 @@ suite("test_rename") { // assertTrue(resSql.size() == 0) // resSql = target_sql "SELECT * FROM ${tableName} WHERE test=100" // assertTrue(resSql.size() == 0) -} \ No newline at end of file +} diff --git a/regression-test/suites/table_sync/table/replace/test_ts_tbl_replace.groovy b/regression-test/suites/table_sync/table/replace/test_ts_tbl_replace.groovy new file mode 100644 index 00000000..15417be9 --- /dev/null +++ b/regression-test/suites/table_sync/table/replace/test_ts_tbl_replace.groovy @@ -0,0 +1,119 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_tbl_replace") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.is_version_supported([30003, 20108, 20016])) { + // at least doris 3.0.3, 2.1.8 and doris 2.0.16 + def version = helper.upstream_version() + logger.info("skip this suite because version is not supported, upstream version ${version}") + return + } + + def oldTableName = "tbl_old_" + helper.randomSuffix() + def newTableName = "tbl_new_" + helper.randomSuffix() + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + logger.info("=== Create both table ===") + sql """ + CREATE TABLE if NOT EXISTS ${oldTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${newTableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `p1` VALUES LESS THAN ("0"), + PARTITION `p2` VALUES LESS THAN ("100"), + PARTITION `p3` VALUES LESS THAN ("200"), + PARTITION `p4` VALUES LESS THAN ("300"), + PARTITION `p5` VALUES LESS THAN ("1000") + ) + DISTRIBUTED BY HASH(id) BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + helper.ccrJobDelete(oldTableName) + helper.ccrJobCreate(oldTableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${oldTableName}", 60)) + + sql "INSERT INTO ${oldTableName} VALUES (1, 100), (100, 1), (2, 200), (200, 2)" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 4, 60)) + + logger.info(" ==== replace with swap ==== ") + helper.ccrJobPause(oldTableName) + + sql "INSERT INTO ${newTableName} VALUES (3, 300), (300, 3)" // o:n, 4:2 + sql "INSERT INTO ${oldTableName} VALUES (3, 300), (300, 3)" // o:n, 6:2 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"true\")" // o:n, 2:6 + sql "INSERT INTO ${oldTableName} VALUES (4, 400)" // o:n, 3:6 + sql "INSERT INTO ${newTableName} VALUES (4, 400)" // o:n, 3:7 + + helper.ccrJobResume(oldTableName) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 3, 60)) + + logger.info(" ==== replace without swap ==== ") + + helper.ccrJobPause(oldTableName) + + sql "INSERT INTO ${newTableName} VALUES (5, 500), (500, 5)" // o:n, 3:9 + sql 
"INSERT INTO ${oldTableName} VALUES (5, 500), (500, 5)" // o:n, 5:9 + sql "ALTER TABLE ${oldTableName} REPLACE WITH TABLE ${newTableName} PROPERTIES (\"swap\"=\"false\")" // o:n, 9:0 + sql "INSERT INTO ${oldTableName} VALUES (6, 600)" // o:n, 10:0 + + helper.ccrJobResume(oldTableName) + + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${oldTableName}", 10, 60)) +} + diff --git a/regression-test/suites/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.groovy b/regression-test/suites/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.groovy new file mode 100644 index 00000000..7b8893ab --- /dev/null +++ b/regression-test/suites/table_sync/table/res_inverted_idx/test_ts_tbl_res_inverted_idx.groovy @@ -0,0 +1,186 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_tbl_res_inverted_idx") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_inverted_index_dup_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def checkSyncFinishTimesOf = { count, times -> Boolean + Boolean ret = false + while (times > 0) { + def sqlInfo = target_sql "SELECT COUNT() FROM TEST_${context.dbName}.${tableName}" + if ((sqlInfo[0][0] as Integer) == count) { + ret = true + break + } else if (--times > 0) { + sleep(helper.sync_gap_time) + } + } + + return ret + } + + def insert_data = { -> + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate pear", 98); """ + sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (4, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (4, "andy", "andy love apple", 100); """ + } + + def insert_data2 = { -> + sql """ INSERT INTO ${tableName} VALUES (5, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (5, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (6, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (6, "andy", "andy love apple", 98); """ + } + + def run_sql = { String db -> + if (db.startsWith('TEST_')) { + qt_target_sql """ select * from ${db}.${tableName} order by id, name, hobbies, score """ + qt_target_sql """ select * from ${db}.${tableName} where name match "andy" order by id, name, hobbies, score """ + qt_target_sql """ 
select * from ${db}.${tableName} where hobbies match "pear" order by id, name, hobbies, score """ + qt_target_sql """ select * from ${db}.${tableName} where score < 99 order by id, name, hobbies, score """ + } else { + qt_sql """ select * from ${db}.${tableName} order by id, name, hobbies, score """ + qt_sql """ select * from ${db}.${tableName} where name match "andy" order by id, name, hobbies, score """ + qt_sql """ select * from ${db}.${tableName} where hobbies match "pear" order by id, name, hobbies, score """ + qt_sql """ select * from ${db}.${tableName} where score < 99 order by id, name, hobbies, score """ + } + } + + def run_test = { -> + insert_data.call() + + sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + + def show_result = target_sql "SHOW INDEXES FROM TEST_${context.dbName}.${tableName}" + logger.info("show index from TEST_${context.dbName}.${tableName} result: " + show_result) + assertEquals(show_result.size(), 3) + assertEquals(show_result[0][2], "index_name") + assertEquals(show_result[1][2], "index_hobbies") + assertEquals(show_result[2][2], "index_score") + + run_sql.call("${context.dbName}") + run_sql.call("TEST_${context.dbName}") + + insert_data2.call() + sql "sync" + + if (("${tableName}" as String).contains("tbl_inverted_index_dup")) { + assertTrue(checkSyncFinishTimesOf(12, 30)) + } else { + assertTrue(checkSyncFinishTimesOf(6, 30)) + } + + run_sql.call("${context.dbName}") + run_sql.call("TEST_${context.dbName}") + } + + /** + * test for duplicated key table + */ + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `hobbies` text NULL, + `score` int(11) NULL, + index index_name (name) using inverted, + index index_hobbies (hobbies) using inverted properties("parser"="english"), + index index_score (score) using inverted + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( "replication_num" = "1"); + """ + + run_test.call() + + /** + * test for unique key table with mow + */ + tableName = "tbl_inverted_index_uniq_mow_" + helper.randomSuffix() + + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `hobbies` text NULL, + `score` int(11) NULL, + index index_name (name) using inverted, + index index_hobbies (hobbies) using inverted properties("parser"="english"), + index index_score (score) using inverted + ) ENGINE=OLAP + UNIQUE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true" + ); + """ + + run_test.call() + + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("doris 2.0 not support inverted index with unique mor") + return + } + + /** + * test for unique key table with mor + */ + tableName = "tbl_inverted_index_uniq_mor_" + helper.randomSuffix() + + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `hobbies` text NULL, + `score` int(11) NULL, + index index_name (name) using inverted, + index index_hobbies (hobbies) using inverted properties("parser"="english"), + index index_score (score) using inverted + ) ENGINE=OLAP + UNIQUE 
KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + run_test.call() +} diff --git a/regression-test/suites/table-sync/test_mow.groovy b/regression-test/suites/table_sync/table/res_mow/test_ts_table_res_mow.groovy similarity index 62% rename from regression-test/suites/table-sync/test_mow.groovy rename to regression-test/suites/table_sync/table/res_mow/test_ts_table_res_mow.groovy index 06df743b..c569e1e3 100644 --- a/regression-test/suites/table-sync/test_mow.groovy +++ b/regression-test/suites/table_sync/table/res_mow/test_ts_table_res_mow.groovy @@ -15,46 +15,14 @@ // specific language governing permissions and limitations // under the License. -suite("test_mow") { - def tableName = "tbl_mow_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" +suite("test_ts_table_res_mow") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() def test_num = 0 def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkSelectTimesOf = { sqlString, rowSize, times, func = null -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize || (func != null && !func(tmpRes))) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize && (func == null || func(tmpRes)) - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = (row[4] as String) == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } + String response sql """ CREATE TABLE if NOT EXISTS ${tableName} @@ -78,18 +46,12 @@ suite("test_mow") { """ } sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + sql "sync" logger.info("=== Test 1: full update mow ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } + helper.ccrJobCreate(tableName) - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) // show create table regression_test_p0.tbl_mow_sync; def res = target_sql "SHOW CREATE TABLE ${tableName}" @@ -109,6 +71,7 @@ suite("test_mow") { INSERT INTO ${tableName} VALUES (${test_num}, ${index}, ${index}) """ } + sql "sync" def checkSeq1 = { inputRes -> Boolean for (List row : inputRes) { if ((row[2] as Integer) != 4) { @@ -117,9 +80,10 @@ suite("test_mow") { } return true } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 1, 30, checkSeq1)) - + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + checkSeq1, 30, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + 1, 30)) logger.info("=== Test 3: sequence value ===") test_num = 3 @@ -128,6 +92,7 @@ suite("test_mow") { INSERT INTO ${tableName} VALUES (${test_num}, ${test_num}, 5 - ${index}) """ } + sql "sync" def checkSeq2 = { inputRes -> Boolean for (List row : inputRes) { if ((row[2] as Integer) != 5) { @@ -136,6 +101,8 @@ suite("test_mow") { } 
return true } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", - 1, 30, checkSeq2)) -} \ No newline at end of file + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + checkSeq2, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + 1, 30)) +} diff --git a/regression-test/suites/table_sync/table/res_row_storage/test_ts_tbl_res_row_storage.groovy b/regression-test/suites/table_sync/table/res_row_storage/test_ts_tbl_res_row_storage.groovy new file mode 100644 index 00000000..321dd583 --- /dev/null +++ b/regression-test/suites/table_sync/table/res_row_storage/test_ts_tbl_res_row_storage.groovy @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_ts_tbl_res_row_storage") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "store_row_column" = "true", + "binlog.enable" = "true" + ) + """ + + for (int index = 0; index < insert_num; index++) { + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, ${index}) + """ + } + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + def res = target_sql "SHOW CREATE TABLE TEST_${context.dbName}.${tableName}" + def rowStorage = false + for (List row : res) { + if ((row[0] as String) == "${tableName}") { + rowStorage = (row[1] as String).contains("\"store_row_column\" = \"true\"") + break + } + } + assertTrue(rowStorage) + + + logger.info("=== Test 2: add column case ===") + sql """ + ALTER TABLE ${tableName} + ADD COLUMN (`cost` VARCHAR(256) DEFAULT "add") + """ + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + exist, 30)) + + assertTrue(helper.checkSelectColTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + 3, 30)) + + + logger.info("=== Test 3: add a row ===") + test_num = 3 + sql """ + INSERT INTO ${tableName} VALUES (${test_num}, 0, "addadd") + """ + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num}", + 1, 30)) + res = target_sql "SELECT cost FROM TEST_${context.dbName}.${tableName} WHERE 
test=${test_num}" + assertTrue((res[0][0] as String) == "addadd") +} diff --git a/regression-test/suites/table_sync/table/res_variant/test_ts_tbl_res_variant.groovy b/regression-test/suites/table_sync/table/res_variant/test_ts_tbl_res_variant.groovy new file mode 100644 index 00000000..d4e293ba --- /dev/null +++ b/regression-test/suites/table_sync/table/res_variant/test_ts_tbl_res_variant.groovy @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_ts_tbl_res_variant") { + def versions = sql_return_maparray "show variables like 'version_comment'" + if (versions[0].Value.contains('doris-2.0.')) { + logger.info("2.0 not support variant case, current version is: ${versions[0].Value}") + return + } + + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "test_" + helper.randomSuffix() + def insert_num = 5 + + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + k bigint, + var variant + ) + UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(k) BUCKETS 1 + properties("replication_num" = "1", "disable_auto_compaction" = "false"); + """ + for (int index = 0; index < insert_num; ++index) { + sql """ + INSERT INTO ${tableName} VALUES (${index}, '{"key_${index}":"value_${index}"}') + """ + } + sql """ALTER TABLE ${tableName} set ("binlog.enable" = "true")""" + sql "sync" + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + def res = target_sql "SHOW CREATE TABLE ${tableName}" + def createSuccess = false + for (List row : res) { + def get_table_name = row[0] as String + logger.info("get_table_name is ${get_table_name}") + def compare_table_name = "${tableName}" + logger.info("compare_table_name is ${compare_table_name}") + if (get_table_name == compare_table_name) { + createSuccess = true + break + } + } + assertTrue(createSuccess) + def count_res = target_sql " select count(*) from ${tableName}" + def count = count_res[0][0] as Integer + assertTrue(count.equals(insert_num)) + + (0..count-1).each {Integer i -> + def var_reult = target_sql " select CAST(var[\"key_${i}\"] AS TEXT) from ${tableName} where k = ${i}" + assertTrue((var_reult[0][0] as String) == ("value_${i}" as String)) + + } + +} + diff --git a/regression-test/suites/table-sync/test_truncate_table.groovy b/regression-test/suites/table_sync/table/truncate/test_ts_tbl_truncate.groovy similarity index 54% rename from regression-test/suites/table-sync/test_truncate_table.groovy rename to regression-test/suites/table_sync/table/truncate/test_ts_tbl_truncate.groovy index ee4d8c4d..6164f4d8 100644 --- a/regression-test/suites/table-sync/test_truncate_table.groovy +++ 
b/regression-test/suites/table_sync/table/truncate/test_ts_tbl_truncate.groovy @@ -14,61 +14,13 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -suite("test_truncate") { +suite("test_ts_tbl_truncate") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) - def tableName = "tbl_truncate_" + UUID.randomUUID().toString().replace("-", "") - def syncerAddress = "127.0.0.1:9190" + def tableName = "tbl_" + helper.randomSuffix() def test_num = 0 def insert_num = 5 - def sync_gap_time = 5000 - String respone - - def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean - def tmpRes = target_sql "${sqlString}" - while (tmpRes.size() != rowSize) { - sleep(sync_gap_time) - if (--times > 0) { - tmpRes = target_sql "${sqlString}" - } else { - break - } - } - return tmpRes.size() == rowSize - } - - def checkRestoreFinishTimesOf = { checkTable, times -> Boolean - Boolean ret = false - while (times > 0) { - def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" - for (List row : sqlInfo) { - if ((row[10] as String).contains(checkTable)) { - ret = row[4] == "FINISHED" - } - } - - if (ret) { - break - } else if (--times > 0) { - sleep(sync_gap_time) - } - } - - return ret - } - - def checkData = { data, beginCol, value -> Boolean - if (data.size() < beginCol + value.size()) { - return false - } - - for (int i = 0; i < value.size(); ++i) { - if ((data[beginCol + i] as int) != value[i]) { - return false - } - } - - return true - } sql """ CREATE TABLE if NOT EXISTS ${tableName} @@ -96,19 +48,10 @@ suite("test_truncate") { // test 1: target cluster follow source cluster logger.info("=== Test 1: backup/restore case ===") - httpTest { - uri "/create_ccr" - endpoint syncerAddress - def bodyJson = get_ccr_body "${tableName}" - body "${bodyJson}" - op "post" - result respone - } - assertTrue(checkRestoreFinishTimesOf("${tableName}", 30)) - - - + helper.ccrJobCreate(tableName) + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + def first_job_progress = helper.get_job_progress(tableName) logger.info("=== Test 2: full partitions ===") test_num = 2 @@ -132,13 +75,14 @@ suite("test_truncate") { INSERT INTO ${tableName} VALUES (${test_num}, 3, ${index}) """ } - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=0", + sql "sync" + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=0", insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=1", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=1", insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=2", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=2", insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=3", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE test=${test_num} AND id=3", insert_num, 30)) @@ -148,13 +92,13 @@ suite("test_truncate") { // TRUNCATE TABLE tbl PARTITION(p1, p2); sql """TRUNCATE TABLE ${tableName} PARTITION(`ONE`, `THREE`)""" - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=0", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=0", 
insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=1", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=1", 0, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=2", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=2", insert_num, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=3", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=3", 0, 30)) @@ -164,12 +108,17 @@ suite("test_truncate") { // TRUNCATE TABLE tbl PARTITION(p1, p2); sql """TRUNCATE TABLE ${tableName}""" - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=0", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=0", 0, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=1", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=1", 0, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=2", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=2", 0, 30)) - assertTrue(checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=3", + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName} WHERE id=3", 0, 30)) -} \ No newline at end of file + + def last_job_progress = helper.get_job_progress(tableName) + if (helper.is_version_supported([20107, 20016])) { // at least doris 2.1.7 and doris 2.0.16 + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) + } +} diff --git a/regression-test/suites/table_sync/table/txn_insert/test_txn_insert.groovy b/regression-test/suites/table_sync/table/txn_insert/test_txn_insert.groovy new file mode 100644 index 00000000..f228a0e4 --- /dev/null +++ b/regression-test/suites/table_sync/table/txn_insert/test_txn_insert.groovy @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
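+// test_txn_insert: run multi-statement transactions (insert, update and delete
+// wrapped in begin/commit) against a merge-on-read unique table and verify each
+// transaction is replicated by the CCR job; the suite is skipped unless the
+// feature_txn_insert feature gate is enabled.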
+ +suite("test_txn_insert") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + if (!helper.has_feature("feature_txn_insert")) { + logger.info("Skip the test because the feature is not supported.") + return + } + + def tableName1 = "t1_" + helper.randomSuffix() + def tableName2 = "t2_" + helper.randomSuffix() + def test_num = 0 + def insert_num = 10 + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + def hasRollupFull = { res -> Boolean + for (List row : res) { + if ((row[0] as String) == "${new_rollup_name}") { + return true + } + } + return false + } + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName1} + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `city` VARCHAR(20) COMMENT "用户所在城市" + ) ENGINE = olap + unique KEY(`user_id`, `date`) + PARTITION BY RANGE (`date`) + ( + PARTITION `p201701` VALUES LESS THAN ("2017-02-01"), + PARTITION `p201702` VALUES LESS THAN ("2017-03-01"), + PARTITION `p201703` VALUES LESS THAN ("2017-04-01") + ) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 + PROPERTIES ("replication_num" = "1", "binlog.enable" = "true","enable_unique_key_merge_on_write" = "false"); + """ + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName2} (`id` int) + ENGINE = olap unique KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ("replication_allocation" = "tag.location.default: 1", "binlog.enable" = "true", "enable_unique_key_merge_on_write" = "false"); + """ + + sql """ insert into ${tableName2} values (3),(4),(5); """ + sql """ insert into ${tableName1} values (1, '2017-03-31', 'a'), (2, '2017-02-28', 'b'); """ + + helper.ccrJobDelete(tableName1) + helper.ccrJobCreate(tableName1) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName1}", 60)) + + + logger.info("=== Test 0: Table sync ===") + sql "sync" + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName1} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1}", 2, 30)) + + + logger.info("=== Test 1: insert only ===") + sql """ + begin; + insert into ${tableName1} select id, '2017-02-28', 'y1' from ${tableName2} where id = 3; + insert into ${tableName1} select id, '2017-02-28', 'y1' from ${tableName2} where id = 5; + insert into ${tableName1} select id, '2017-03-31', 'x' from ${tableName2} where id = 4; + commit; + """ + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName1} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1} ", 5, 30)) + + + logger.info("=== Test 2: insert + update ===") + sql """ + begin; + update ${tableName1} set city = 'xxx' where user_id = 3; + insert into ${tableName1} select id + 1, '2017-02-28', 'yyy' from ${tableName2} where id = 5; + commit; + """ + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName1} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1} where city = 'xxx' or user_id = 6", 2, 30)) + + + logger.info("=== Test 3: insert + delete ===") + sql """ + begin; + delete from ${tableName1} PARTITION p201702 where user_id = 5; + insert into ${tableName1} select id, '2017-02-28', 'new_y1' from ${tableName2} where id = 5; + commit; + """ + assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName1} ", exist, 60, "target")) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1} where city = 'new_y1'", 1, 
+
+    logger.info("=== Test 4: insert + update + delete ===")
+    sql """
+        begin;
+        delete from ${tableName1} PARTITION p201702 where user_id = 6;
+        insert into ${tableName1} select id + 2, '2017-02-28', 'y1' from ${tableName2} where id = 5;
+        update ${tableName1} set city = 'new_city' where user_id = 5;
+        commit;
+        """
+    assertTrue(helper.checkShowTimesOf("SELECT * FROM ${tableName1} ", exist, 60, "target"))
+    assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName1} where city = 'new_city' and user_id = 5", 1, 30))
+}
+
diff --git a/regression-test/suites/table_sync/table/unique/test_ts_tbl_unique.groovy b/regression-test/suites/table_sync/table/unique/test_ts_tbl_unique.groovy
new file mode 100644
index 00000000..42583a4d
--- /dev/null
+++ b/regression-test/suites/table_sync/table/unique/test_ts_tbl_unique.groovy
@@ -0,0 +1,68 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_ts_tbl_unique") {
+    def helper = new GroovyShell(new Binding(['suite': delegate]))
+            .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
+
+    def dbName = context.dbName
+    def tableName = "tbl_" + helper.randomSuffix()
+    def test_num = 0
+    def insert_num = 5
+
+    def exist = { res -> Boolean
+        return res.size() != 0
+    }
+
+    sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
+    target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
+
+    helper.enableDbBinlog()
+
+    sql """
+        CREATE TABLE if NOT EXISTS ${tableName}
+        (
+            `test` INT,
+            `id` INT
+        )
+        ENGINE=OLAP
+        UNIQUE KEY(`test`, `id`)
+        PARTITION BY RANGE(`id`)
+        (
+        )
+        DISTRIBUTED BY HASH(id) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "binlog.enable" = "true"
+        )
+    """
+
+    helper.ccrJobDelete(tableName)
+    helper.ccrJobCreate(tableName)
+
+    assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
+
+    assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
+
+    def sql_res = sql "SHOW CREATE TABLE ${tableName}"
+
+    def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
+
+    assertTrue(target_res[0][1].contains("UNIQUE KEY(`test`, `id`)"))
+}
diff --git a/regression-test/suites/table_sync_alias/column/add_value/test_tsa_column_add.groovy b/regression-test/suites/table_sync_alias/column/add_value/test_tsa_column_add.groovy
new file mode 100644
index 00000000..fd61c7c8
--- /dev/null
+++ b/regression-test/suites/table_sync_alias/column/add_value/test_tsa_column_add.groovy
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+suite("test_tsa_column_add") {
+    def helper = new GroovyShell(new Binding(['suite': delegate]))
+            .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
+
+    def suffix = helper.randomSuffix()
+    def tableName = "test_${suffix}"
+    def aliasName = "alias_${suffix}"
+    def test_num = 0
+    def insert_num = 5
+
+    def exist = { res -> Boolean
+        return res.size() != 0
+    }
+
+    def has_count = { count ->
+        return { res -> Boolean
+            res.size() == count
+        }
+    }
+
+    helper.set_alias(aliasName)
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    sql """
+        CREATE TABLE if NOT EXISTS ${tableName}
+        (
+            `test` INT,
+            `id` INT,
+            `value` INT
+        )
+        ENGINE=OLAP
+        UNIQUE KEY(`test`, `id`)
+        DISTRIBUTED BY HASH(id) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "binlog.enable" = "true"
+        )
+    """
+
+    def values = [];
+    for (int index = 0; index < insert_num; index++) {
+        values.add("(${test_num}, ${index}, ${index})")
+    }
+    sql """
+        INSERT INTO ${tableName} VALUES ${values.join(",")}
+        """
+    sql "sync"
+
+    helper.ccrJobCreate(tableName)
+
+    assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
+
+    logger.info("=== Test 1: add value column after last key ===")
+    // binlog type: MODIFY_TABLE_ADD_OR_DROP_COLUMNS, binlog data:
+    // {
+    //   "dbId": 11049,
+    //   "tableId": 11058,
+    //   "indexSchemaMap": {
+    //       "11101": [...]
+    //   },
+    //   "indexes": [],
+    //   "jobId": 11117,
+    //   "rawSql": "ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first_value` int NULL DEFAULT \"0\" COMMENT \"\" AFTER `last`"
+    // }
+    sql """
+        ALTER TABLE ${tableName}
+        ADD COLUMN `first_value` INT DEFAULT "0" AFTER `id`
+        """
+    sql "sync"
+
+    assertTrue(helper.checkShowTimesOf("""
+                                SHOW ALTER TABLE COLUMN
+                                FROM ${context.dbName}
+                                WHERE TableName = "${tableName}" AND State = "FINISHED"
+                                """,
+                                has_count(1), 30))
+
+    def has_column_first_value = { res -> Boolean
+        // Field == 'first_value' && 'Key' == 'NO'
+        return res[2][0] == 'first_value' && (res[2][3] == 'NO' || res[2][3] == 'false')
+    }
+
+    assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${aliasName}`", has_column_first_value, 60, "target_sql"))
+}
+
diff --git a/regression-test/suites/table_sync_alias/index/create_drop_inverted/test_tsa_index_create_drop_inverted.groovy b/regression-test/suites/table_sync_alias/index/create_drop_inverted/test_tsa_index_create_drop_inverted.groovy
new file mode 100644
index 00000000..095a922d
--- /dev/null
+++ b/regression-test/suites/table_sync_alias/index/create_drop_inverted/test_tsa_index_create_drop_inverted.groovy
@@ -0,0 +1,117 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+suite("test_tsa_index_create_drop_inverted") {
+    def helper = new GroovyShell(new Binding(['suite': delegate]))
+            .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
+
+    def suffix = helper.randomSuffix()
+    def tableName = "tbl_${suffix}"
+    def aliasName = "alias_${suffix}"
+    def test_num = 0
+    def insert_num = 5
+
+    def exist = { res -> Boolean
+        return res.size() != 0
+    }
+
+    def has_count = { count ->
+        return { res -> Boolean
+            res.size() == count
+        }
+    }
+
+    helper.set_alias(aliasName)
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    sql """
+        CREATE TABLE if NOT EXISTS ${tableName}
+        (
+            `test` INT,
+            `id` INT,
+            `value` String,
+            `value1` String
+        )
+        ENGINE=OLAP
+        UNIQUE KEY(`test`, `id`)
+        DISTRIBUTED BY HASH(id) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "binlog.enable" = "true"
+        )
+    """
+
+    def values = [];
+    for (int index = 0; index < insert_num; index++) {
+        values.add("(${test_num}, ${index}, '${index}', '${index}')")
+    }
+    sql """
+        INSERT INTO ${tableName} VALUES ${values.join(",")}
+        """
+    sql "sync"
+
+    def show_indexes_result = sql "show indexes from ${tableName}"
+    logger.info("show indexes: ${show_indexes_result}")
+
+    helper.ccrJobCreate(tableName)
+    assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
+
+    def first_job_progress = helper.get_job_progress(tableName)
+
+    logger.info("=== Test 1: add inverted index ===")
+    sql """
+        CREATE INDEX idx_inverted ON ${tableName} (value) USING INVERTED
+        """
+    sql "sync"
+
+    sql """ INSERT INTO ${tableName} VALUES (1, 1, "1", "1") """
+    assertTrue(helper.checkSelectTimesOf(
+        """ SELECT * FROM ${aliasName} """, insert_num + 1, 30))
+    show_indexes_result = target_sql_return_maparray "show indexes from ${aliasName}"
+    assertTrue(show_indexes_result.any {
+        it['Key_name'] == 'idx_inverted' && it['Index_type'] == 'INVERTED' })
+
+    assertTrue(helper.checkShowTimesOf("""
+                                SHOW ALTER TABLE COLUMN
+                                FROM ${context.dbName}
+                                WHERE TableName = "${tableName}" AND State = "FINISHED"
+                                """,
+                                has_count(1), 30))
+
+    show_indexes_result = sql "show indexes from ${tableName}"
+    logger.info("show indexes: ${show_indexes_result}")
+
+    sql """
+        DROP INDEX idx_inverted ON ${tableName}
+        """
+    sql "sync"
+
+    assertTrue(helper.checkShowTimesOf("""
+                                SHOW ALTER TABLE COLUMN
+                                FROM ${context.dbName}
+                                WHERE TableName = "${tableName}" AND State = "FINISHED"
+                                """,
+                                has_count(2), 30))
+
+    show_indexes_result = sql "show indexes from ${tableName}"
+    logger.info("show indexes: ${show_indexes_result}")
+
+    sql """ INSERT INTO ${tableName} VALUES (3, 3, "3", "3")"""
+
+    assertTrue(helper.checkSelectTimesOf(
+        """ SELECT * FROM ${aliasName} """, insert_num + 2, 30))
+    show_indexes_result = target_sql_return_maparray "show indexes from ${aliasName}"
+    assertTrue(show_indexes_result.isEmpty())
+}
diff --git a/shell/enable_db_binlog.sh b/shell/enable_db_binlog.sh
index 858eb5e2..a94c132e 100755
--- a/shell/enable_db_binlog.sh
+++ b/shell/enable_db_binlog.sh
@@ -49,9 +49,22 @@ fi
 echo "enable db ${db} binlog"
 # use mysql client list all tables in db
 tables=$(${mysql_client} -e "use ${db};show tables;" 2>/dev/null | sed '1d')
+view_or_external_tables=$(${mysql_client} -e "select table_name from information_schema.tables where table_schema=\"${db}\" and table_type in ('VIEW','EXTERNAL TABLE')" 2>/dev/null | sed '1d')
 
 for table in $tables; do
     echo "table: $table"
+    # skip views and external tables
+    is_view_or_external_table="false"
+    for view_or_external_table in $view_or_external_tables; do
+        if [ "$view_or_external_table" == "$table" ]; then
+            is_view_or_external_table="true"
+            break
+        fi
+    done
+    if [ "$is_view_or_external_table" == "true" ]; then
+        continue
+    fi
+
     # check table binlog is enable
     table_binlog_enable=$($mysql_client -e "show create table ${db}.${table}" 2>/dev/null | grep '"binlog.enable" = "true"')
     # remove empty line
@@ -60,8 +73,8 @@ for table in $tables; do
         echo "table ${table} binlog is enable"
     else
         echo "enable table ${table} binlog"
-        ${mysql_client} -e "ALTER TABLE $db.$table SET (\"binlog.enable\" = \"true\");" || exit 1
+        ${mysql_client} -e "ALTER TABLE $db.$table SET (\"binlog.enable\" = \"true\", \"binlog.ttl_seconds\"=\"86400\");" || exit 1
     fi
 done
-${mysql_client} -e "ALTER DATABASE $db SET properties (\"binlog.enable\" = \"true\");" || exit 1
-# mysql -uroot -p123456 -e "use test;show tables;"
\ No newline at end of file
+${mysql_client} -e "ALTER DATABASE $db SET properties (\"binlog.enable\" = \"true\", \"binlog.ttl_seconds\"=\"86400\");" || exit 1
+# mysql -uroot -p123456 -e "use test;show tables;"
diff --git a/shell/start_syncer.sh b/shell/start_syncer.sh
index 72362689..4b25a238 100755
--- a/shell/start_syncer.sh
+++ b/shell/start_syncer.sh
@@ -15,8 +15,9 @@ PID_DIR="$(
 
 usage() {
     echo "
-Usage: $0 [--deamon] [--log_level [info|debug]] [--log_dir dir] [--db_dir dir]
-    [--host host] [--port port] [--pid_dir dir]
+Usage: $0 [--daemon] [--log_level [info|debug|trace]] [--log_dir dir] [--db_dir dir]
+    [--host host] [--port port] [--pid_dir dir] [--pprof [true|false]]
+    [--pprof_port p_port] [--connect_timeout s] [--rpc_timeout s]
 "
     exit 1
 }
@@ -38,6 +39,10 @@ OPTS="$(getopt \
     -l 'host:' \
     -l 'port:' \
     -l 'pid_dir:' \
+    -l 'pprof:' \
+    -l 'pprof_port:' \
+    -l 'connect_timeout:' \
+    -l 'rpc_timeout:' \
     -- "$@")"
 
 eval set -- "${OPTS}"
@@ -52,6 +57,10 @@ DB_HOST="127.0.0.1"
 DB_PORT="3306"
 DB_USER=""
 DB_PASSWORD=""
+PPROF="false"
+PPROF_PORT="6060"
+CONNECT_TIMEOUT="10s"
+RPC_TIMEOUT="30s"
 while true; do
     case "$1" in
     -h)
@@ -108,6 +117,22 @@ while true; do
         PID_DIR=$2
         shift 2
         ;;
+    --pprof)
+        PPROF=$2
+        shift 2
+        ;;
+    --pprof_port)
+        PPROF_PORT=$2
+        shift 2
+        ;;
+    --connect_timeout)
+        CONNECT_TIMEOUT=$2
+        shift 2
+        ;;
+    --rpc_timeout)
+        RPC_TIMEOUT=$2
+        shift 2
+        ;;
     --)
         shift
        break
@@ -162,8 +187,12 @@ if [[ "${RUN_DAEMON}" -eq 1 ]]; then
         "-db_password=${DB_PASSWORD}" \
         "-host=${HOST}" \
         "-port=${PORT}" \
+        "-pprof=${PPROF}" \
+        "-pprof_port=${PPROF_PORT}" \
         "-log_level=${LOG_LEVEL}" \
         "-log_filename=${LOG_DIR}" \
+        "-connect_timeout=${CONNECT_TIMEOUT}" \
+        "-rpc_timeout=${RPC_TIMEOUT}" \
         "$@" >>"${LOG_DIR}" 2>&1 ${pidfile}
 
 else
@@ -176,5 +205,9 @@ else
         "-db_password=${DB_PASSWORD}" \
         "-host=${HOST}" \
         "-port=${PORT}" \
+        "-pprof=${PPROF}" \
+        "-pprof_port=${PPROF_PORT}" \
+        "-connect_timeout=${CONNECT_TIMEOUT}" \
+        "-rpc_timeout=${RPC_TIMEOUT}" \
         "-log_level=${LOG_LEVEL}" | tee -a "${LOG_DIR}"
 fi
\ No newline at end of file